From dc40c3a6ce0a45bf636e5df14393d1bd4a2244d4 Mon Sep 17 00:00:00 2001 From: johngreenough Date: Thu, 13 Nov 2025 15:21:11 +0100 Subject: [PATCH 1/3] Update project dependencies and language handling --- .devcontainer/Dockerfile | 117 +++---- furhat_skills/Conversation/build.gradle | 8 +- .../gradle/wrapper/gradle-wrapper.jar | Bin 54708 -> 59203 bytes .../gradle/wrapper/gradle-wrapper.properties | 2 +- furhat_skills/Conversation/gradlew | 282 +++++++++------- furhat_skills/Conversation/gradlew.bat | 173 +++++----- .../app/templateadvancedskill/flow/init.kt | 5 +- .../flow/main/conversation.kt | 304 ++++++++++++------ .../flow/main/documentWaitingToStart.kt | 48 ++- .../flow/main/greeting.kt | 99 +----- .../templateadvancedskill/flow/main/idle.kt | 3 +- .../language/AppLanguage.kt | 6 + .../templateadvancedskill/language/I18n.kt | 28 ++ .../language/LangDetect.kt | 22 ++ .../language/LanguageManager.kt | 35 ++ .../perception/PerceptionClient.kt | 0 .../perception/UserProfile.kt | 0 .../perception/UserState.kt | 0 my_furhat_backend/RAG/rag_flow.py | 12 +- my_furhat_backend/agents/document_agent.py | 86 +++-- my_furhat_backend/config/settings.py | 18 +- my_furhat_backend/main.py | 0 my_furhat_backend/memory/summarizer.py | 0 my_furhat_backend/models/chatbot_factory.py | 96 +++++- my_furhat_backend/models/llm_factory.py | 84 ++++- my_furhat_backend/perception/face.py | 0 my_furhat_backend/perception/language.py | 0 my_furhat_backend/perception/session_state.py | 0 my_furhat_backend/perception/voice.py | 0 .../perception/websocket_handler.py | 0 my_furhat_backend/pyproject.toml | 47 +++ pyproject.toml | 2 +- requirements_poetry.txt | 23 +- tests/test_language.py | 0 tests/test_memory_summarizer.py | 0 tests/test_perception_ws.py | 0 36 files changed, 999 insertions(+), 501 deletions(-) create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/AppLanguage.kt create mode 100644 
furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/I18n.kt create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt create mode 100644 my_furhat_backend/main.py create mode 100644 my_furhat_backend/memory/summarizer.py create mode 100644 my_furhat_backend/perception/face.py create mode 100644 my_furhat_backend/perception/language.py create mode 100644 my_furhat_backend/perception/session_state.py create mode 100644 my_furhat_backend/perception/voice.py create mode 100644 my_furhat_backend/perception/websocket_handler.py create mode 100644 my_furhat_backend/pyproject.toml create mode 100644 tests/test_language.py create mode 100644 tests/test_memory_summarizer.py create mode 100644 tests/test_perception_ws.py diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index ed5351d..b4d0896 100755 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,55 +1,64 @@ -# Use an official Python 3.11 runtime as a base image -FROM python:3.11-slim +# ------------------------------------------------------------- +# Stage 1: Base Python environment with build tools +# ------------------------------------------------------------- + FROM python:3.11-slim -# Install system dependencies, including build tools, git, cmake, clang, libc++-dev, libc++abi-dev, libomp-dev, ninja-build, Python development headers, OpenBLAS, and pkg-config -RUN apt-get update && apt-get install -y 
--no-install-recommends \ - build-essential \ - git \ - cmake \ - clang \ - libc++-dev \ - libc++abi-dev \ - libomp-dev \ - ninja-build \ - python3-dev \ - libopenblas-dev \ - pkg-config \ - && rm -rf /var/lib/apt/lists/* - -# Upgrade pip, setuptools, and wheel, then install Poetry 2.0.1 -RUN pip install --upgrade pip setuptools wheel && \ - pip install poetry==2.0.1 - -# Set the working directory in the container -WORKDIR /app - -# Copy dependency files first to leverage Docker cache -COPY pyproject.toml poetry.lock* /app/ - -# Install dependencies using Poetry without installing the root package -RUN poetry config virtualenvs.create false && \ - poetry install --no-root --no-interaction --no-ansi - -# (Optional) Copy and install additional dependencies from requirements_poetry.txt if present -COPY requirements_poetry.txt /app/ -RUN if [ -f requirements_poetry.txt ]; then pip install --no-cache-dir -r requirements_poetry.txt; fi - -# # Set CMake arguments for OpenBLAS support -# ENV CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" - -# # Install llama_cpp_python with verbose output -# RUN pip install --no-cache-dir --verbose llama_cpp_python==0.3.7 - -# RUN CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python - -RUN CMAKE_ARGS="-DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a" pip install llama-cpp-python - - -# Copy the rest of the project files -COPY . 
/app - -# Expose the port if needed (or you can omit if not running the server automatically) -EXPOSE 8000 - -# Instead of running the app automatically, start a shell for interactive work -CMD [ "bash" ] + # System dependencies for building and for Ollama + RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential git curl wget cmake clang libc++-dev libc++abi-dev \ + libomp-dev ninja-build python3-dev libopenblas-dev pkg-config ca-certificates \ + && rm -rf /var/lib/apt/lists/* + + # ------------------------------------------------------------- + # Install Poetry and project dependencies + # ------------------------------------------------------------- + RUN pip install --upgrade pip setuptools wheel && pip install poetry==2.0.1 + WORKDIR /app + + # Copy Poetry files + COPY pyproject.toml poetry.lock* /app/ + RUN poetry config virtualenvs.create false && poetry install --no-root --no-interaction --no-ansi + + # Optional extra dependencies + COPY requirements_poetry.txt /app/ + RUN if [ -f requirements_poetry.txt ]; then pip install --no-cache-dir -r requirements_poetry.txt; fi + + # ------------------------------------------------------------- + # Install Ollama + # ------------------------------------------------------------- + # Ollama official Linux install script: + RUN curl -fsSL https://ollama.com/install.sh | sh + + # Add Ollama binary to PATH (just to be sure) + ENV PATH="/usr/local/bin:${PATH}" + + # ------------------------------------------------------------- + # (Optional) Install llama-cpp-python if you still want local GGUF support + # Comment out if you’ll use Ollama exclusively + # RUN CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python==0.3.7 + + # ------------------------------------------------------------- + # Copy the rest of your app + # ------------------------------------------------------------- + COPY . 
/app + + # Environment variables + ENV OLLAMA_BASE_URL="http://localhost:11434" + ENV HF_HOME=/app/.cache/hf + ENV TRANSFORMERS_CACHE=/app/.cache/hf + ENV SENTENCE_TRANSFORMERS_HOME=/app/.cache/st + + # Expose the FastAPI port + EXPOSE 8000 + + # ------------------------------------------------------------- + # Entrypoint: start Ollama service + your API + # ------------------------------------------------------------- + # Ollama runs as a background service; we then start FastAPI + CMD bash -c "\ + echo 'Starting Ollama service...' && \ + ollama serve & \ + sleep 3 && \ + echo 'Starting FastAPI app...' && \ + uvicorn main:app --host 0.0.0.0 --port 8000 \ + " diff --git a/furhat_skills/Conversation/build.gradle b/furhat_skills/Conversation/build.gradle index 8f49f67..6204e8d 100755 --- a/furhat_skills/Conversation/build.gradle +++ b/furhat_skills/Conversation/build.gradle @@ -1,6 +1,6 @@ plugins { - id "org.jetbrains.kotlin.jvm" version "1.8.21" - id 'com.github.johnrengelman.shadow' version '2.0.4' + id "org.jetbrains.kotlin.jvm" version "1.9.22" + id "com.github.johnrengelman.shadow" version "8.1.1" } apply plugin: 'java' @@ -74,7 +74,8 @@ shadowJar { properties.load(project.file('skill.properties').newDataInputStream()) def version = properties.getProperty('version') def name = properties.getProperty('name') - archiveName = "${name}_${version}.skill" + archiveFileName.set("${name}_${version}.skill") + archiveExtension.set("skill") manifest { exclude '**/Log4j2Plugins.dat' @@ -82,5 +83,4 @@ shadowJar { } from "skill.properties" from "assets" - extension 'skill' } diff --git a/furhat_skills/Conversation/gradle/wrapper/gradle-wrapper.jar b/furhat_skills/Conversation/gradle/wrapper/gradle-wrapper.jar index 7a3265ee94c0ab25cf079ac8ccdf87f41d455d42..e708b1c023ec8b20f512888fe07c5bd3ff77bb8f 100755 GIT binary patch literal 59203 zcma&O1CT9Y(k9%tZQHhO+qUh#ZQHhO+qmuS+qP|E@9xZO?0h@l{(r>DQ>P;GjjD{w zH}lENr;dU&FbEU?00aa80D$0M0RRB{U*7-#kbjS|qAG&4l5%47zyJ#WrfA#1$1Ctx 
zf&Z_d{GW=lf^w2#qRJ|CvSJUi(^E3iv~=^Z(zH}F)3Z%V3`@+rNB7gTVU{Bb~90p|f+0(v;nz01EG7yDMX9@S~__vVgv%rS$+?IH+oZ03D5zYrv|^ zC1J)SruYHmCki$jLBlTaE5&dFG9-kq3!^i>^UQL`%gn6)jz54$WDmeYdsBE9;PqZ_ zoGd=P4+|(-u4U1dbAVQrFWoNgNd;0nrghPFbQrJctO>nwDdI`Q^i0XJDUYm|T|RWc zZ3^Qgo_Qk$%Fvjj-G}1NB#ZJqIkh;kX%V{THPqOyiq)d)0+(r9o(qKlSp*hmK#iIY zA^)Vr$-Hz<#SF=0@tL@;dCQsm`V9s1vYNq}K1B)!XSK?=I1)tX+bUV52$YQu*0%fnWEukW>mxkz+%3-S!oguE8u#MGzST8_Dy^#U?fA@S#K$S@9msUiX!gd_ow>08w5)nX{-KxqMOo7d?k2&?Vf z&diGDtZr(0cwPe9z9FAUSD9KC)7(n^lMWuayCfxzy8EZsns%OEblHFSzP=cL6}?J| z0U$H!4S_TVjj<`6dy^2j`V`)mC;cB%* z8{>_%E1^FH!*{>4a7*C1v>~1*@TMcLK{7nEQ!_igZC}ikJ$*<$yHy>7)oy79A~#xE zWavoJOIOC$5b6*q*F_qN1>2#MY)AXVyr$6x4b=$x^*aqF*L?vmj>Mgv+|ITnw_BoW zO?jwHvNy^prH{9$rrik1#fhyU^MpFqF2fYEt(;4`Q&XWOGDH8k6M=%@fics4ajI;st# zCU^r1CK&|jzUhRMv;+W~6N;u<;#DI6cCw-otsc@IsN3MoSD^O`eNflIoR~l4*&-%RBYk@gb^|-JXs&~KuSEmMxB}xSb z@K76cXD=Y|=I&SNC2E+>Zg?R6E%DGCH5J1nU!A|@eX9oS(WPaMm==k2s_ueCqdZw| z&hqHp)47`c{BgwgvY2{xz%OIkY1xDwkw!<0veB#yF4ZKJyabhyyVS`gZepcFIk%e2 zTcrmt2@-8`7i-@5Nz>oQWFuMC_KlroCl(PLSodswHqJ3fn<;gxg9=}~3x_L3P`9Sn zChIf}8vCHvTriz~T2~FamRi?rh?>3bX1j}%bLH+uFX+p&+^aXbOK7clZxdU~6Uxgy z8R=obwO4dL%pmVo*Ktf=lH6hnlz_5k3cG;m8lgaPp~?eD!Yn2kf)tU6PF{kLyn|oI@eQ`F z3IF7~Blqg8-uwUuWZScRKn%c2_}dXB6Dx_&xR*n9M9LXasJhtZdr$vBY!rP{c@=)& z#!?L$2UrkvClwQO>U*fSMs67oSj2mxiJ$t;E|>q%Kh_GzzWWO&3;ufU%2z%ucBU8H z3WIwr$n)cfCXR&>tyB7BcSInK>=ByZA%;cVEJhcg<#6N{aZC4>K41XF>ZgjG`z_u& zGY?;Ad?-sgiOnI`oppF1o1Gurqbi*;#x2>+SSV6|1^G@ooVy@fg?wyf@0Y!UZ4!}nGuLeC^l)6pwkh|oRY`s1Pm$>zZ3u-83T|9 zGaKJIV3_x+u1>cRibsaJpJqhcm%?0-L;2 zitBrdRxNmb0OO2J%Y&Ym(6*`_P3&&5Bw157{o7LFguvxC$4&zTy#U=W*l&(Q2MNO} zfaUwYm{XtILD$3864IA_nn34oVa_g^FRuHL5wdUd)+W-p-iWCKe8m_cMHk+=? 
zeKX)M?Dt(|{r5t7IenkAXo%&EXIb-i^w+0CX0D=xApC=|Xy(`xy+QG^UyFe z+#J6h_&T5i#sV)hj3D4WN%z;2+jJcZxcI3*CHXGmOF3^)JD5j&wfX)e?-|V0GPuA+ zQFot%aEqGNJJHn$!_}#PaAvQ^{3-Ye7b}rWwrUmX53(|~i0v{}G_sI9uDch_brX&6 zWl5Ndj-AYg(W9CGfQf<6!YmY>Ey)+uYd_JNXH=>|`OH-CDCmcH(0%iD_aLlNHKH z7bcW-^5+QV$jK?R*)wZ>r9t}loM@XN&M-Pw=F#xn(;u3!(3SXXY^@=aoj70;_=QE9 zGghsG3ekq#N||u{4We_25U=y#T*S{4I{++Ku)> zQ!DZW;pVcn>b;&g2;YE#+V`v*Bl&Y-i@X6D*OpNA{G@JAXho&aOk(_j^weW{#3X5Y z%$q_wpb07EYPdmyH(1^09i$ca{O<}7) zRWncXdSPgBE%BM#by!E>tdnc$8RwUJg1*x($6$}ae$e9Knj8gvVZe#bLi!<+&BkFj zg@nOpDneyc+hU9P-;jmOSMN|*H#>^Ez#?;%C3hg_65leSUm;iz)UkW)jX#p)e&S&M z1|a?wDzV5NVnlhRBCd_;F87wp>6c<&nkgvC+!@KGiIqWY4l}=&1w7|r6{oBN8xyzh zG$b#2=RJp_iq6)#t5%yLkKx(0@D=C3w+oiXtSuaQ%I1WIb-eiE$d~!)b@|4XLy!CZ z9p=t=%3ad@Ep+<9003D2KZ5VyP~_n$=;~r&YUg5UZ0KVD&tR1DHy9x)qWtKJp#Kq# zP*8p#W(8JJ_*h_3W}FlvRam?<4Z+-H77^$Lvi+#vmhL9J zJ<1SV45xi;SrO2f=-OB(7#iNA5)x1uNC-yNxUw|!00vcW2PufRm>e~toH;M0Q85MQLWd?3O{i8H+5VkR@l9Dg-ma ze2fZ%>G(u5(k9EHj2L6!;(KZ8%8|*-1V|B#EagbF(rc+5iL_5;Eu)L4Z-V;0HfK4d z*{utLse_rvHZeQ>V5H=f78M3Ntg1BPxFCVD{HbNA6?9*^YIq;B-DJd{Ca2L#)qWP? zvX^NhFmX?CTWw&Ns}lgs;r3i+Bq@y}Ul+U%pzOS0Fcv9~aB(0!>GT0)NO?p=25LjN z2bh>6RhgqD7bQj#k-KOm@JLgMa6>%-ok1WpOe)FS^XOU{c?d5shG(lIn3GiVBxmg`u%-j=)^v&pX1JecJics3&jvPI)mDut52? 
z3jEA)DM%}BYbxxKrizVYwq?(P&19EXlwD9^-6J+4!}9{ywR9Gk42jjAURAF&EO|~N z)?s>$Da@ikI4|^z0e{r`J8zIs>SpM~Vn^{3fArRu;?+43>lD+^XtUcY1HidJwnR6+ z!;oG2=B6Z_=M%*{z-RaHc(n|1RTKQdNjjV!Pn9lFt^4w|AeN06*j}ZyhqZ^!-=cyGP_ShV1rGxkx8t zB;8`h!S{LD%ot``700d0@Grql(DTt4Awgmi+Yr0@#jbe=2#UkK%rv=OLqF)9D7D1j z!~McAwMYkeaL$~kI~90)5vBhBzWYc3Cj1WI0RS`z000R8-@ET0dA~*r(gSiCJmQMN&4%1D zyVNf0?}sBH8zNbBLn>~(W{d3%@kL_eQ6jEcR{l>C|JK z(R-fA!z|TTRG40|zv}7E@PqCAXP3n`;%|SCQ|ZS%ym$I{`}t3KPL&^l5`3>yah4*6 zifO#{VNz3)?ZL$be;NEaAk9b#{tV?V7 zP|wf5YA*1;s<)9A4~l3BHzG&HH`1xNr#%){4xZ!jq%o=7nN*wMuXlFV{HaiQLJ`5G zBhDi#D(m`Q1pLh@Tq+L;OwuC52RdW7b8}~60WCOK5iYMUad9}7aWBuILb({5=z~YF zt?*Jr5NG+WadM{mDL>GyiByCuR)hd zA=HM?J6l1Xv0Dl+LW@w$OTcEoOda^nFCw*Sy^I@$sSuneMl{4ys)|RY#9&NxW4S)9 zq|%83IpslTLoz~&vTo!Ga@?rj_kw{|k{nv+w&Ku?fyk4Ki4I?);M|5Axm)t+BaE)D zm(`AQ#k^DWrjbuXoJf2{Aj^KT zFb1zMSqxq|vceV+Mf-)$oPflsO$@*A0n0Z!R{&(xh8s}=;t(lIy zv$S8x>m;vQNHuRzoaOo?eiWFe{0;$s`Bc+Osz~}Van${u;g(su`3lJ^TEfo~nERfP z)?aFzpDgnLYiERsKPu|0tq4l2wT)Atr6Qb%m-AUn6HnCue*yWICp7TjW$@sO zm5rm4aTcPQ(rfi7a`xP7cKCFrJD}*&_~xgLyr^-bmsL}y;A5P|al8J3WUoBSjqu%v zxC;mK!g(7r6RRJ852Z~feoC&sD3(6}^5-uLK8o)9{8L_%%rItZK9C){UxB|;G>JbP zsRRtS4-3B*5c+K2kvmgZK8472%l>3cntWUOVHxB|{Ay~aOg5RN;{PJgeVD*H%ac+y!h#wi%o2bF2Ca8IyMyH{>4#{E_8u^@+l-+n=V}Sq?$O z{091@v%Bd*3pk0^2UtiF9Z+(a@wy6 zUdw8J*ze$K#=$48IBi1U%;hmhO>lu!uU;+RS}p&6@rQila7WftH->*A4=5W|Fmtze z)7E}jh@cbmr9iup^i%*(uF%LG&!+Fyl@LFA-}Ca#bxRfDJAiR2dt6644TaYw1Ma79 zt8&DYj31j^5WPNf5P&{)J?WlCe@<3u^78wnd(Ja4^a>{^Tw}W>|Cjt^If|7l^l)^Q zbz|7~CF(k_9~n|h;ysZ+jHzkXf(*O*@5m zLzUmbHp=x!Q|!9NVXyipZ3)^GuIG$k;D)EK!a5=8MFLI_lpf`HPKl=-Ww%z8H_0$j ztJ||IfFG1lE9nmQ0+jPQy zCBdKkjArH@K7jVcMNz);Q(Q^R{d5G?-kk;Uu_IXSyWB)~KGIizZL(^&qF;|1PI7!E zTP`%l)gpX|OFn&)M%txpQ2F!hdA~hX1Cm5)IrdljqzRg!f{mN%G~H1&oqe`5eJCIF zHdD7O;AX-{XEV(a`gBFJ9ews#CVS2y!&>Cm_dm3C8*n3MA*e67(WC?uP@8TXuMroq z{#w$%z@CBIkRM7?}Xib+>hRjy?%G!fiw8! 
z8(gB+8J~KOU}yO7UGm&1g_MDJ$IXS!`+*b*QW2x)9>K~Y*E&bYMnjl6h!{17_8d!%&9D`a7r&LKZjC<&XOvTRaKJ1 zUY@hl5^R&kZl3lU3njk`3dPzxj$2foOL26r(9zsVF3n_F#v)s5vv3@dgs|lP#eylq62{<-vczqP!RpVBTgI>@O6&sU>W|do17+#OzQ7o5A$ICH z?GqwqnK^n2%LR;$^oZM;)+>$X3s2n}2jZ7CdWIW0lnGK-b#EG01)P@aU`pg}th&J-TrU`tIpb5t((0eu|!u zQz+3ZiOQ^?RxxK4;zs=l8q!-n7X{@jSwK(iqNFiRColuEOg}!7cyZi`iBX4g1pNBj zAPzL?P^Ljhn;1$r8?bc=#n|Ed7wB&oHcw()&*k#SS#h}jO?ZB246EGItsz*;^&tzp zu^YJ0=lwsi`eP_pU8}6JA7MS;9pfD;DsSsLo~ogzMNP70@@;Fm8f0^;>$Z>~}GWRw!W5J3tNX*^2+1f3hz{~rIzJo z6W%J(H!g-eI_J1>0juX$X4Cl6i+3wbc~k146UIX&G22}WE>0ga#WLsn9tY(&29zBvH1$`iWtTe zG2jYl@P!P)eb<5DsR72BdI7-zP&cZNI{7q3e@?N8IKc4DE#UVr->|-ryuJXk^u^>4 z$3wE~=q390;XuOQP~TNoDR?#|NSPJ%sTMInA6*rJ%go|=YjGe!B>z6u$IhgQSwoV* zjy3F2#I>uK{42{&IqP59)Y(1*Z>>#W8rCf4_eVsH)`v!P#^;BgzKDR`ARGEZzkNX+ zJUQu=*-ol=Xqqt5=`=pA@BIn@6a9G8C{c&`i^(i+BxQO9?YZ3iu%$$da&Kb?2kCCo zo7t$UpSFWqmydXf@l3bVJ=%K?SSw)|?srhJ-1ZdFu*5QhL$~-IQS!K1s@XzAtv6*Y zl8@(5BlWYLt1yAWy?rMD&bwze8bC3-GfNH=p zynNFCdxyX?K&G(ZZ)afguQ2|r;XoV^=^(;Cku#qYn4Lus`UeKt6rAlFo_rU`|Rq z&G?~iWMBio<78of-2X(ZYHx~=U0Vz4btyXkctMKdc9UM!vYr~B-(>)(Hc|D zMzkN4!PBg%tZoh+=Gba!0++d193gbMk2&krfDgcbx0jI92cq?FFESVg0D$>F+bil} zY~$)|>1HZsX=5sAZ2WgPB5P=8X#TI+NQ(M~GqyVB53c6IdX=k>Wu@A0Svf5#?uHaF zsYn|koIi3$(%GZ2+G+7Fv^lHTb#5b8sAHSTnL^qWZLM<(1|9|QFw9pnRU{svj}_Al zL)b9>fN{QiA($8peNEJyy`(a{&uh-T4_kdZFIVsKKVM(?05}76EEz?#W za^fiZOAd14IJ4zLX-n7Lq0qlQ^lW8Cvz4UKkV9~P}>sq0?xD3vg+$4vLm~C(+ zM{-3Z#qnZ09bJ>}j?6ry^h+@PfaD7*jZxBEY4)UG&daWb??6)TP+|3#Z&?GL?1i+280CFsE|vIXQbm| zM}Pk!U`U5NsNbyKzkrul-DzwB{X?n3E6?TUHr{M&+R*2%yOiXdW-_2Yd6?38M9Vy^ z*lE%gA{wwoSR~vN0=no}tP2Ul5Gk5M(Xq`$nw#ndFk`tcpd5A=Idue`XZ!FS>Q zG^0w#>P4pPG+*NC9gLP4x2m=cKP}YuS!l^?sHSFftZy{4CoQrb_ z^20(NnG`wAhMI=eq)SsIE~&Gp9Ne0nD4%Xiu|0Fj1UFk?6avDqjdXz{O1nKao*46y zT8~iA%Exu=G#{x=KD;_C&M+Zx4+n`sHT>^>=-1YM;H<72k>$py1?F3#T1*ef9mLZw z5naLQr?n7K;2l+{_uIw*_1nsTn~I|kkCgrn;|G~##hM;9l7Jy$yJfmk+&}W@JeKcF zx@@Woiz8qdi|D%aH3XTx5*wDlbs?dC1_nrFpm^QbG@wM=i2?Zg;$VK!c^Dp8<}BTI 
zyRhAq@#%2pGV49*Y5_mV4+OICP|%I(dQ7x=6Ob}>EjnB_-_18*xrY?b%-yEDT(wrO z9RY2QT0`_OpGfMObKHV;QLVnrK%mc?$WAdIT`kJQT^n%GuzE7|9@k3ci5fYOh(287 zuIbg!GB3xLg$YN=n)^pHGB0jH+_iIiC=nUcD;G6LuJsjn2VI1cyZx=a?ShCsF==QK z;q~*m&}L<-cb+mDDXzvvrRsybcgQ;Vg21P(uLv5I+eGc7o7tc6`;OA9{soHFOz zT~2?>Ts}gprIX$wRBb4yE>ot<8+*Bv`qbSDv*VtRi|cyWS>)Fjs>fkNOH-+PX&4(~ z&)T8Zam2L6puQl?;5zg9h<}k4#|yH9czHw;1jw-pwBM*O2hUR6yvHATrI%^mvs9q_ z&ccT0>f#eDG<^WG^q@oVqlJrhxH)dcq2cty@l3~|5#UDdExyXUmLQ}f4#;6fI{f^t zDCsgIJ~0`af%YR%Ma5VQq-p21k`vaBu6WE?66+5=XUd%Ay%D$irN>5LhluRWt7 zov-=f>QbMk*G##&DTQyou$s7UqjjW@k6=!I@!k+S{pP8R(2=e@io;N8E`EOB;OGoI zw6Q+{X1_I{OO0HPpBz!X!@`5YQ2)t{+!?M_iH25X(d~-Zx~cXnS9z>u?+If|iNJbx zyFU2d1!ITX64D|lE0Z{dLRqL1Ajj=CCMfC4lD3&mYR_R_VZ>_7_~|<^o*%_&jevU+ zQ4|qzci=0}Jydw|LXLCrOl1_P6Xf@c0$ieK2^7@A9UbF{@V_0p%lqW|L?5k>bVM8|p5v&2g;~r>B8uo<4N+`B zH{J)h;SYiIVx@#jI&p-v3dwL5QNV1oxPr8J%ooezTnLW>i*3Isb49%5i!&ac_dEXv zvXmVUck^QHmyrF8>CGXijC_R-y(Qr{3Zt~EmW)-nC!tiH`wlw5D*W7Pip;T?&j%kX z6DkZX4&}iw>hE(boLyjOoupf6JpvBG8}jIh!!VhnD0>}KSMMo{1#uU6kiFcA04~|7 zVO8eI&x1`g4CZ<2cYUI(n#wz2MtVFHx47yE5eL~8bot~>EHbevSt}LLMQX?odD{Ux zJMnam{d)W4da{l7&y-JrgiU~qY3$~}_F#G7|MxT)e;G{U`In&?`j<5D->}cb{}{T(4DF0BOk-=1195KB-E*o@c?`>y#4=dMtYtSY=&L{!TAjFVcq0y@AH`vH! 
z$41+u!Ld&}F^COPgL(EE{0X7LY&%D7-(?!kjFF7=qw<;`V{nwWBq<)1QiGJgUc^Vz ztMUlq1bZqKn17|6x6iAHbWc~l1HcmAxr%$Puv!znW)!JiukwIrqQ00|H$Z)OmGG@= zv%A8*4cq}(?qn4rN6o`$Y))(MyXr8R<2S^J+v(wmFmtac!%VOfN?&(8Nr!T@kV`N; z*Q33V3t`^rN&aBiHet)18wy{*wi1=W!B%B-Q6}SCrUl$~Hl{@!95ydml@FK8P=u4s z4e*7gV2s=YxEvskw2Ju!2%{8h01rx-3`NCPc(O zH&J0VH5etNB2KY6k4R@2Wvl^Ck$MoR3=)|SEclT2ccJ!RI9Nuter7u9@;sWf-%um;GfI!=eEIQ2l2p_YWUd{|6EG ze{yO6;lMc>;2tPrsNdi@&1K6(1;|$xe8vLgiouj%QD%gYk`4p{Ktv9|j+!OF-P?@p z;}SV|oIK)iwlBs+`ROXkhd&NK zzo__r!B>tOXpBJMDcv!Mq54P+n4(@dijL^EpO1wdg~q+!DT3lB<>9AANSe!T1XgC=J^)IP0XEZ()_vpu!!3HQyJhwh?r`Ae%Yr~b% zO*NY9t9#qWa@GCPYOF9aron7thfWT`eujS4`t2uG6)~JRTI;f(ZuoRQwjZjp5Pg34 z)rp$)Kr?R+KdJ;IO;pM{$6|2y=k_siqvp%)2||cHTe|b5Ht8&A{wazGNca zX$Ol?H)E_R@SDi~4{d-|8nGFhZPW;Cts1;08TwUvLLv&_2$O6Vt=M)X;g%HUr$&06 zISZb(6)Q3%?;3r~*3~USIg=HcJhFtHhIV(siOwV&QkQe#J%H9&E21!C*d@ln3E@J* zVqRO^<)V^ky-R|%{(9`l-(JXq9J)1r$`uQ8a}$vr9E^nNiI*thK8=&UZ0dsFN_eSl z(q~lnD?EymWLsNa3|1{CRPW60>DSkY9YQ;$4o3W7Ms&@&lv9eH!tk~N&dhqX&>K@} zi1g~GqglxkZ5pEFkllJ)Ta1I^c&Bt6#r(QLQ02yHTaJB~- zCcE=5tmi`UA>@P=1LBfBiqk)HB4t8D?02;9eXj~kVPwv?m{5&!&TFYhu>3=_ zsGmYZ^mo*-j69-42y&Jj0cBLLEulNRZ9vXE)8~mt9C#;tZs;=#M=1*hebkS;7(aGf zcs7zH(I8Eui9UU4L--))yy`&d&$In&VA2?DAEss4LAPCLd>-$i?lpXvn!gu^JJ$(DoUlc6wE98VLZ*z`QGQov5l4Fm_h?V-;mHLYDVOwKz7>e4+%AzeO>P6v}ndPW| zM>m#6Tnp7K?0mbK=>gV}=@k*0Mr_PVAgGMu$j+pWxzq4MAa&jpCDU&-5eH27Iz>m^ zax1?*HhG%pJ((tkR(V(O(L%7v7L%!_X->IjS3H5kuXQT2!ow(;%FDE>16&3r){!ex zhf==oJ!}YU89C9@mfDq!P3S4yx$aGB?rbtVH?sHpg?J5C->!_FHM%Hl3#D4eplxzQ zRA+<@LD%LKSkTk2NyWCg7u=$%F#;SIL44~S_OGR}JqX}X+=bc@swpiClB`Zbz|f!4 z7Ysah7OkR8liXfI`}IIwtEoL}(URrGe;IM8%{>b1SsqXh)~w}P>yiFRaE>}rEnNkT z!HXZUtxUp1NmFm)Dm@-{FI^aRQqpSkz}ZSyKR%Y}YHNzBk)ZIp} zMtS=aMvkgWKm9&oTcU0?S|L~CDqA+sHpOxwnswF-fEG)cXCzUR?ps@tZa$=O)=L+5 zf%m58cq8g_o}3?Bhh+c!w4(7AjxwQ3>WnVi<{{38g7yFboo>q|+7qs<$8CPXUFAN< zG&}BHbbyQ5n|qqSr?U~GY{@GJ{(Jny{bMaOG{|IkUj7tj^9pa9|FB_<+KHLxSxR;@ 
zHpS$4V)PP+tx}22fWx(Ku9y+}Ap;VZqD0AZW4gCDTPCG=zgJmF{|x;(rvdM|2|9a}cex6xrMkERnkE;}jvU-kmzd%_J50$M`lIPCKf+^*zL=@LW`1SaEc%=m zQ+lT06Gw+wVwvQ9fZ~#qd430v2HndFsBa9WjD0P}K(rZYdAt^5WQIvb%D^Q|pkVE^ zte$&#~zmULFACGfS#g=2OLOnIf2Of-k!(BIHjs77nr!5Q1*I9 z1%?=~#Oss!rV~?-6Gm~BWJiA4mJ5TY&iPm_$)H1_rTltuU1F3I(qTQ^U$S>%$l z)Wx1}R?ij0idp@8w-p!Oz{&*W;v*IA;JFHA9%nUvVDy7Q8woheC#|8QuDZb-L_5@R zOqHwrh|mVL9b=+$nJxM`3eE{O$sCt$UK^2@L$R(r^-_+z?lOo+me-VW=Zw z-Bn>$4ovfWd%SPY`ab-u9{INc*k2h+yH%toDHIyqQ zO68=u`N}RIIs7lsn1D){)~%>ByF<>i@qFb<-axvu(Z+6t7v<^z&gm9McRB~BIaDn$ z#xSGT!rzgad8o>~kyj#h1?7g96tOcCJniQ+*#=b7wPio>|6a1Z?_(TS{)KrPe}(8j z!#&A=k(&Pj^F;r)CI=Z{LVu>uj!_W1q4b`N1}E(i%;BWjbEcnD=mv$FL$l?zS6bW!{$7j1GR5ocn94P2u{ z70tAAcpqtQo<@cXw~@i-@6B23;317|l~S>CB?hR5qJ%J3EFgyBdJd^fHZu7AzHF(BQ!tyAz^L0`X z23S4Fe{2X$W0$zu9gm%rg~A>ijaE#GlYlrF9$ds^QtaszE#4M(OLVP2O-;XdT(XIC zatwzF*)1c+t~c{L=fMG8Z=k5lv>U0;C{caN1NItnuSMp)6G3mbahu>E#sj&oy94KC zpH}8oEw{G@N3pvHhp{^-YaZeH;K+T_1AUv;IKD<=mv^&Ueegrb!yf`4VlRl$M?wsl zZyFol(2|_QM`e_2lYSABpKR{{NlxlDSYQNkS;J66aT#MSiTx~;tUmvs-b*CrR4w=f z8+0;*th6kfZ3|5!Icx3RV11sp=?`0Jy3Fs0N4GZQMN=8HmT6%x9@{Dza)k}UwL6JT zHRDh;%!XwXr6yuuy`4;Xsn0zlR$k%r%9abS1;_v?`HX_hI|+EibVnlyE@3aL5vhQq zlIG?tN^w@0(v9M*&L+{_+RQZw=o|&BRPGB>e5=ys7H`nc8nx)|-g;s7mRc7hg{GJC zAe^vCIJhajmm7C6g! 
zL&!WAQ~5d_5)00?w_*|*H>3$loHrvFbitw#WvLB!JASO?#5Ig5$Ys10n>e4|3d;tS zELJ0|R4n3Az(Fl3-r^QiV_C;)lQ1_CW{5bKS15U|E9?ZgLec@%kXr84>5jV2a5v=w z?pB1GPdxD$IQL4)G||B_lI+A=08MUFFR4MxfGOu07vfIm+j=z9tp~5i_6jb`tR>qV z$#`=BQ*jpCjm$F0+F)L%xRlnS%#&gro6PiRfu^l!EVan|r3y}AHJQOORGx4~ z&<)3=K-tx518DZyp%|!EqpU!+X3Et7n2AaC5(AtrkW>_57i}$eqs$rupubg0a1+WO zGHZKLN2L0D;ab%{_S1Plm|hx8R?O14*w*f&2&bB050n!R2by zw!@XOQx$SqZ5I<(Qu$V6g>o#A!JVwErWv#(Pjx=KeS0@hxr4?13zj#oWwPS(7Ro|v z>Mp@Kmxo79q|}!5qtX2-O@U&&@6s~!I&)1WQIl?lTnh6UdKT_1R640S4~f=_xoN3- zI+O)$R@RjV$F=>Ti7BlnG1-cFKCC(t|Qjm{SalS~V-tX#+2ekRhwmN zZr`8{QF6y~Z!D|{=1*2D-JUa<(1Z=;!Ei!KiRNH?o{p5o3crFF=_pX9O-YyJchr$~ zRC`+G+8kx~fD2k*ZIiiIGR<8r&M@3H?%JVOfE>)})7ScOd&?OjgAGT@WVNSCZ8N(p zuQG~76GE3%(%h1*vUXg$vH{ua0b`sQ4f0*y=u~lgyb^!#CcPJa2mkSEHGLsnO^kb$ zru5_l#nu=Y{rSMWiYx?nO{8I!gH+?wEj~UM?IrG}E|bRIBUM>UlY<`T1EHpRr36vv zBi&dG8oxS|J$!zoaq{+JpJy+O^W(nt*|#g32bd&K^w-t>!Vu9N!k9eA8r!Xc{utY> zg9aZ(D2E0gL#W0MdjwES-7~Wa8iubPrd?8-$C4BP?*wok&O8+ykOx{P=Izx+G~hM8 z*9?BYz!T8~dzcZr#ux8kS7u7r@A#DogBH8km8Ry4slyie^n|GrTbO|cLhpqgMdsjX zJ_LdmM#I&4LqqsOUIXK8gW;V0B(7^$y#h3h>J0k^WJfAMeYek%Y-Dcb_+0zPJez!GM zAmJ1u;*rK=FNM0Nf}Y!!P9c4)HIkMnq^b;JFd!S3?_Qi2G#LIQ)TF|iHl~WKK6JmK zbv7rPE6VkYr_%_BT}CK8h=?%pk@3cz(UrZ{@h40%XgThP*-Oeo`T0eq9 zA8BnWZKzCy5e&&_GEsU4*;_k}(8l_&al5K-V*BFM=O~;MgRkYsOs%9eOY6s6AtE*<7GQAR2ulC3RAJrG_P1iQK5Z~&B z&f8X<>yJV6)oDGIlS$Y*D^Rj(cszTy5c81a5IwBr`BtnC6_e`ArI8CaTX_%rx7;cn zR-0?J_LFg*?(#n~G8cXut(1nVF0Oka$A$1FGcERU<^ggx;p@CZc?3UB41RY+wLS`LWFNSs~YP zuw1@DNN3lTd|jDL7gjBsd9}wIw}4xT2+8dBQzI00m<@?c2L%>}QLfK5%r!a-iII`p zX@`VEUH)uj^$;7jVUYdADQ2k*!1O3WdfgF?OMtUXNpQ1}QINamBTKDuv19^{$`8A1 zeq%q*O0mi@(%sZU>Xdb0Ru96CFqk9-L3pzLVsMQ`Xpa~N6CR{9Rm2)A|CI21L(%GW zh&)Y$BNHa=FD+=mBw3{qTgw)j0b!Eahs!rZnpu)z!!E$*eXE~##yaXz`KE5(nQM`s zD!$vW9XH)iMxu9R>r$VlLk9oIR%HxpUiW=BK@4U)|1WNQ=mz9a z^!KkO=>GaJ!GBXm{KJj^;kh-MkUlEQ%lza`-G&}C5y1>La1sR6hT=d*NeCnuK%_LV zOXt$}iP6(YJKc9j-Fxq~*ItVUqljQ8?oaysB-EYtFQp9oxZ|5m0^Hq(qV!S+hq#g( z?|i*H2MIr^Kxgz+3vIljQ*Feejy6S4v~jKEPTF~Qhq!(ms5>NGtRgO5vfPPc4Z^AM 
zTj!`5xEreIN)vaNxa|q6qWdg>+T`Ol0Uz)ckXBXEGvPNEL3R8hB3=C5`@=SYgAju1 z!)UBr{2~=~xa{b8>x2@C7weRAEuatC)3pkRhT#pMPTpSbA|tan%U7NGMvzmF?c!V8 z=pEWxbdXbTAGtWTyI?Fml%lEr-^AE}w#l(<7OIw;ctw}imYax&vR4UYNJZK6P7ZOd zP87XfhnUHxCUHhM@b*NbTi#(-8|wcv%3BGNs#zRCVV(W?1Qj6^PPQa<{yaBwZ`+<`w|;rqUY_C z&AeyKwwf*q#OW-F()lir=T^<^wjK65Lif$puuU5+tk$;e_EJ;Lu+pH>=-8=PDhkBg z8cWt%@$Sc#C6F$Vd+0507;{OOyT7Hs%nKS88q-W!$f~9*WGBpHGgNp}=C*7!RiZ5s zn1L_DbKF@B8kwhDiLKRB@lsXVVLK|ph=w%_`#owlf@s@V(pa`GY$8h%;-#h@TsO|Y8V=n@*!Rog7<7Cid%apR|x zOjhHCyfbIt%+*PCveTEcuiDi%Wx;O;+K=W?OFUV%)%~6;gl?<0%)?snDDqIvkHF{ zyI02)+lI9ov42^hL>ZRrh*HhjF9B$A@=H94iaBESBF=eC_KT$8A@uB^6$~o?3Wm5t1OIaqF^~><2?4e3c&)@wKn9bD? zoeCs;H>b8DL^F&>Xw-xjZEUFFTv>JD^O#1E#)CMBaG4DX9bD(Wtc8Rzq}9soQ8`jf zeSnHOL}<+WVSKp4kkq&?SbETjq6yr@4%SAqOG=9E(3YeLG9dtV+8vmzq+6PFPk{L; z(&d++iu=^F%b+ea$i2UeTC{R*0Isk;vFK!no<;L+(`y`3&H-~VTdKROkdyowo1iqR zbVW(3`+(PQ2>TKY>N!jGmGo7oeoB8O|P_!Ic@ zZ^;3dnuXo;WJ?S+)%P>{Hcg!Jz#2SI(s&dY4QAy_vRlmOh)QHvs_7c&zkJCmJGVvV zX;Mtb>QE+xp`KyciG$Cn*0?AK%-a|=o!+7x&&yzHQOS>8=B*R=niSnta^Pxp1`=md z#;$pS$4WCT?mbiCYU?FcHGZ#)kHVJTTBt^%XE(Q};aaO=Zik0UgLcc0I(tUpt(>|& zcxB_|fxCF7>&~5eJ=Dpn&5Aj{A^cV^^}(7w#p;HG&Q)EaN~~EqrE1qKrMAc&WXIE;>@<&)5;gD2?={Xf@Mvn@OJKw=8Mgn z!JUFMwD+s==JpjhroT&d{$kQAy%+d`a*XxDEVxy3`NHzmITrE`o!;5ClXNPb4t*8P zzAivdr{j_v!=9!^?T3y?gzmqDWX6mkzhIzJ-3S{T5bcCFMr&RPDryMcdwbBuZbsgN zGrp@^i?rcfN7v0NKGzDPGE#4yszxu=I_`MI%Z|10nFjU-UjQXXA?k8Pk|OE<(?ae) zE%vG#eZAlj*E7_3dx#Zz4kMLj>H^;}33UAankJiDy5ZvEhrjr`!9eMD8COp}U*hP+ zF}KIYx@pkccIgyxFm#LNw~G&`;o&5)2`5aogs`1~7cMZQ7zj!%L4E`2yzlQN6REX20&O<9 zKV6fyr)TScJPPzNTC2gL+0x#=u>(({{D7j)c-%tvqls3#Y?Z1m zV5WUE)zdJ{$p>yX;^P!UcXP?UD~YM;IRa#Rs5~l+*$&nO(;Ers`G=0D!twR(0GF@c zHl9E5DQI}Oz74n zfKP>&$q0($T4y$6w(p=ERAFh+>n%iaeRA%!T%<^+pg?M)@ucY<&59$x9M#n+V&>}=nO9wCV{O~lg&v#+jcUj(tQ z`0u1YH)-`U$15a{pBkGyPL0THv1P|4e@pf@3IBZS4dVJPo#H>pWq%Lr0YS-SeWash z8R7=jb28KPMI|_lo#GEO|5B?N_e``H*23{~a!AmUJ+fb4HX-%QI@lSEUxKlGV7z7Q zSKw@-TR>@1RL%w{x}dW#k1NgW+q4yt2Xf1J62Bx*O^WG8OJ|FqI4&@d3_o8Id@*)4 
zYrk=>@!wv~mh7YWv*bZhxqSmFh2Xq)o=m;%n$I?GSz49l1$xRpPu_^N(vZ>*>Z<04 z2+rP70oM=NDysd!@fQdM2OcyT?3T^Eb@lIC-UG=Bw{BjQ&P`KCv$AcJ;?`vdZ4){d z&gkoUK{$!$$K`3*O-jyM1~p-7T*qb)Ys>Myt^;#1&a%O@x8A+E>! zY8=eD`ZG)LVagDLBeHg>=atOG?Kr%h4B%E6m@J^C+U|y)XX@f z8oyJDW|9g=<#f<{JRr{y#~euMnv)`7j=%cHWLc}ngjq~7k**6%4u>Px&W%4D94(r* z+akunK}O0DC2A%Xo9jyF;DobX?!1I(7%}@7F>i%&nk*LMO)bMGg2N+1iqtg+r(70q zF5{Msgsm5GS7DT`kBsjMvOrkx&|EU!{{~gL4d2MWrAT=KBQ-^zQCUq{5PD1orxlIL zq;CvlWx#f1NWvh`hg011I%?T_s!e38l*lWVt|~z-PO4~~1g)SrJ|>*tXh=QfXT)%( z+ex+inPvD&O4Ur;JGz>$sUOnWdpSLcm1X%aQDw4{dB!cnj`^muI$CJ2%p&-kULVCE z>$eMR36kN$wCPR+OFDM3-U(VOrp9k3)lI&YVFqd;Kpz~K)@Fa&FRw}L(SoD z9B4a+hQzZT-BnVltst&=kq6Y(f^S4hIGNKYBgMxGJ^;2yrO}P3;r)(-I-CZ)26Y6? z&rzHI_1GCvGkgy-t1E;r^3Le30|%$ebDRu2+gdLG)r=A~Qz`}~&L@aGJ{}vVs_GE* zVUjFnzHiXfKQbpv&bR&}l2bzIjAooB)=-XNcYmrGmBh(&iu@o!^hn0^#}m2yZZUK8 zufVm7Gq0y`Mj;9b>`c?&PZkU0j4>IL=UL&-Lp3j&47B5pAW4JceG{!XCA)kT<%2nqCxj<)uy6XR_uws~>_MEKPOpAQ!H zkn>FKh)<9DwwS*|Y(q?$^N!6(51O0 z^JM~Ax{AI1Oj$fs-S5d4T7Z_i1?{%0SsIuQ&r8#(JA=2iLcTN+?>wOL532%&dMYkT z*T5xepC+V6zxhS@vNbMoi|i)=rpli@R9~P!39tWbSSb904ekv7D#quKbgFEMTb48P zuq(VJ+&L8aWU(_FCD$3^uD!YM%O^K(dvy~Wm2hUuh6bD|#(I39Xt>N1Y{ZqXL`Fg6 zKQ?T2htHN!(Bx;tV2bfTtIj7e)liN-29s1kew>v(D^@)#v;}C4-G=7x#;-dM4yRWm zyY`cS21ulzMK{PoaQ6xChEZ}o_#}X-o}<&0)$1#3we?+QeLt;aVCjeA)hn!}UaKt< zat1fHEx13y-rXNMvpUUmCVzocPmN~-Y4(YJvQ#db)4|%B!rBsgAe+*yor~}FrNH08 z3V!97S}D7d$zbSD{$z;@IYMxM6aHdypIuS*pr_U6;#Y!_?0i|&yU*@16l z*dcMqDQgfNBf}?quiu4e>H)yTVfsp#f+Du0@=Kc41QockXkCkvu>FBd6Q+@FL!(Yx z2`YuX#eMEiLEDhp+9uFqME_E^faV&~9qjBHJkIp~%$x^bN=N)K@kvSVEMdDuzA0sn z88CBG?`RX1@#hQNd`o^V{37)!w|nA)QfiYBE^m=yQKv-fQF+UCMcuEe1d4BH7$?>b zJl-r9@0^Ie=)guO1vOd=i$_4sz>y3x^R7n4ED!5oXL3@5**h(xr%Hv)_gILarO46q+MaDOF%ChaymKoI6JU5Pg;7#2n9-18|S1;AK+ zgsn6;k6-%!QD>D?cFy}8F;r@z8H9xN1jsOBw2vQONVqBVEbkiNUqgw~*!^##ht>w0 zUOykwH=$LwX2j&nLy=@{hr)2O&-wm-NyjW7n~Zs9UlH;P7iP3 zI}S(r0YFVYacnKH(+{*)Tbw)@;6>%=&Th=+Z6NHo_tR|JCI8TJiXv2N7ei7M^Q+RM z?9o`meH$5Yi;@9XaNR#jIK^&{N|DYNNbtdb)XW1Lv2k{E>;?F`#Pq|&_;gm~&~Zc9 
zf+6ZE%{x4|{YdtE?a^gKyzr}dA>OxQv+pq|@IXL%WS0CiX!V zm$fCePA%lU{%pTKD7|5NJHeXg=I0jL@$tOF@K*MI$)f?om)D63K*M|r`gb9edD1~Y zc|w7N)Y%do7=0{RC|AziW7#am$)9jciRJ?IWl9PE{G3U+$%FcyKs_0Cgq`=K3@ttV z9g;M!3z~f_?P%y3-ph%vBMeS@p7P&Ea8M@97+%XEj*(1E6vHj==d zjsoviB>j^$_^OI_DEPvFkVo(BGRo%cJeD){6Uckei=~1}>sp299|IRjhXe)%?uP0I zF5+>?0#Ye}T^Y$u_rc4=lPcq4K^D(TZG-w30-YiEM=dcK+4#o*>lJ8&JLi+3UcpZk z!^?95S^C0ja^jwP`|{<+3cBVog$(mRdQmadS+Vh~z zS@|P}=|z3P6uS+&@QsMp0no9Od&27O&14zHXGAOEy zh~OKpymK5C%;LLb467@KgIiVwYbYd6wFxI{0-~MOGfTq$nBTB!{SrWmL9Hs}C&l&l#m?s*{tA?BHS4mVKHAVMqm63H<|c5n0~k)-kbg zXidai&9ZUy0~WFYYKT;oe~rytRk?)r8bptITsWj(@HLI;@=v5|XUnSls7$uaxFRL+ zRVMGuL3w}NbV1`^=Pw*0?>bm8+xfeY(1PikW*PB>>Tq(FR`91N0c2&>lL2sZo5=VD zQY{>7dh_TX98L2)n{2OV=T10~*YzX27i2Q7W86M4$?gZIXZaBq#sA*{PH8){|GUi;oM>e?ua7eF4WFuFYZSG| zze?srg|5Ti8Og{O zeFxuw9!U+zhyk?@w zjsA6(oKD=Ka;A>Ca)oPORxK+kxH#O@zhC!!XS4@=swnuMk>t+JmLmFiE^1aX3f<)D@`%K0FGK^gg1a1j>zi z2KhV>sjU7AX3F$SEqrXSC}fRx64GDoc%!u2Yag68Lw@w9v;xOONf@o)Lc|Uh3<21ctTYu-mFZuHk*+R{GjXHIGq3p)tFtQp%TYqD=j1&y)>@zxoxUJ!G@ zgI0XKmP6MNzw>nRxK$-Gbzs}dyfFzt>#5;f6oR27ql!%+{tr+(`(>%51|k`ML} zY4eE)Lxq|JMas(;JibNQds1bUB&r}ydMQXBY4x(^&fY_&LlQC)3hylc$~8&~|06-D z#T+%66rYbHX%^KuqJED_wuGB+=h`nWA!>1n0)3wZrBG3%`b^Ozv6__dNa@%V14|!D zQ?o$z5u0^8`giv%qE!BzZ!3j;BlDlJDk)h@9{nSQeEk!z9RGW) z${RSF3phEM*ce*>Xdp}585vj$|40=&S{S-GTiE?Op*vY&Lvr9}BO$XWy80IF+6@%n z5*2ueT_g@ofP#u5pxb7n*fv^Xtt7&?SRc{*2Ka-*!BuOpf}neHGCiHy$@Ka1^Dint z;DkmIL$-e)rj4o2WQV%Gy;Xg(_Bh#qeOsTM2f@KEe~4kJ8kNLQ+;(!j^bgJMcNhvklP5Z6I+9Fq@c&D~8Fb-4rmDT!MB5QC{Dsb;BharP*O;SF4& zc$wj-7Oep7#$WZN!1nznc@Vb<_Dn%ga-O#J(l=OGB`dy=Sy&$(5-n3zzu%d7E#^8`T@}V+5B;PP8J14#4cCPw-SQTdGa2gWL0*zKM z#DfSXs_iWOMt)0*+Y>Lkd=LlyoHjublNLefhKBv@JoC>P7N1_#> zv=mLWe96%EY;!ZGSQDbZWb#;tzqAGgx~uk+-$+2_8U`!ypbwXl z^2E-FkM1?lY@yt8=J3%QK+xaZ6ok=-y%=KXCD^0r!5vUneW>95PzCkOPO*t}p$;-> ze5j-BLT_;)cZQzR2CEsm@rU7GZfFtdp*a|g4wDr%8?2QkIGasRfDWT-Dvy*U{?IHT z*}wGnzdlSptl#ZF^sf)KT|BJs&kLG91^A6ls{CzFprZ6-Y!V0Xysh%9p%iMd7HLsS zN+^Un$tDV)T@i!v?3o0Fsx2qI(AX_$dDkBzQ@fRM%n 
zRXk6hb9Py#JXUs+7)w@eo;g%QQ95Yq!K_d=z{0dGS+pToEI6=Bo8+{k$7&Z zo4>PH(`ce8E-Ps&uv`NQ;U$%t;w~|@E3WVOCi~R4oj5wP?%<*1C%}Jq%a^q~T7u>K zML5AKfQDv6>PuT`{SrKHRAF+^&edg6+5R_#H?Lz3iGoWo#PCEd0DS;)2U({{X#zU^ zw_xv{4x7|t!S)>44J;KfA|DC?;uQ($l+5Vp7oeqf7{GBF9356nx|&B~gs+@N^gSdd zvb*>&W)|u#F{Z_b`f#GVtQ`pYv3#||N{xj1NgB<#=Odt6{eB%#9RLt5v zIi|0u70`#ai}9fJjKv7dE!9ZrOIX!3{$z_K5FBd-Kp-&e4(J$LD-)NMTp^_pB`RT; zftVVlK2g@+1Ahv2$D){@Y#cL#dUj9*&%#6 zd2m9{1NYp>)6=oAvqdCn5#cx{AJ%S8skUgMglu2*IAtd+z1>B&`MuEAS(D(<6X#Lj z?f4CFx$)M&$=7*>9v1ER4b6!SIz-m0e{o0BfkySREchp?WdVPpQCh!q$t>?rL!&Jg zd#heM;&~A}VEm8Dvy&P|J*eAV&w!&Nx6HFV&B8jJFVTmgLaswn!cx$&%JbTsloz!3 zMEz1d`k==`Ueub_JAy_&`!ogbwx27^ZXgFNAbx=g_I~5nO^r)}&myw~+yY*cJl4$I znNJ32M&K=0(2Dj_>@39`3=FX!v3nZHno_@q^!y}%(yw0PqOo=);6Y@&ylVe>nMOZ~ zd>j#QQSBn3oaWd;qy$&5(5H$Ayi)0haAYO6TH>FR?rhqHmNOO+(})NB zLI@B@v0)eq!ug`>G<@htRlp3n!EpU|n+G+AvXFrWSUsLMBfL*ZB`CRsIVHNTR&b?K zxBgsN0BjfB>UVcJ|x%=-zb%OV7lmZc& zxiupadZVF7)6QuhoY;;FK2b*qL0J-Rn-8!X4ZY$-ZSUXV5DFd7`T41c(#lAeLMoeT z4%g655v@7AqT!i@)Edt5JMbN(=Q-6{=L4iG8RA%}w;&pKmtWvI4?G9pVRp|RTw`g0 zD5c12B&A2&P6Ng~8WM2eIW=wxd?r7A*N+&!Be7PX3s|7~z=APxm=A?5 zt>xB4WG|*Td@VX{Rs)PV0|yK`oI3^xn(4c_j&vgxk_Y3o(-`_5o`V zRTghg6%l@(qodXN;dB#+OKJEEvhfcnc#BeO2|E(5df-!fKDZ!%9!^BJ_4)9P+9Dq5 zK1=(v?KmIp34r?z{NEWnLB3Px{XYwy-akun4F7xTRr2^zeYW{gcK9)>aJDdU5;w5@ zak=<+-PLH-|04pelTb%ULpuuuJC7DgyT@D|p{!V!0v3KpDnRjANN12q6SUR3mb9<- z>2r~IApQGhstZ!3*?5V z8#)hJ0TdZg0M-BK#nGFP>$i=qk82DO z7h;Ft!D5E15OgW)&%lej*?^1~2=*Z5$2VX>V{x8SC+{i10BbtUk9@I#Vi&hX)q
Q!LwySI{Bnv%Sm)yh{^sSVJ8&h_D-BJ_YZe5eCaAWU9b$O2c z$T|{vWVRtOL!xC0DTc(Qbe`ItNtt5hr<)VijD0{U;T#bUEp381_y`%ZIav?kuYG{iyYdEBPW=*xNSc;Rlt6~F4M`5G+VtOjc z*0qGzCb@gME5udTjJA-9O<&TWd~}ysBd(eVT1-H82-doyH9RST)|+Pb{o*;$j9Tjs zhU!IlsPsj8=(x3bAKJTopW3^6AKROHR^7wZ185wJGVhA~hEc|LP;k7NEz-@4p5o}F z`AD6naG3(n=NF9HTH81=F+Q|JOz$7wm9I<+#BSmB@o_cLt2GkW9|?7mM;r!JZp89l zbo!Hp8=n!XH1{GwaDU+k)pGp`C|cXkCU5%vcH)+v@0eK>%7gWxmuMu9YLlChA|_D@ zi#5zovN_!a-0?~pUV-Rj*1P)KwdU-LguR>YM&*Nen+ln8Q$?WFCJg%DY%K}2!!1FE zDv-A%Cbwo^p(lzac&_TZ-l#9kq`mhLcY3h9ZTUVCM(Ad&=EriQY5{jJv<5K&g|*Lk zgV%ILnf1%8V2B0E&;Sp4sYbYOvvMebLwYwzkRQ#F8GpTQq#uv=J`uaSJ34OWITeSGo6+-8Xw znCk*n{kdDEi)Hi&u^)~cs@iyCkFWB2SWZU|Uc%^43ZIZQ-vWNExCCtDWjqHs;;tWf$v{}0{p0Rvxkq``)*>+Akq%|Na zA`@~-Vfe|+(AIlqru+7Ceh4nsVmO9p9jc8}HX^W&ViBDXT+uXbT#R#idPn&L>+#b6 zflC-4C5-X;kUnR~L>PSLh*gvL68}RBsu#2l`s_9KjUWRhiqF`j)`y`2`YU(>3bdBj z?>iyjEhe-~$^I5!nn%B6Wh+I`FvLNvauve~eX<+Ipl&04 zT}};W&1a3%W?dJ2=N#0t?e+aK+%t}5q%jSLvp3jZ%?&F}nOOWr>+{GFIa%wO_2`et z=JzoRR~}iKuuR+azPI8;Gf9)z3kyA4EIOSl!sRR$DlW}0>&?GbgPojmjmnln;cTqCt=ADbE zZ8GAnoM+S1(5$i8^O4t`ue;vO4i}z0wz-QEIVe5_u03;}-!G1NyY8;h^}y;tzY}i5 zqQr#Ur3Fy8sSa$Q0ys+f`!`+>9WbvU_I`Sj;$4{S>O3?#inLHCrtLy~!s#WXV=oVP zeE93*Nc`PBi4q@%Ao$x4lw9vLHM!6mn3-b_cebF|n-2vt-zYVF_&sDE--J-P;2WHo z+@n2areE0o$LjvjlV2X7ZU@j+`{*8zq`JR3gKF#EW|#+{nMyo-a>nFFTg&vhyT=b} zDa8+v0(Dgx0yRL@ZXOYIlVSZ0|MFizy0VPW8;AfA5|pe!#j zX}Py^8fl5SyS4g1WSKKtnyP+_PoOwMMwu`(i@Z)diJp~U54*-miOchy7Z35eL>^M z4p<-aIxH4VUZgS783@H%M7P9hX>t{|RU7$n4T(brCG#h9e9p! 
z+o`i;EGGq3&pF;~5V~eBD}lC)>if$w%Vf}AFxGqO88|ApfHf&Bvu+xdG)@vuF}Yvk z)o;~k-%+0K0g+L`Wala!$=ZV|z$e%>f0%XoLib%)!R^RoS+{!#X?h-6uu zF&&KxORdZU&EwQFITIRLo(7TA3W}y6X{?Y%y2j0It!ekU#<)$qghZtpcS>L3uh`Uj z7GY;6f$9qKynP#oS3$$a{p^{D+0oJQ71`1?OAn_m8)UGZmj3l*ZI)`V-a>MKGGFG< z&^jg#Ok%(hhm>hSrZ5;Qga4u(?^i>GiW_j9%_7M>j(^|Om$#{k+^*ULnEgzW_1gCICtAD^WpC`A z{9&DXkG#01Xo)U$OC(L5Y$DQ|Q4C6CjUKk1UkPj$nXH##J{c8e#K|&{mA*;b$r0E4 zUNo0jthwA(c&N1l=PEe8Rw_8cEl|-eya9z&H3#n`B$t#+aJ03RFMzrV@gowbe8v(c zIFM60^0&lCFO10NU4w@|61xiZ4CVXeaKjd;d?sv52XM*lS8XiVjgWpRB;&U_C0g+`6B5V&w|O6B*_q zsATxL!M}+$He)1eOWECce#eS@2n^xhlB4<_Nn?yCVEQWDs(r`|@2GqLe<#(|&P0U? z$7V5IgpWf09uIf_RazRwC?qEqRaHyL?iiS05UiGesJy%^>-C{{ypTBI&B0-iUYhk> zIk<5xpsuV@g|z(AZD+C-;A!fTG=df1=<%nxy(a(IS+U{ME4ZbDEBtcD_3V=icT6*_ z)>|J?>&6%nvHhZERBtjK+s4xnut*@>GAmA5m*OTp$!^CHTr}vM4n(X1Q*;{e-Rd2BCF-u@1ZGm z!S8hJ6L=Gl4T_SDa7Xx|-{4mxveJg=ctf`BJ*fy!yF6Dz&?w(Q_6B}WQVtNI!BVBC zKfX<>7vd6C96}XAQmF-Jd?1Q4eTfRB3q7hCh0f!(JkdWT5<{iAE#dKy*Jxq&3a1@~ z8C||Dn2mFNyrUV|<-)C^_y7@8c2Fz+2jrae9deBDu;U}tJ{^xAdxCD248(k;dCJ%o z`y3sADe>U%suxwwv~8A1+R$VB=Q?%U?4joI$um;aH+eCrBqpn- z%79D_7rb;R-;-9RTrwi9dPlg8&@tfWhhZ(Vx&1PQ+6(huX`;M9x~LrW~~#3{j0Bh2kDU$}@!fFQej4VGkJv?M4rU^x!RU zEwhu$!CA_iDjFjrJa`aocySDX16?~;+wgav;}Zut6Mg%C4>}8FL?8)Kgwc(Qlj{@#2Pt0?G`$h7P#M+qoXtlV@d}%c&OzO+QYKK`kyXaK{U(O^2DyIXCZlNQjt0^8~8JzNGrIxhj}}M z&~QZlbx%t;MJ(Vux;2tgNKGlAqphLq%pd}JG9uoVHUo?|hN{pLQ6Em%r*+7t^<);X zm~6=qChlNAVXNN*Sow->*4;}T;l;D1I-5T{Bif@4_}=>l`tK;qqDdt5zvisCKhMAH z#r}`)7VW?LZqfdmXQ%zo5bJ00{Xb9^YKrk0Nf|oIW*K@(=`o2Vndz}ZDyk{!u}PVx zzd--+_WC*U{~DH3{?GI64IB+@On&@9X>EUAo&L+G{L^dozaI4C3G#2wr~hseW@K&g zKWs{uHu-9Je!3;4pE>eBltKUXb^*hG8I&413)$J&{D4N%7PcloU6bn%jPxJyQL?g* z9g+YFFEDiE`8rW^laCNzQmi7CTnPfwyg3VDHRAl>h=In6jeaVOP@!-CP60j3+#vpL zEYmh_oP0{-gTe7Or`L6x)6w?77QVi~jD8lWN@3RHcm80iV%M1A!+Y6iHM)05iC64tb$X2lV_%Txk@0l^hZqi^%Z?#- zE;LE0uFx)R08_S-#(wC=dS&}vj6P4>5ZWjhthP=*Hht&TdLtKDR;rXEX4*z0h74FA zMCINqrh3Vq;s%3MC1YL`{WjIAPkVL#3rj^9Pj9Ss7>7duy!9H0vYF%>1jh)EPqvlr6h%R%CxDsk| 
z!BACz7E%j?bm=pH6Eaw{+suniuY7C9Ut~1cWfOX9KW9=H><&kQlinPV3h9R>3nJvK z4L9(DRM=x;R&d#a@oFY7mB|m8h4692U5eYfcw|QKwqRsshN(q^v$4$)HgPpAJDJ`I zkqjq(8Cd!K!+wCd=d@w%~e$=gdUgD&wj$LQ1r>-E=O@c ze+Z$x{>6(JA-fNVr)X;*)40Eym1TtUZI1Pwwx1hUi+G1Jlk~vCYeXMNYtr)1?qwyg zsX_e*$h?380O00ou?0R@7-Fc59o$UvyVs4cUbujHUA>sH!}L54>`e` zHUx#Q+Hn&Og#YVOuo*niy*GU3rH;%f``nk#NN5-xrZ34NeH$l`4@t);4(+0|Z#I>Y z)~Kzs#exIAaf--65L0UHT_SvV8O2WYeD>Mq^Y6L!Xu8%vnpofG@w!}R7M28?i1*T&zp3X4^OMCY6(Dg<-! zXmcGQrRgHXGYre7GfTJ)rhl|rs%abKT_Nt24_Q``XH{88NVPW+`x4ZdrMuO0iZ0g` z%p}y};~T5gbb9SeL8BSc`SO#ixC$@QhXxZ=B}L`tP}&k?1oSPS=4%{UOHe0<_XWln zwbl5cn(j-qK`)vGHY5B5C|QZd5)W7c@{bNVXqJ!!n$^ufc?N9C-BF2QK1(kv++h!>$QbAjq)_b$$PcJdV+F7hz0Hu@ zqj+}m0qn{t^tD3DfBb~0B36|Q`bs*xs|$i^G4uNUEBl4g;op-;Wl~iThgga?+dL7s zUP(8lMO?g{GcYpDS{NM!UA8Hco?#}eNEioRBHy4`mq!Pd-9@-97|k$hpEX>xoX+dY zDr$wfm^P&}Wu{!%?)U_(%Mn79$(ywvu*kJ9r4u|MyYLI_67U7%6Gd_vb##Nerf@>& z8W11z$$~xEZt$dPG}+*IZky+os5Ju2eRi;1=rUEeIn>t-AzC_IGM-IXWK3^6QNU+2pe=MBn4I*R@A%-iLDCOHTE-O^wo$sL_h{dcPl=^muAQb`_BRm};=cy{qSkui;`WSsj9%c^+bIDQ z0`_?KX0<-=o!t{u(Ln)v>%VGL z0pC=GB7*AQ?N7N{ut*a%MH-tdtNmNC+Yf$|KS)BW(gQJ*z$d{+{j?(e&hgTy^2|AR9vx1Xre2fagGv0YXWqtNkg*v%40v?BJBt|f9wX5 z{QTlCM}b-0{mV?IG>TW_BdviUKhtosrBqdfq&Frdz>cF~yK{P@(w{Vr7z2qKFwLhc zQuogKO@~YwyS9%+d-zD7mJG~@?EFJLSn!a&mhE5$_4xBl&6QHMzL?CdzEnC~C3$X@ zvY!{_GR06ep5;<#cKCSJ%srxX=+pn?ywDwtJ2{TV;0DKBO2t++B(tIO4)Wh`rD13P z4fE$#%zkd=UzOB74gi=-*CuID&Z3zI^-`4U^S?dHxK8fP*;fE|a(KYMgMUo`THIS1f!*6dOI2 zFjC3O=-AL`6=9pp;`CYPTdVX z8(*?V&%QoipuH0>WKlL8A*zTKckD!paN@~hh zmXzm~qZhMGVdQGd=AG8&20HW0RGV8X{$9LldFZYm zE?}`Q3i?xJRz43S?VFMmqRyvWaS#(~Lempg9nTM$EFDP(Gzx#$r)W&lpFKqcAoJh-AxEw$-bjW>`_+gEi z2w`99#UbFZGiQjS8kj~@PGqpsPX`T{YOj`CaEqTFag;$jY z8_{Wzz>HXx&G*Dx<5skhpETxIdhKH?DtY@b9l8$l?UkM#J-Snmts7bd7xayKTFJ(u zyAT&@6cAYcs{PBfpqZa%sxhJ5nSZBPji?Zlf&}#L?t)vC4X5VLp%~fz2Sx<*oN<7` z?ge=k<=X7r<~F7Tvp9#HB{!mA!QWBOf%EiSJ6KIF8QZNjg&x~-%e*tflL(ji_S^sO ztmib1rp09uon}RcsFi#k)oLs@$?vs(i>5k3YN%$T(5Or(TZ5JW9mA6mIMD08=749$ 
z!d+l*iu{Il7^Yu}H;lgw=En1sJpCKPSqTCHy4(f&NPelr31^*l%KHq^QE>z>Ks_bH zjbD?({~8Din7IvZeJ>8Ey=e;I?thpzD=zE5UHeO|neioJwG;IyLk?xOz(yO&0DTU~ z^#)xcs|s>Flgmp;SmYJ4g(|HMu3v7#;c*Aa8iF#UZo7CvDq4>8#qLJ|YdZ!AsH%^_7N1IQjCro

K7UpUK$>l@ zw`1S}(D?mUXu_C{wupRS-jiX~w=Uqqhf|Vb3Cm9L=T+w91Cu^ z*&Ty%sN?x*h~mJc4g~k{xD4ZmF%FXZNC;oVDwLZ_WvrnzY|{v8hc1nmx4^}Z;yriXsAf+Lp+OFLbR!&Ox?xABwl zu8w&|5pCxmu#$?Cv2_-Vghl2LZ6m7}VLEfR5o2Ou$x02uA-%QB2$c(c1rH3R9hesc zfpn#oqpbKuVsdfV#cv@5pV4^f_!WS+F>SV6N0JQ9E!T90EX((_{bSSFv9ld%I0&}9 zH&Jd4MEX1e0iqDtq~h?DBrxQX1iI0lIs<|kB$Yrh&cpeK0-^K%=FBsCBT46@h#yi!AyDq1V(#V}^;{{V*@T4WJ&U-NTq43w=|K>z8%pr_nC>%C(Wa_l78Ufib$r8Od)IIN=u>417 z`Hl{9A$mI5A(;+-Q&$F&h-@;NR>Z<2U;Y21>>Z;s@0V@SbkMQQj%_;~+qTuQ?c|AV zcWm3XZQHhP&R%QWarS%mJ!9R^&!_)*s(v+VR@I#QrAT}`17Y+l<`b-nvmDNW`De%y zrwTZ9EJrj1AFA>B`1jYDow}~*dfPs}IZMO3=a{Fy#IOILc8F0;JS4x(k-NSpbN@qM z`@aE_e}5{!$v3+qVs7u?sOV(y@1Os*Fgu`fCW9=G@F_#VQ%xf$hj0~wnnP0$hFI+@ zkQj~v#V>xn)u??YutKsX>pxKCl^p!C-o?+9;!Nug^ z{rP!|+KsP5%uF;ZCa5F;O^9TGac=M|=V z_H(PfkV1rz4jl?gJ(ArXMyWT4y(86d3`$iI4^l9`vLdZkzpznSd5Ikfrs8qcSy&>z zTIZgWZGXw0n9ibQxYWE@gI0(3#KA-dAdPcsL_|hg2@~C!VZDM}5;v_Nykfq!*@*Zf zE_wVgx82GMDryKO{U{D>vSzSc%B~|cjDQrt5BN=Ugpsf8H8f1lR4SGo#hCuXPL;QQ z#~b?C4MoepT3X`qdW2dNn& zo8)K}%Lpu>0tQei+{>*VGErz|qjbK#9 zvtd8rcHplw%YyQCKR{kyo6fgg!)6tHUYT(L>B7er5)41iG`j$qe*kSh$fY!PehLcD zWeKZHn<492B34*JUQh=CY1R~jT9Jt=k=jCU2=SL&&y5QI2uAG2?L8qd2U(^AW#{(x zThSy=C#>k+QMo^7caQcpU?Qn}j-`s?1vXuzG#j8(A+RUAY})F@=r&F(8nI&HspAy4 z4>(M>hI9c7?DCW8rw6|23?qQMSq?*Vx?v30U%luBo)B-k2mkL)Ljk5xUha3pK>EEj z@(;tH|M@xkuN?gsz;*bygizwYR!6=(Xgcg^>WlGtRYCozY<rFX2E>kaZo)O<^J7a`MX8Pf`gBd4vrtD|qKn&B)C&wp0O-x*@-|m*0egT=-t@%dD zgP2D+#WPptnc;_ugD6%zN}Z+X4=c61XNLb7L1gWd8;NHrBXwJ7s0ce#lWnnFUMTR& z1_R9Fin4!d17d4jpKcfh?MKRxxQk$@)*hradH2$3)nyXep5Z;B z?yX+-Bd=TqO2!11?MDtG0n(*T^!CIiF@ZQymqq1wPM_X$Iu9-P=^}v7npvvPBu!d$ z7K?@CsA8H38+zjA@{;{kG)#AHME>Ix<711_iQ@WWMObXyVO)a&^qE1GqpP47Q|_AG zP`(AD&r!V^MXQ^e+*n5~Lp9!B+#y3#f8J^5!iC@3Y@P`;FoUH{G*pj*q7MVV)29+j z>BC`a|1@U_v%%o9VH_HsSnM`jZ-&CDvbiqDg)tQEnV>b%Ptm)T|1?TrpIl)Y$LnG_ zzKi5j2Fx^K^PG1=*?GhK;$(UCF-tM~^=Z*+Wp{FSuy7iHt9#4n(sUuHK??@v+6*|10Csdnyg9hAsC5_OrSL;jVkLlf zHXIPukLqbhs~-*oa^gqgvtpgTk_7GypwH><53riYYL*M=Q@F-yEPLqQ&1Sc 
zZB%w}T~RO|#jFjMWcKMZccxm-SL)s_ig?OC?y_~gLFj{n8D$J_Kw%{r0oB8?@dWzn zB528d-wUBQzrrSSLq?fR!K%59Zv9J4yCQhhDGwhptpA5O5U?Hjqt>8nOD zi{)0CI|&Gu%zunGI*XFZh(ix)q${jT8wnnzbBMPYVJc4HX*9d^mz|21$=R$J$(y7V zo0dxdbX3N#=F$zjstTf*t8vL)2*{XH!+<2IJ1VVFa67|{?LP&P41h$2i2;?N~RA30LV`BsUcj zfO9#Pg1$t}7zpv#&)8`mis3~o+P(DxOMgz-V*(?wWaxi?R=NhtW}<#^Z?(BhSwyar zG|A#Q7wh4OfK<|DAcl9THc-W4*>J4nTevsD%dkj`U~wSUCh15?_N@uMdF^Kw+{agk zJ`im^wDqj`Ev)W3k3stasP`88-M0ZBs7;B6{-tSm3>I@_e-QfT?7|n0D~0RRqDb^G zyHb=is;IwuQ&ITzL4KsP@Z`b$d%B0Wuhioo1CWttW8yhsER1ZUZzA{F*K=wmi-sb#Ju+j z-l@In^IKnb{bQG}Ps>+Vu_W#grNKNGto+yjA)?>0?~X`4I3T@5G1)RqGUZuP^NJCq&^HykuYtMDD8qq+l8RcZNJsvN(10{ zQ1$XcGt}QH-U^WU!-wRR1d--{B$%vY{JLWIV%P4-KQuxxDeJaF#{eu&&r!3Qu{w}0f--8^H|KwE>)ORrcR+2Qf zb})DRcH>k0zWK8@{RX}NYvTF;E~phK{+F;MkIP$)T$93Ba2R2TvKc>`D??#mv9wg$ zd~|-`Qx5LwwsZ2hb*Rt4S9dsF%Cny5<1fscy~)d;0m2r$f=83<->c~!GNyb!U)PA; zq^!`@@)UaG)Ew(9V?5ZBq#c%dCWZrplmuM`o~TyHjAIMh0*#1{B>K4po-dx$Tk-Cq z=WZDkP5x2W&Os`N8KiYHRH#UY*n|nvd(U>yO=MFI-2BEp?x@=N<~CbLJBf6P)}vLS?xJXYJ2^<3KJUdrwKnJnTp{ zjIi|R=L7rn9b*D#Xxr4*R<3T5AuOS+#U8hNlfo&^9JO{VbH!v9^JbK=TCGR-5EWR@ zN8T-_I|&@A}(hKeL4_*eb!1G8p~&_Im8|wc>Cdir+gg90n1dw?QaXcx6Op_W1r=axRw>4;rM*UOpT#Eb9xU1IiWo@h?|5uP zka>-XW0Ikp@dIe;MN8B01a7+5V@h3WN{J=HJ*pe0uwQ3S&MyWFni47X32Q7SyCTNQ z+sR!_9IZa5!>f&V$`q!%H8ci!a|RMx5}5MA_kr+bhtQy{-^)(hCVa@I!^TV4RBi zAFa!Nsi3y37I5EK;0cqu|9MRj<^r&h1lF}u0KpKQD^5Y+LvFEwM zLU@@v4_Na#Axy6tn3P%sD^5P#<7F;sd$f4a7LBMk zGU^RZHBcxSA%kCx*eH&wgA?Qwazm8>9SCSz_!;MqY-QX<1@p$*T8lc?@`ikEqJ>#w zcG``^CoFMAhdEXT9qt47g0IZkaU)4R7wkGs^Ax}usqJ5HfDYAV$!=6?>J6+Ha1I<5 z|6=9soU4>E))tW$<#>F ziZ$6>KJf0bPfbx_)7-}tMINlc=}|H+$uX)mhC6-Hz+XZxsKd^b?RFB6et}O#+>Wmw9Ec9) z{q}XFWp{3@qmyK*Jvzpyqv57LIR;hPXKsrh{G?&dRjF%Zt5&m20Ll?OyfUYC3WRn{cgQ?^V~UAv+5 z&_m#&nIwffgX1*Z2#5^Kl4DbE#NrD&Hi4|7SPqZ}(>_+JMz=s|k77aEL}<=0Zfb)a z%F(*L3zCA<=xO)2U3B|pcTqDbBoFp>QyAEU(jMu8(jLA61-H!ucI804+B!$E^cQQa z)_ERrW3g!B9iLb3nn3dlkvD7KsY?sRvls3QC0qPi>o<)GHx%4Xb$5a3GBTJ(k@`e@ z$RUa^%S15^1oLEmA=sayrP5;9qtf!Z1*?e$ORVPsXpL{jL<6E)0sj&swP3}NPmR%FM?O>SQgN5XfHE< 
zo(4#Cv11(%Nnw_{_Ro}r6=gKd{k?NebJ~<~Kv0r(r0qe4n3LFx$5%x(BKvrz$m?LG zjLIc;hbj0FMdb9aH9Lpsof#yG$(0sG2%RL;d(n>;#jb!R_+dad+K;Ccw!|RY?uS(a zj~?=&M!4C(5LnlH6k%aYvz@7?xRa^2gml%vn&eKl$R_lJ+e|xsNfXzr#xuh(>`}9g zLHSyiFwK^-p!;p$yt7$F|3*IfO3Mlu9e>Dpx8O`37?fA`cj`C0B-m9uRhJjs^mRp# zWB;Aj6|G^1V6`jg7#7V9UFvnB4((nIwG?k%c7h`?0tS8J3Bn0t#pb#SA}N-|45$-j z$R>%7cc2ebAClXc(&0UtHX<>pd)akR3Kx_cK+n<}FhzmTx!8e9^u2e4%x{>T6pQ`6 zO182bh$-W5A3^wos0SV_TgPmF4WUP-+D25KjbC{y_6W_9I2_vNKwU(^qSdn&>^=*t z&uvp*@c8#2*paD!ZMCi3;K{Na;I4Q35zw$YrW5U@Kk~)&rw;G?d7Q&c9|x<Hg|CNMsxovmfth*|E*GHezPTWa^Hd^F4!B3sF;)? z(NaPyAhocu1jUe(!5Cy|dh|W2=!@fNmuNOzxi^tE_jAtzNJ0JR-avc_H|ve#KO}#S z#a(8secu|^Tx553d4r@3#6^MHbH)vmiBpn0X^29xEv!Vuh1n(Sr5I0V&`jA2;WS|Y zbf0e}X|)wA-Pf5gBZ>r4YX3Mav1kKY(ulAJ0Q*jB)YhviHK)w!TJsi3^dMa$L@^{` z_De`fF4;M87vM3Ph9SzCoCi$#Fsd38u!^0#*sPful^p5oI(xGU?yeYjn;Hq1!wzFk zG&2w}W3`AX4bxoVm03y>ts{KaDf!}b&7$(P4KAMP=vK5?1In^-YYNtx1f#}+2QK@h zeSeAI@E6Z8a?)>sZ`fbq9_snl6LCu6g>o)rO;ijp3|$vig+4t} zylEo7$SEW<_U+qgVcaVhk+4k+C9THI5V10qV*dOV6pPtAI$)QN{!JRBKh-D zk2^{j@bZ}yqW?<#VVuI_27*cI-V~sJiqQv&m07+10XF+#ZnIJdr8t`9s_EE;T2V;B z4UnQUH9EdX%zwh-5&wflY#ve!IWt0UE-My3?L#^Bh%kcgP1q{&26eXLn zTkjJ*w+(|_>Pq0v8{%nX$QZbf)tbJaLY$03;MO=Ic-uqYUmUCuXD>J>o6BCRF=xa% z3R4SK9#t1!K4I_d>tZgE>&+kZ?Q}1qo4&h%U$GfY058s%*=!kac{0Z+4Hwm!)pFLR zJ+5*OpgWUrm0FPI2ib4NPJ+Sk07j(`diti^i#kh&f}i>P4~|d?RFb#!JN)~D@)beox}bw?4VCf^y*`2{4`-@%SFTry2h z>9VBc9#JxEs1+0i2^LR@B1J`B9Ac=#FW=(?2;5;#U$0E0UNag_!jY$&2diQk_n)bT zl5Me_SUvqUjwCqmVcyb`igygB_4YUB*m$h5oeKv3uIF0sk}~es!{D>4r%PC*F~FN3owq5e0|YeUTSG#Vq%&Gk7uwW z0lDo#_wvflqHeRm*}l?}o;EILszBt|EW*zNPmq#?4A+&i0xx^?9obLyY4xx=Y9&^G;xYXYPxG)DOpPg!i_Ccl#3L}6xAAZzNhPK1XaC_~ z!A|mlo?Be*8Nn=a+FhgpOj@G7yYs(Qk(8&|h@_>w8Y^r&5nCqe0V60rRz?b5%J;GYeBqSAjo|K692GxD4` zRZyM2FdI+-jK2}WAZTZ()w_)V{n5tEb@>+JYluDozCb$fA4H)$bzg(Ux{*hXurjO^ zwAxc+UXu=&JV*E59}h3kzQPG4M)X8E*}#_&}w*KEgtX)cU{vm9b$atHa;s>| z+L6&cn8xUL*OSjx4YGjf6{Eq+Q3{!ZyhrL&^6Vz@jGbI%cAM9GkmFlamTbcQGvOlL zmJ?(FI)c86=JEs|*;?h~o)88>12nXlpMR4@yh%qdwFNpct;vMlc=;{FSo*apJ;p}! 
zAX~t;3tb~VuP|ZW;z$=IHf->F@Ml)&-&Bnb{iQyE#;GZ@C$PzEf6~q}4D>9jic@mTO5x76ulDz@+XAcm35!VSu zT*Gs>;f0b2TNpjU_BjHZ&S6Sqk6V1370+!eppV2H+FY!q*n=GHQ!9Rn6MjY!Jc77A zG7Y!lFp8?TIHN!LXO?gCnsYM-gQxsm=Ek**VmZu7vnuufD7K~GIxfxbsQ@qv2T zPa`tvHB$fFCyZl>3oYg?_wW)C>^_iDOc^B7klnTOoytQH18WkOk)L2BSD0r%xgRSW zQS9elF^?O=_@|58zKLK;(f77l-Zzu}4{fXed2saq!5k#UZAoDBqYQS{sn@j@Vtp|$ zG%gnZ$U|9@u#w1@11Sjl8ze^Co=)7yS(}=;68a3~g;NDe_X^}yJj;~s8xq9ahQ5_r zxAlTMnep*)w1e(TG%tWsjo3RR;yVGPEO4V{Zp?=a_0R#=V^ioQu4YL=BO4r0$$XTX zZfnw#_$V}sDAIDrezGQ+h?q24St0QNug_?{s-pI(^jg`#JRxM1YBV;a@@JQvH8*>> zIJvku74E0NlXkYe_624>znU0J@L<-c=G#F3k4A_)*;ky!C(^uZfj%WB3-*{*B$?9+ zDm$WFp=0(xnt6`vDQV3Jl5f&R(Mp};;q8d3I%Kn>Kx=^;uSVCw0L=gw53%Bp==8Sw zxtx=cs!^-_+i{2OK`Q;913+AXc_&Z5$@z3<)So0CU3;JAv=H?@Zpi~riQ{z-zLtVL z!oF<}@IgJp)Iyz1zVJ42!SPHSkjYNS4%ulVVIXdRuiZ@5Mx8LJS}J#qD^Zi_xQ@>DKDr-_e#>5h3dtje*NcwH_h;i{Sx7}dkdpuW z(yUCjckQsagv*QGMSi9u1`Z|V^}Wjf7B@q%j2DQXyd0nOyqg%m{CK_lAoKlJ7#8M} z%IvR?Vh$6aDWK2W!=i?*<77q&B8O&3?zP(Cs@kapc)&p7En?J;t-TX9abGT#H?TW? ztO5(lPKRuC7fs}zwcUKbRh=7E8wzTsa#Z{a`WR}?UZ%!HohN}d&xJ=JQhpO1PI#>X zHkb>pW04pU%Bj_mf~U}1F1=wxdBZu1790>3Dm44bQ#F=T4V3&HlOLsGH)+AK$cHk6 zia$=$kog?)07HCL*PI6}DRhpM^*%I*kHM<#1Se+AQ!!xyhcy6j7`iDX7Z-2i73_n# zas*?7LkxS-XSqv;YBa zW_n*32D(HTYQ0$feV_Fru1ZxW0g&iwqixPX3=9t4o)o|kOo79V$?$uh?#8Q8e>4e)V6;_(x&ViUVxma+i25qea;d-oK7ouuDsB^ab{ zu1qjQ%`n56VtxBE#0qAzb7lph`Eb-}TYpXB!H-}3Ykqyp`otprp7{VEuW*^IR2n$Fb99*nAtqT&oOFIf z@w*6>YvOGw@Ja?Pp1=whZqydzx@9X4n^2!n83C5{C?G@|E?&$?p*g68)kNvUTJ)I6 z1Q|(#UuP6pj78GUxq11m-GSszc+)X{C2eo-?8ud9sB=3(D47v?`JAa{V(IF zPZQ_0AY*9M97>Jf<o%#O_%Wq}8>YM=q0|tGY+hlXcpE=Z4Od z`NT7Hu2hnvRoqOw@g1f=bv`+nba{GwA$Ak0INlqI1k<9!x_!sL()h?hEWoWrdU3w` zZ%%)VR+Bc@_v!C#koM1p-3v_^L6)_Ktj4HE>aUh%2XZE@JFMOn)J~c`_7VWNb9c-N z2b|SZMR4Z@E7j&q&9(6H3yjEu6HV7{2!1t0lgizD;mZ9$r(r7W5G$ky@w(T_dFnOD z*p#+z$@pKE+>o@%eT(2-p_C}wbQ5s(%Sn_{$HDN@MB+Ev?t@3dPy`%TZ!z}AThZSu zN<1i$siJhXFdjV zP*y|V<`V8t=h#XTRUR~5`c`Z9^-`*BZf?WAehGdg)E2Je)hqFa!k{V(u+(hTf^Yq& zoruUh2(^3pe)2{bvt4&4Y9CY3js)PUHtd4rVG57}uFJL)D(JfSIo^{P=7liFXG 
zq5yqgof0V8paQcP!gy+;^pp-DA5pj=gbMN0eW=-eY+N8~y+G>t+x}oa!5r>tW$xhI zPQSv=pi;~653Gvf6~*JcQ%t1xOrH2l3Zy@8AoJ+wz@daW@m7?%LXkr!bw9GY@ns3e zSfuWF_gkWnesv?s3I`@}NgE2xwgs&rj?kH-FEy82=O8`+szN ziHch`vvS`zNfap14!&#i9H@wF7}yIPm=UB%(o(}F{wsZ(wA0nJ2aD^@B41>>o-_U6 zUqD~vdo48S8~FTb^+%#zcbQiiYoDKYcj&$#^;Smmb+Ljp(L=1Kt_J!;0s%1|JK}Wi z;={~oL!foo5n8=}rs6MmUW~R&;SIJO3TL4Ky?kh+b2rT9B1Jl4>#Uh-Bec z`Hsp<==#UEW6pGPhNk8H!!DUQR~#F9jEMI6T*OWfN^Ze&X(4nV$wa8QUJ>oTkruH# zm~O<`J7Wxseo@FqaZMl#Y(mrFW9AHM9Kb|XBMqaZ2a)DvJgYipkDD_VUF_PKd~dT7 z#02}bBfPn9a!X!O#83=lbJSK#E}K&yx-HI#T6ua)6o0{|={*HFusCkHzs|Fn&|C3H zBck1cmfcWVUN&i>X$YU^Sn6k2H;r3zuXbJFz)r5~3$d$tUj(l1?o={MM){kjgqXRO zc5R*#{;V7AQh|G|)jLM@wGAK&rm2~@{Pewv#06pHbKn#wL0P6F1!^qw9g&cW3Z=9} zj)POhOlwsh@eF=>z?#sIs*C-Nl(yU!#DaiaxhEs#iJqQ8w%(?+6lU02MYSeDkr!B- zPjMv+on6OLXgGnAtl(ao>|X2Y8*Hb}GRW5}-IzXnoo-d0!m4Vy$GS!XOLy>3_+UGs z2D|YcQx@M#M|}TDOetGi{9lGo9m-=0-^+nKE^*?$^uHkxZh}I{#UTQd;X!L+W@jm( zDg@N4+lUqI92o_rNk{3P>1gxAL=&O;x)ZT=q1mk0kLlE$WeWuY_$0`0jY-Kkt zP*|m3AF}Ubd=`<>(Xg0har*_@x2YH}bn0Wk*OZz3*e5;Zc;2uBdnl8?&XjupbkOeNZsNh6pvsq_ydmJI+*z**{I{0K)-;p1~k8cpJXL$^t!-`E}=*4G^-E8>H!LjTPxSx zcF+cS`ommfKMhNSbas^@YbTpH1*RFrBuATUR zt{oFWSk^$xU&kbFQ;MCX22RAN5F6eq9UfR$ut`Jw--p2YX)A*J69m^!oYfj2y7NYcH6&r+0~_sH^c^nzeN1AU4Ga7=FlR{S|Mm~MpzY0$Z+p2W(a={b-pR9EO1Rs zB%KY|@wLcAA@)KXi!d2_BxrkhDn`DT1=Dec}V!okd{$+wK z4E{n8R*xKyci1(CnNdhf$Dp2(Jpof0-0%-38X=Dd9PQgT+w%Lshx9+loPS~MOm%ZT zt%2B2iL_KU_ita%N>xjB!#71_3=3c}o zgeW~^U_ZTJQ2!PqXulQd=3b=XOQhwATK$y(9$#1jOQ4}4?~l#&nek)H(04f(Sr=s| zWv7Lu1=%WGk4FSw^;;!8&YPM)pQDCY9DhU`hMty1@sq1=Tj7bFsOOBZOFlpR`W>-J$-(kezWJj;`?x-v>ev{*8V z8p|KXJPV$HyQr1A(9LVrM47u-XpcrIyO`yWvx1pVYc&?154aneRpLqgx)EMvRaa#|9?Wwqs2+W8n5~79G z(}iCiLk;?enn}ew`HzhG+tu+Ru@T+K5juvZN)wY;x6HjvqD!&!)$$;1VAh~7fg0K| zEha#aN=Yv|3^~YFH}cc38ovVb%L|g@9W6fo(JtT6$fa?zf@Ct88e}m?i)b*Jgc{fl zExfdvw-BYDmH6>(4QMt#p0;FUIQqkhD}aH?a7)_%JtA~soqj{ppP_82yi9kaxuK>~ ze_)Zt>1?q=ZH*kF{1iq9sr*tVuy=u>Zev}!gEZx@O6-fjyu9X00gpIl-fS_pzjpqJ z1yqBmf9NF!jaF<+YxgH6oXBdK)sH(>VZ)1siyA$P<#KDt;8NT*l_0{xit~5j1P)FN 
zI8hhYKhQ)i z37^aP13B~u65?sg+_@2Kr^iWHN=U;EDSZ@2W2!5ALhGNWXnFBY%7W?1 z=HI9JzQ-pLKZDYTv<0-lt|6c-RwhxZ)mU2Os{bsX_i^@*fKUj8*aDO5pks=qn3Dv6 zwggpKLuyRCTVPwmw1r}B#AS}?X7b837UlXwp~E2|PJw2SGVueL7){Y&z!jL!XN=0i zU^Eig`S2`{+gU$68aRdWx?BZ{sU_f=8sn~>s~M?GU~`fH5kCc; z8ICp+INM3(3{#k32RZdv6b9MQYdZXNuk7ed8;G?S2nT+NZBG=Tar^KFl2SvhW$bGW#kdWL-I)s_IqVnCDDM9fm8g;P;8 z7t4yZn3^*NQfx7SwmkzP$=fwdC}bafQSEF@pd&P8@H#`swGy_rz;Z?Ty5mkS%>m#% zp_!m9e<()sfKiY(nF<1zBz&&`ZlJf6QLvLhl`_``%RW&{+O>Xhp;lwSsyRqGf=RWd zpftiR`={2(siiPAS|p}@q=NhVc0ELprt%=fMXO3B)4ryC2LT(o=sLM7hJC!}T1@)E zA3^J$3&1*M6Xq>03FX`R&w*NkrZE?FwU+Muut;>qNhj@bX17ZJxnOlPSZ=Zeiz~T_ zOu#yc3t6ONHB;?|r4w+pI)~KGN;HOGC)txxiUN8#mexj+W(cz%9a4sx|IRG=}ia zuEBuba3AHsV2feqw-3MvuL`I+2|`Ud4~7ZkN=JZ;L20|Oxna5vx1qbIh#k2O4$RQF zo`tL()zxaqibg^GbB+BS5#U{@K;WWQj~GcB1zb}zJkPwH|5hZ9iH2308!>_;%msji zJHSL~s)YHBR=Koa1mLEOHos*`gp=s8KA-C zu0aE+W!#iJ*0xqKm3A`fUGy#O+X+5W36myS>Uh2!R*s$aCU^`K&KKLCCDkejX2p=5 z%o7-fl03x`gaSNyr?3_JLv?2RLS3F*8ub>Jd@^Cc17)v8vYEK4aqo?OS@W9mt%ITJ z9=S2%R8M){CugT@k~~0x`}Vl!svYqX=E)c_oU6o}#Hb^%G1l3BudxA{F*tbjG;W_>=xV73pKY53v%>I)@D36I_@&p$h|Aw zonQS`07z_F#@T-%@-Tb|)7;;anoD_WH>9ewFy(ZcEOM$#Y)8>qi7rCnsH9GO-_7zF zu*C87{Df1P4TEOsnzZ@H%&lvV(3V@;Q!%+OYRp`g05PjY^gL$^$-t0Y>H*CDDs?FZly*oZ&dxvsxaUWF!{em4{A>n@vpXg$dwvt@_rgmHF z-MER`ABa8R-t_H*kv>}CzOpz;!>p^^9ztHMsHL|SRnS<-y5Z*r(_}c4=fXF`l^-i}>e7v!qs_jv zqvWhX^F=2sDNWA9c@P0?lUlr6ecrTKM%pNQ^?*Lq?p-0~?_j50xV%^(+H>sMul#Tw zeciF*1=?a7cI(}352%>LO96pD+?9!fNyl^9v3^v&Y4L)mNGK0FN43&Xf8jUlxW1Bw zyiu2;qW-aGNhs=zbuoxnxiwZ3{PFZM#Kw)9H@(hgX23h(`Wm~m4&TvoZoYp{plb^> z_#?vXcxd>r7K+1HKJvhed>gtK`TAbJUazUWQY6T~t2af%#<+Veyr%7-#*A#@&*;@g58{i|E%6yC_InGXCOd{L0;$)z#?n7M`re zh!kO{6=>7I?*}czyF7_frt#)s1CFJ_XE&VrDA?Dp3XbvF{qsEJgb&OLSNz_5g?HpK z9)8rsr4JN!Af3G9!#Qn(6zaUDqLN(g2g8*M)Djap?WMK9NKlkC)E2|-g|#-rp%!Gz zAHd%`iq|81efi93m3yTBw3g0j#;Yb2X{mhRAI?&KDmbGqou(2xiRNb^sV}%%Wu0?< z?($L>(#BO*)^)rSgyNRni$i`R4v;GhlCZ8$@e^ROX(p=2_v6Y!%^As zu022)fHdv_-~Yu_H6WVPLpHQx!W%^6j)cBhS`O3QBW#x(eX54d&I22op(N59b*&$v 
zFiSRY6rOc^(dgSV1>a7-5C;(5S5MvKcM2Jm-LD9TGqDpP097%52V+0>Xqq!! zq4e3vj53SE6i8J`XcQB|MZPP8j;PAOnpGnllH6#Ku~vS42xP*Nz@~y%db7Xi8s09P z1)e%8ys6&M8D=Dt6&t`iKG_4X=!kgRQoh%Z`dc&mlOUqXk-k`jKv9@(a^2-Upw>?< zt5*^DV~6Zedbec4NVl($2T{&b)zA@b#dUyd>`2JC0=xa_fIm8{5um zr-!ApXZhC8@=vC2WyxO|!@0Km)h8ep*`^he92$@YwP>VcdoS5OC^s38e#7RPsg4j+ zbVGG}WRSET&ZfrcR(x~k8n1rTP%CnfUNKUonD$P?FtNFF#cn!wEIab-;jU=B1dHK@ z(;(yAQJ`O$sMn>h;pf^8{JISW%d+@v6@CnXh9n5TXGC}?FI9i-D0OMaIg&mAg=0Kn zNJ7oz5*ReJukD55fUsMuaP+H4tDN&V9zfqF@ zr=#ecUk9wu{0;!+gl;3Bw=Vn^)z$ahVhhw)io!na&9}LmWurLb0zubxK=UEnU*{5P z+SP}&*(iBKSO4{alBHaY^)5Q=mZ+2OwIooJ7*Q5XJ+2|q`9#f?6myq!&oz?klihLq z4C)$XP!BNS0G_Z1&TM>?Jk{S~{F3n83ioli=IO6f%wkvCl(RFFw~j0tb{GvXTx>*sB0McY0s&SNvj4+^h`9nJ_wM>F!Uc>X}9PifQekn0sKI2SAJP!a4h z5cyGTuCj3ZBM^&{dRelIlT^9zcfaAuL5Y~bl!ppSf`wZbK$z#6U~rdclk``e+!qhe z6Qspo*%<)eu6?C;Bp<^VuW6JI|Ncvyn+LlSl;Mp22Bl7ARQ0Xc24%29(ZrdsIPw&-=yHQ7_Vle|5h>AST0 zUGX2Zk34vp?U~IHT|;$U86T+UUHl_NE4m|}>E~6q``7hccCaT^#y+?wD##Q%HwPd8 zV3x4L4|qqu`B$4(LXqDJngNy-{&@aFBvVsywt@X^}iH7P%>bR?ciC$I^U-4Foa`YKI^qDyGK7k%E%c_P=yzAi`YnxGA%DeNd++j3*h^ z=rn>oBd0|~lZ<6YvmkKY*ZJlJ;Im0tqgWu&E92eqt;+NYdxx`eS(4Hw_Jb5|yVvBg z*tbdY^!AN;luEyN4VRhS@-_DC{({ziH{&Z}iGElSV~qvT>L-8G%+yEL zX#MFOhj{InyKG=mvW-<1B@c-}x$vA(nU?>S>0*eN#!SLzQ)Ex7fvQ)S4D<8|I#N$3 zT5Ei`Z?cxBODHX8(Xp73v`IsAYC@9b;t}z0wxVuQSY1J^GRwDPN@qbM-ZF48T$GZ< z8WU+;Pqo?{ghI-KZ-i*ydXu`Ep0Xw^McH_KE9J0S7G;x8Fe`DVG?j3Pv=0YzJ}yZR z%2=oqHiUjvuk0~Ca>Kol4CFi0_xQT~;_F?=u+!kIDl-9g`#ZNZ9HCy17Ga1v^Jv9# z{T4Kb1-AzUxq*MutfOWWZgD*HnFfyYg0&e9f(5tZ>krPF6{VikNeHoc{linPPt#Si z&*g>(c54V8rT_AX!J&bNm-!umPvOR}vDai#`CX___J#=zeB*{4<&2WpaDncZsOkp* zsg<%@@rbrMkR_ux9?LsQxzoBa1s%$BBn6vk#{&&zUwcfzeCBJUwFYSF$08qDsB;gWQN*g!p8pxjofWbqNSZOEKOaTx@+* zwdt5*Q47@EOZ~EZL9s?1o?A%9TJT=Ob_13yyugvPg*e&ZU(r6^k4=2+D-@n=Hv5vu zSXG|hM(>h9^zn=eQ=$6`JO&70&2|%V5Lsx>)(%#;pcOfu>*nk_3HB_BNaH$`jM<^S zcSftDU1?nL;jy)+sfonQN}(}gUW?d_ikr*3=^{G)=tjBtEPe>TO|0ddVB zTklrSHiW+!#26frPXQQ(YN8DG$PZo?(po(QUCCf_OJC`pw*uey00%gmH!`WJkrKXj2!#6?`T25mTu9OJp2L8z3! 
z=arrL$ZqxuE{%yV)14Kd>k}j7pxZ6#$Dz8$@WV5p8kTqN<-7W)Q7Gt2{KoOPK_tZ| zf2WG~O5@{qPI+W<4f_;reuFVdO^5`ADC1!JQE|N`s3cq@(0WB!n0uh@*c{=LAd;~} zyGK@hbF-Oo+!nN)@i*O(`@FA#u?o=~e{`4O#5}z&=UkU*50fOrzi11D^&FOqe>wii z?*k+2|EcUs;Gx{!@KBT~>PAwLrIDT7Th=Utu?~?np@t^gFs?zgX=D${RwOY^WGh-+ z+#4$066ISh8eYW#FXWp~S`<*%O^ZuItL1Tyqt8#tZ zY120E;^VG`!lZn&3sPd$RkdHpU#|w+bYV)pJC|SH9g%|5IkxVTQcBA4CL0}$&}ef@ zW^Vtj%M;;_1xxP9x#ex17&4N*{ksO*_4O}xYu(p*JkL#yr}@7b)t5X?%CY<+s5_MJ zuiqt+N_;A(_)%lumoyRFixWa-M7qK_9s6<1X?JDa9fP!+_6u~~M$5L=ipB=7(j#f< zZ34J%=bs549%~_mA(|={uZNs_0?o7;-LBP(ZRnkd{-^|2|=4vUTmtByHL8 zEph`(LSEzQj68a+`d$V<45J7cyv^#|^|%fD#si1Nx!4NW*`l*{->HEWNh6-|g>-=r zXmQ|-i}Ku$ndUeHQ^&ieT!Lf}vf6GaqW9$DJ2NWrqwPY%%4nip$@vK$nRp*_C-v<| zuKz~ZyN&<%!NS26&x?jhy+@awJipMQ-8(X4#Ae5??U<1QMt1l9R=w9fAnEF}NYu$2 z>6}Vkc zIb*A?G*z8^IvibmBKn_u^5&T_1oey0gZS2~obf(#xk=erZGTEdQnt3DMGM+0oPwss zj5zXD;(oWhB_T@~Ig#9@v)AKtXu3>Inmgf@A|-lD-1U>cNyl3h?ADD9)GG4}zUGPk zZzaXe!~Kf?<~@$G?Uql3t8jy9{2!doq4=J}j9ktTxss{p6!9UdjyDERlA*xZ!=Q)KDs5O)phz>Vq3BNGoM(H|=1*Q4$^2fTZw z(%nq1P|5Rt81}SYJpEEzMPl5VJsV5&4e)ZWKDyoZ>1EwpkHx-AQVQc8%JMz;{H~p{=FXV>jIxvm4X*qv52e?Y-f%DJ zxEA165GikEASQ^fH6K#d!Tpu2HP{sFs%E=e$gYd$aj$+xue6N+Wc(rAz~wUsk2`(b z8Kvmyz%bKQxpP}~baG-rwYcYCvkHOi zlkR<=>ZBTU*8RF_d#Bl@zZsRIhx<%~Z@Z=ik z>adw3!DK(8R|q$vy{FTxw%#xliD~6qXmY^7_9kthVPTF~Xy1CfBqbU~?1QmxmU=+k z(ggxvEuA;0e&+ci-zQR{-f7aO{O(Pz_OsEjLh_K>MbvoZ4nxtk5u{g@nPv)cgW_R} z9}EA4K4@z0?7ue}Z(o~R(X&FjejUI2g~08PH1E4w>9o{)S(?1>Z0XMvTb|;&EuyOE zGvWNpYX)Nv<8|a^;1>bh#&znEcl-r!T#pn= z4$?Yudha6F%4b>*8@=BdtXXY4N+`U4Dmx$}>HeVJk-QdTG@t!tVT#0(LeV0gvqyyw z2sEp^9eY0N`u10Tm4n8No&A=)IeEC|gnmEXoNSzu!1<4R<%-9kY_8~5Ej?zRegMn78wuMs#;i&eUA0Zk_RXQ3b&TT} z;SCI=7-FUB@*&;8|n>(_g^HGf3@QODE3LpmX~ELnymQm{Sx9xrKS zK29p~?v@R$0=v6Dr5aW>-!{+h@?Q58|Kz8{{W`%J+lDAdb&M5VHrX_mDY;1-JLnf)ezmPau$)1;=`-FU=-r-83tX=C`S#}GZufju zQ>sXNT0Ny=k@nc%cFnvA_i4SC)?_ORXHq8B4D%el1uPX`c~uG#S1M7C+*MMqLw78E zhY2dI8@+N^qrMI1+;TUda(vGqGSRyU{Fnm`aqrr7bz42c5xsOO-~oZpkzorD1g}Y<6rk&3>PsSGy}W?MtqFky@A(X# 
zIuNZK0cK?^=;PUAu>j0#HtjbHCV*6?jzA&OoE$*Jlga*}LF`SF?WLhv1O|zqC<>*> zYB;#lsYKx0&kH@BFpW8n*yDcc6?;_zaJs<-jPSkCsSX-!aV=P5kUgF@Nu<{a%#K*F z134Q{9|YX7X(v$62_cY3^G%t~rD>Q0z@)1|zs)vjJ6Jq9;7#Ki`w+eS**En?7;n&7 zu==V3T&eFboN3ZiMx3D8qYc;VjFUk_H-WWCau(VFXSQf~viH0L$gwD$UfFHqNcgN`x}M+YQ6RnN<+@t>JUp#)9YOkqst-Ga?{FsDpEeX0(5v{0J~SEbWiL zXC2}M4?UH@u&|;%0y`eb33ldo4~z-x8zY!oVmV=c+f$m?RfDC35mdQ2E>Pze7KWP- z>!Bh<&57I+O_^s}9Tg^k)h7{xx@0a0IA~GAOt2yy!X%Q$1rt~LbTB6@Du!_0%HV>N zlf)QI1&gvERKwso23mJ!Ou6ZS#zCS5W`gxE5T>C#E|{i<1D35C222I33?Njaz`On7 zi<+VWFP6D{e-{yiN#M|Jgk<44u1TiMI78S5W`Sdb5f+{zu34s{CfWN7a3Cf^@L%!& zN$?|!!9j2c)j$~+R6n#891w-z8(!oBpL2K=+%a$r2|~8-(vQj5_XT`<0Ksf;oP+tz z9CObS!0m)Tgg`K#xBM8B(|Z)Wb&DYL{WTYv`;A=q6~Nnx2+!lTIXtj8J7dZE!P_{z z#f8w6F}^!?^KE#+ZDv+xd5O&3EmomZzsv?>E-~ygGum45fk!SBN&|eo1rKw^?aZJ4 E2O(~oYXATM literal 54708 zcmagFV|ZrKvM!pAZQHhO+qP}9lTNj?q^^Y^VFp)SH8qbSJ)2BQ2girk4u zvO<3q)c?v~^Z#E_K}1nTQbJ9gQ9<%vVRAxVj)8FwL5_iTdUB>&m3fhE=kRWl;g`&m z!W5kh{WsV%fO*%je&j+Lv4xxK~zsEYQls$Q-p&dwID|A)!7uWtJF-=Tm1{V@#x*+kUI$=%KUuf2ka zjiZ{oiL1MXE2EjciJM!jrjFNwCh`~hL>iemrqwqnX?T*MX;U>>8yRcZb{Oy+VKZos zLiFKYPw=LcaaQt8tj=eoo3-@bG_342HQ%?jpgAE?KCLEHC+DmjxAfJ%Og^$dpC8Xw zAcp-)tfJm}BPNq_+6m4gBgBm3+CvmL>4|$2N$^Bz7W(}fz1?U-u;nE`+9`KCLuqg} zwNstNM!J4Uw|78&Y9~9>MLf56to!@qGkJw5Thx%zkzj%Ek9Nn1QA@8NBXbwyWC>9H z#EPwjMNYPigE>*Ofz)HfTF&%PFj$U6mCe-AFw$U%-L?~-+nSXHHKkdgC5KJRTF}`G zE_HNdrE}S0zf4j{r_f-V2imSqW?}3w-4=f@o@-q+cZgaAbZ((hn))@|eWWhcT2pLpTpL!;_5*vM=sRL8 zqU##{U#lJKuyqW^X$ETU5ETeEVzhU|1m1750#f}38_5N9)B_2|v@1hUu=Kt7-@dhA zq_`OMgW01n`%1dB*}C)qxC8q;?zPeF_r;>}%JYmlER_1CUbKa07+=TV45~symC*g8 zW-8(gag#cAOuM0B1xG8eTp5HGVLE}+gYTmK=`XVVV*U!>H`~j4+ROIQ+NkN$LY>h4 zqpwdeE_@AX@PL};e5vTn`Ro(EjHVf$;^oiA%@IBQq>R7_D>m2D4OwwEepkg}R_k*M zM-o;+P27087eb+%*+6vWFCo9UEGw>t&WI17Pe7QVuoAoGHdJ(TEQNlJOqnjZ8adCb zI`}op16D@v7UOEo%8E-~m?c8FL1utPYlg@m$q@q7%mQ4?OK1h%ODjTjFvqd!C z-PI?8qX8{a@6d&Lb_X+hKxCImb*3GFemm?W_du5_&EqRq!+H?5#xiX#w$eLti-?E$;Dhu`{R(o>LzM4CjO>ICf z&DMfES#FW7npnbcuqREgjPQM#gs6h>`av_oEWwOJZ2i2|D|0~pYd#WazE2Bbsa}X@ 
zu;(9fi~%!VcjK6)?_wMAW-YXJAR{QHxrD5g(ou9mR6LPSA4BRG1QSZT6A?kelP_g- zH(JQjLc!`H4N=oLw=f3{+WmPA*s8QEeEUf6Vg}@!xwnsnR0bl~^2GSa5vb!Yl&4!> zWb|KQUsC$lT=3A|7vM9+d;mq=@L%uWKwXiO9}a~gP4s_4Yohc!fKEgV7WbVo>2ITbE*i`a|V!^p@~^<={#?Gz57 zyPWeM2@p>D*FW#W5Q`1`#5NW62XduP1XNO(bhg&cX`-LYZa|m-**bu|>}S;3)eP8_ zpNTnTfm8 ze+7wDH3KJ95p)5tlwk`S7mbD`SqHnYD*6`;gpp8VdHDz%RR_~I_Ar>5)vE-Pgu7^Y z|9Px+>pi3!DV%E%4N;ii0U3VBd2ZJNUY1YC^-e+{DYq+l@cGtmu(H#Oh%ibUBOd?C z{y5jW3v=0eV0r@qMLgv1JjZC|cZ9l9Q)k1lLgm))UR@#FrJd>w^`+iy$c9F@ic-|q zVHe@S2UAnc5VY_U4253QJxm&Ip!XKP8WNcnx9^cQ;KH6PlW8%pSihSH2(@{2m_o+m zr((MvBja2ctg0d0&U5XTD;5?d?h%JcRJp{_1BQW1xu&BrA3(a4Fh9hon-ly$pyeHq zG&;6q?m%NJ36K1Sq_=fdP(4f{Hop;_G_(i?sPzvB zDM}>*(uOsY0I1j^{$yn3#U(;B*g4cy$-1DTOkh3P!LQ;lJlP%jY8}Nya=h8$XD~%Y zbV&HJ%eCD9nui-0cw!+n`V~p6VCRqh5fRX z8`GbdZ@73r7~myQLBW%db;+BI?c-a>Y)m-FW~M=1^|<21_Sh9RT3iGbO{o-hpN%d6 z7%++#WekoBOP^d0$$|5npPe>u3PLvX_gjH2x(?{&z{jJ2tAOWTznPxv-pAv<*V7r$ z6&glt>7CAClWz6FEi3bToz-soY^{ScrjwVPV51=>n->c(NJngMj6TyHty`bfkF1hc zkJS%A@cL~QV0-aK4>Id!9dh7>0IV;1J9(myDO+gv76L3NLMUm9XyPauvNu$S<)-|F zZS}(kK_WnB)Cl`U?jsdYfAV4nrgzIF@+%1U8$poW&h^c6>kCx3;||fS1_7JvQT~CV zQ8Js+!p)3oW>Df(-}uqC`Tcd%E7GdJ0p}kYj5j8NKMp(KUs9u7?jQ94C)}0rba($~ zqyBx$(1ae^HEDG`Zc@-rXk1cqc7v0wibOR4qpgRDt#>-*8N3P;uKV0CgJE2SP>#8h z=+;i_CGlv+B^+$5a}SicVaSeaNn29K`C&=}`=#Nj&WJP9Xhz4mVa<+yP6hkrq1vo= z1rX4qg8dc4pmEvq%NAkpMK>mf2g?tg_1k2%v}<3`$6~Wlq@ItJ*PhHPoEh1Yi>v57 z4k0JMO)*=S`tKvR5gb-(VTEo>5Y>DZJZzgR+j6{Y`kd|jCVrg!>2hVjz({kZR z`dLlKhoqT!aI8=S+fVp(5*Dn6RrbpyO~0+?fy;bm$0jmTN|t5i6rxqr4=O}dY+ROd zo9Et|x}!u*xi~>-y>!M^+f&jc;IAsGiM_^}+4|pHRn{LThFFpD{bZ|TA*wcGm}XV^ zr*C6~@^5X-*R%FrHIgo-hJTBcyQ|3QEj+cSqp#>&t`ZzB?cXM6S(lRQw$I2?m5=wd z78ki`R?%;o%VUhXH?Z#(uwAn9$m`npJ=cA+lHGk@T7qq_M6Zoy1Lm9E0UUysN)I_x zW__OAqvku^>`J&CB=ie@yNWsaFmem}#L3T(x?a`oZ+$;3O-icj2(5z72Hnj=9Z0w% z<2#q-R=>hig*(t0^v)eGq2DHC%GymE-_j1WwBVGoU=GORGjtaqr0BNigOCqyt;O(S zKG+DoBsZU~okF<7ahjS}bzwXxbAxFfQAk&O@>LsZMsZ`?N?|CDWM(vOm%B3CBPC3o z%2t@%H$fwur}SSnckUm0-k)mOtht`?nwsDz=2#v=RBPGg39i#%odKq{K^;bTD!6A9 
zskz$}t)sU^=a#jLZP@I=bPo?f-L}wpMs{Tc!m7-bi!Ldqj3EA~V;4(dltJmTXqH0r z%HAWKGutEc9vOo3P6Q;JdC^YTnby->VZ6&X8f{obffZ??1(cm&L2h7q)*w**+sE6dG*;(H|_Q!WxU{g)CeoT z(KY&bv!Usc|m+Fqfmk;h&RNF|LWuNZ!+DdX*L=s-=_iH=@i` z?Z+Okq^cFO4}_n|G*!)Wl_i%qiMBaH8(WuXtgI7EO=M>=i_+;MDjf3aY~6S9w0K zUuDO7O5Ta6+k40~xh~)D{=L&?Y0?c$s9cw*Ufe18)zzk%#ZY>Tr^|e%8KPb0ht`b( zuP@8#Ox@nQIqz9}AbW0RzE`Cf>39bOWz5N3qzS}ocxI=o$W|(nD~@EhW13Rj5nAp; zu2obEJa=kGC*#3=MkdkWy_%RKcN=?g$7!AZ8vBYKr$ePY(8aIQ&yRPlQ=mudv#q$q z4%WzAx=B{i)UdLFx4os?rZp6poShD7Vc&mSD@RdBJ=_m^&OlkEE1DFU@csgKcBifJ zz4N7+XEJhYzzO=86 z#%eBQZ$Nsf2+X0XPHUNmg#(sNt^NW1Y0|M(${e<0kW6f2q5M!2YE|hSEQ*X-%qo(V zHaFwyGZ0on=I{=fhe<=zo{=Og-_(to3?cvL4m6PymtNsdDINsBh8m>a%!5o3s(en) z=1I z6O+YNertC|OFNqd6P=$gMyvmfa`w~p9*gKDESFqNBy(~Zw3TFDYh}$iudn)9HxPBi zdokK@o~nu?%imcURr5Y~?6oo_JBe}t|pU5qjai|#JDyG=i^V~7+a{dEnO<(y>ahND#_X_fcEBNiZ)uc&%1HVtx8Ts z*H_Btvx^IhkfOB#{szN*n6;y05A>3eARDXslaE>tnLa>+`V&cgho?ED+&vv5KJszf zG4@G;7i;4_bVvZ>!mli3j7~tPgybF5|J6=Lt`u$D%X0l}#iY9nOXH@(%FFJLtzb%p zzHfABnSs;v-9(&nzbZytLiqqDIWzn>JQDk#JULcE5CyPq_m#4QV!}3421haQ+LcfO*>r;rg6K|r#5Sh|y@h1ao%Cl)t*u`4 zMTP!deC?aL7uTxm5^nUv#q2vS-5QbBKP|drbDXS%erB>fYM84Kpk^au99-BQBZR z7CDynflrIAi&ahza+kUryju5LR_}-Z27g)jqOc(!Lx9y)e z{cYc&_r947s9pteaa4}dc|!$$N9+M38sUr7h(%@Ehq`4HJtTpA>B8CLNO__@%(F5d z`SmX5jbux6i#qc}xOhumzbAELh*Mfr2SW99=WNOZRZgoCU4A2|4i|ZVFQt6qEhH#B zK_9G;&h*LO6tB`5dXRSBF0hq0tk{2q__aCKXYkP#9n^)@cq}`&Lo)1KM{W+>5mSed zKp~=}$p7>~nK@va`vN{mYzWN1(tE=u2BZhga5(VtPKk(*TvE&zmn5vSbjo zZLVobTl%;t@6;4SsZ>5+U-XEGUZGG;+~|V(pE&qqrp_f~{_1h@5ZrNETqe{bt9ioZ z#Qn~gWCH!t#Ha^n&fT2?{`}D@s4?9kXj;E;lWV9Zw8_4yM0Qg-6YSsKgvQ*fF{#Pq z{=(nyV>#*`RloBVCs;Lp*R1PBIQOY=EK4CQa*BD0MsYcg=opP?8;xYQDSAJBeJpw5 zPBc_Ft9?;<0?pBhCmOtWU*pN*;CkjJ_}qVic`}V@$TwFi15!mF1*m2wVX+>5p%(+R zQ~JUW*zWkalde{90@2v+oVlkxOZFihE&ZJ){c?hX3L2@R7jk*xjYtHi=}qb+4B(XJ z$gYcNudR~4Kz_WRq8eS((>ALWCO)&R-MXE+YxDn9V#X{_H@j616<|P(8h(7z?q*r+ zmpqR#7+g$cT@e&(%_|ipI&A%9+47%30TLY(yuf&*knx1wNx|%*H^;YB%ftt%5>QM= z^i;*6_KTSRzQm%qz*>cK&EISvF^ovbS4|R%)zKhTH_2K>jP3mBGn5{95&G9^a#4|K 
zv+!>fIsR8z{^x4)FIr*cYT@Q4Z{y}};rLHL+atCgHbfX*;+k&37DIgENn&=k(*lKD zG;uL-KAdLn*JQ?@r6Q!0V$xXP=J2i~;_+i3|F;_En;oAMG|I-RX#FwnmU&G}w`7R{ z788CrR-g1DW4h_`&$Z`ctN~{A)Hv_-Bl!%+pfif8wN32rMD zJDs$eVWBYQx1&2sCdB0!vU5~uf)=vy*{}t{2VBpcz<+~h0wb7F3?V^44*&83Z2#F` z32!rd4>uc63rQP$3lTH3zb-47IGR}f)8kZ4JvX#toIpXH`L%NnPDE~$QI1)0)|HS4 zVcITo$$oWWwCN@E-5h>N?Hua!N9CYb6f8vTFd>h3q5Jg-lCI6y%vu{Z_Uf z$MU{{^o~;nD_@m2|E{J)q;|BK7rx%`m``+OqZAqAVj-Dy+pD4-S3xK?($>wn5bi90CFAQ+ACd;&m6DQB8_o zjAq^=eUYc1o{#+p+ zn;K<)Pn*4u742P!;H^E3^Qu%2dM{2slouc$AN_3V^M7H_KY3H)#n7qd5_p~Za7zAj|s9{l)RdbV9e||_67`#Tu*c<8!I=zb@ z(MSvQ9;Wrkq6d)!9afh+G`!f$Ip!F<4ADdc*OY-y7BZMsau%y?EN6*hW4mOF%Q~bw z2==Z3^~?q<1GTeS>xGN-?CHZ7a#M4kDL zQxQr~1ZMzCSKFK5+32C%+C1kE#(2L=15AR!er7GKbp?Xd1qkkGipx5Q~FI-6zt< z*PTpeVI)Ngnnyaz5noIIgNZtb4bQdKG{Bs~&tf)?nM$a;7>r36djllw%hQxeCXeW^ z(i6@TEIuxD<2ulwLTt|&gZP%Ei+l!(%p5Yij6U(H#HMkqM8U$@OKB|5@vUiuY^d6X zW}fP3;Kps6051OEO(|JzmVU6SX(8q>*yf*x5QoxDK={PH^F?!VCzES_Qs>()_y|jg6LJlJWp;L zKM*g5DK7>W_*uv}{0WUB0>MHZ#oJZmO!b3MjEc}VhsLD~;E-qNNd?x7Q6~v zR=0$u>Zc2Xr}>x_5$-s#l!oz6I>W?lw;m9Ae{Tf9eMX;TI-Wf_mZ6sVrMnY#F}cDd z%CV*}fDsXUF7Vbw>PuDaGhu631+3|{xp<@Kl|%WxU+vuLlcrklMC!Aq+7n~I3cmQ! 
z`e3cA!XUEGdEPSu``&lZEKD1IKO(-VGvcnSc153m(i!8ohi`)N2n>U_BemYJ`uY>8B*Epj!oXRLV}XK}>D*^DHQ7?NY*&LJ9VSo`Ogi9J zGa;clWI8vIQqkngv2>xKd91K>?0`Sw;E&TMg&6dcd20|FcTsnUT7Yn{oI5V4@Ow~m zz#k~8TM!A9L7T!|colrC0P2WKZW7PNj_X4MfESbt<-soq*0LzShZ}fyUx!(xIIDwx zRHt^_GAWe0-Vm~bDZ(}XG%E+`XhKpPlMBo*5q_z$BGxYef8O!ToS8aT8pmjbPq)nV z%x*PF5ZuSHRJqJ!`5<4xC*xb2vC?7u1iljB_*iUGl6+yPyjn?F?GOF2_KW&gOkJ?w z3e^qc-te;zez`H$rsUCE0<@7PKGW?7sT1SPYWId|FJ8H`uEdNu4YJjre`8F*D}6Wh z|FQ`xf7yiphHIAkU&OYCn}w^ilY@o4larl?^M7&8YI;hzBIsX|i3UrLsx{QDKwCX< zy;a>yjfJ6!sz`NcVi+a!Fqk^VE^{6G53L?@Tif|j!3QZ0fk9QeUq8CWI;OmO-Hs+F zuZ4sHLA3{}LR2Qlyo+{d@?;`tpp6YB^BMoJt?&MHFY!JQwoa0nTSD+#Ku^4b{5SZVFwU9<~APYbaLO zu~Z)nS#dxI-5lmS-Bnw!(u15by(80LlC@|ynj{TzW)XcspC*}z0~8VRZq>#Z49G`I zgl|C#H&=}n-ajxfo{=pxPV(L*7g}gHET9b*s=cGV7VFa<;Htgjk>KyW@S!|z`lR1( zGSYkEl&@-bZ*d2WQ~hw3NpP=YNHF^XC{TMG$Gn+{b6pZn+5=<()>C!N^jncl0w6BJ zdHdnmSEGK5BlMeZD!v4t5m7ct7{k~$1Ie3GLFoHjAH*b?++s<|=yTF+^I&jT#zuMx z)MLhU+;LFk8bse|_{j+d*a=&cm2}M?*arjBPnfPgLwv)86D$6L zLJ0wPul7IenMvVAK$z^q5<^!)7aI|<&GGEbOr=E;UmGOIa}yO~EIr5xWU_(ol$&fa zR5E(2vB?S3EvJglTXdU#@qfDbCYs#82Yo^aZN6`{Ex#M)easBTe_J8utXu(fY1j|R z9o(sQbj$bKU{IjyhosYahY{63>}$9_+hWxB3j}VQkJ@2$D@vpeRSldU?&7I;qd2MF zSYmJ>zA(@N_iK}m*AMPIJG#Y&1KR)6`LJ83qg~`Do3v^B0>fU&wUx(qefuTgzFED{sJ65!iw{F2}1fQ3= ziFIP{kezQxmlx-!yo+sC4PEtG#K=5VM9YIN0z9~c4XTX?*4e@m;hFM!zVo>A`#566 z>f&3g94lJ{r)QJ5m7Xe3SLau_lOpL;A($wsjHR`;xTXgIiZ#o&vt~ zGR6KdU$FFbLfZCC3AEu$b`tj!9XgOGLSV=QPIYW zjI!hSP#?8pn0@ezuenOzoka8!8~jXTbiJ6+ZuItsWW03uzASFyn*zV2kIgPFR$Yzm zE<$cZlF>R8?Nr2_i?KiripBc+TGgJvG@vRTY2o?(_Di}D30!k&CT`>+7ry2!!iC*X z<@=U0_C#16=PN7bB39w+zPwDOHX}h20Ap);dx}kjXX0-QkRk=cr};GYsjSvyLZa-t zzHONWddi*)RDUH@RTAsGB_#&O+QJaaL+H<<9LLSE+nB@eGF1fALwjVOl8X_sdOYme z0lk!X=S(@25=TZHR7LlPp}fY~yNeThMIjD}pd9+q=j<_inh0$>mIzWVY+Z9p<{D^#0Xk+b_@eNSiR8;KzSZ#7lUsk~NGMcB8C2c=m2l5paHPq`q{S(kdA7Z1a zyfk2Y;w?^t`?@yC5Pz9&pzo}Hc#}mLgDmhKV|PJ3lKOY(Km@Fi2AV~CuET*YfUi}u 
zfInZnqDX(<#vaS<^fszuR=l)AbqG{}9{rnyx?PbZz3Pyu!eSJK`uwkJU!ORQXy4x83r!PNgOyD33}}L=>xX_93l6njNTuqL8J{l%*3FVn3MG4&Fv*`lBXZ z?=;kn6HTT^#SrPX-N)4EZiIZI!0ByXTWy;;J-Tht{jq1mjh`DSy7yGjHxIaY%*sTx zuy9#9CqE#qi>1misx=KRWm=qx4rk|}vd+LMY3M`ow8)}m$3Ggv&)Ri*ON+}<^P%T5 z_7JPVPfdM=Pv-oH<tecoE}(0O7|YZc*d8`Uv_M*3Rzv7$yZnJE6N_W=AQ3_BgU_TjA_T?a)U1csCmJ&YqMp-lJe`y6>N zt++Bi;ZMOD%%1c&-Q;bKsYg!SmS^#J@8UFY|G3!rtyaTFb!5@e(@l?1t(87ln8rG? z--$1)YC~vWnXiW3GXm`FNSyzu!m$qT=Eldf$sMl#PEfGmzQs^oUd=GIQfj(X=}dw+ zT*oa0*oS%@cLgvB&PKIQ=Ok?>x#c#dC#sQifgMwtAG^l3D9nIg(Zqi;D%807TtUUCL3_;kjyte#cAg?S%e4S2W>9^A(uy8Ss0Tc++ZTjJw1 z&Em2g!3lo@LlDyri(P^I8BPpn$RE7n*q9Q-c^>rfOMM6Pd5671I=ZBjAvpj8oIi$! zl0exNl(>NIiQpX~FRS9UgK|0l#s@#)p4?^?XAz}Gjb1?4Qe4?j&cL$C8u}n)?A@YC zfmbSM`Hl5pQFwv$CQBF=_$Sq zxsV?BHI5bGZTk?B6B&KLdIN-40S426X3j_|ceLla*M3}3gx3(_7MVY1++4mzhH#7# zD>2gTHy*%i$~}mqc#gK83288SKp@y3wz1L_e8fF$Rb}ex+`(h)j}%~Ld^3DUZkgez zOUNy^%>>HHE|-y$V@B}-M|_{h!vXpk01xaD%{l{oQ|~+^>rR*rv9iQen5t?{BHg|% zR`;S|KtUb!X<22RTBA4AAUM6#M?=w5VY-hEV)b`!y1^mPNEoy2K)a>OyA?Q~Q*&(O zRzQI~y_W=IPi?-OJX*&&8dvY0zWM2%yXdFI!D-n@6FsG)pEYdJbuA`g4yy;qrgR?G z8Mj7gv1oiWq)+_$GqqQ$(ZM@#|0j7})=#$S&hZwdoijFI4aCFLVI3tMH5fLreZ;KD zqA`)0l~D2tuIBYOy+LGw&hJ5OyE+@cnZ0L5+;yo2pIMdt@4$r^5Y!x7nHs{@>|W(MzJjATyWGNwZ^4j+EPU0RpAl-oTM@u{lx*i0^yyWPfHt6QwPvYpk9xFMWfBFt!+Gu6TlAmr zeQ#PX71vzN*_-xh&__N`IXv6`>CgV#eA_%e@7wjgkj8jlKzO~Ic6g$cT`^W{R{606 zCDP~+NVZ6DMO$jhL~#+!g*$T!XW63#(ngDn#Qwy71yj^gazS{e;3jGRM0HedGD@pt z?(ln3pCUA(ekqAvvnKy0G@?-|-dh=eS%4Civ&c}s%wF@0K5Bltaq^2Os1n6Z3%?-Q zAlC4goQ&vK6TpgtzkHVt*1!tBYt-`|5HLV1V7*#45Vb+GACuU+QB&hZ=N_flPy0TY zR^HIrdskB#<$aU;HY(K{a3(OQa$0<9qH(oa)lg@Uf>M5g2W0U5 zk!JSlhrw8quBx9A>RJ6}=;W&wt@2E$7J=9SVHsdC?K(L(KACb#z)@C$xXD8^!7|uv zZh$6fkq)aoD}^79VqdJ!Nz-8$IrU(_-&^cHBI;4 z^$B+1aPe|LG)C55LjP;jab{dTf$0~xbXS9!!QdcmDYLbL^jvxu2y*qnx2%jbL%rB z{aP85qBJe#(&O~Prk%IJARcdEypZ)vah%ZZ%;Zk{eW(U)Bx7VlzgOi8)x z`rh4l`@l_Ada7z&yUK>ZF;i6YLGwI*Sg#Fk#Qr0Jg&VLax(nNN$u-XJ5=MsP3|(lEdIOJ7|(x3iY;ea)5#BW*mDV%^=8qOeYO&gIdJVuLLN3cFaN=xZtFB=b 
zH{l)PZl_j^u+qx@89}gAQW7ofb+k)QwX=aegihossZq*+@PlCpb$rpp>Cbk9UJO<~ zDjlXQ_Ig#W0zdD3&*ei(FwlN#3b%FSR%&M^ywF@Fr>d~do@-kIS$e%wkIVfJ|Ohh=zc zF&Rnic^|>@R%v?@jO}a9;nY3Qrg_!xC=ZWUcYiA5R+|2nsM*$+c$TOs6pm!}Z}dfM zGeBhMGWw3$6KZXav^>YNA=r6Es>p<6HRYcZY)z{>yasbC81A*G-le8~QoV;rtKnkx z;+os8BvEe?0A6W*a#dOudsv3aWs?d% z0oNngyVMjavLjtjiG`!007#?62ClTqqU$@kIY`=x^$2e>iqIy1>o|@Tw@)P)B8_1$r#6>DB_5 zmaOaoE~^9TolgDgooKFuEFB#klSF%9-~d2~_|kQ0Y{Ek=HH5yq9s zDq#1S551c`kSiWPZbweN^A4kWiP#Qg6er1}HcKv{fxb1*BULboD0fwfaNM_<55>qM zETZ8TJDO4V)=aPp_eQjX%||Ud<>wkIzvDlpNjqW>I}W!-j7M^TNe5JIFh#-}zAV!$ICOju8Kx)N z0vLtzDdy*rQN!7r>Xz7rLw8J-(GzQlYYVH$WK#F`i_i^qVlzTNAh>gBWKV@XC$T-` z3|kj#iCquDhiO7NKum07i|<-NuVsX}Q}mIP$jBJDMfUiaWR3c|F_kWBMw0_Sr|6h4 zk`_r5=0&rCR^*tOy$A8K;@|NqwncjZ>Y-75vlpxq%Cl3EgH`}^^~=u zoll6xxY@a>0f%Ddpi;=cY}fyG!K2N-dEyXXmUP5u){4VnyS^T4?pjN@Ot4zjL(Puw z_U#wMH2Z#8Pts{olG5Dy0tZj;N@;fHheu>YKYQU=4Bk|wcD9MbA`3O4bj$hNRHwzb zSLcG0SLV%zywdbuwl(^E_!@&)TdXge4O{MRWk2RKOt@!8E{$BU-AH(@4{gxs=YAz9LIob|Hzto0}9cWoz6Tp2x0&xi#$ zHh$dwO&UCR1Ob2w00-2eG7d4=cN(Y>0R#$q8?||q@iTi+7-w-xR%uMr&StFIthC<# zvK(aPduwuNB}oJUV8+Zl)%cnfsHI%4`;x6XW^UF^e4s3Z@S<&EV8?56Wya;HNs0E> z`$0dgRdiUz9RO9Au3RmYq>K#G=X%*_dUbSJHP`lSfBaN8t-~@F>)BL1RT*9I851A3 z<-+Gb#_QRX>~av#Ni<#zLswtu-c6{jGHR>wflhKLzC4P@b%8&~u)fosoNjk4r#GvC zlU#UU9&0Hv;d%g72Wq?Ym<&&vtA3AB##L}=ZjiTR4hh7J)e>ei} zt*u+>h%MwN`%3}b4wYpV=QwbY!jwfIj#{me)TDOG`?tI!%l=AwL2G@9I~}?_dA5g6 zCKgK(;6Q0&P&K21Tx~k=o6jwV{dI_G+Ba*Zts|Tl6q1zeC?iYJTb{hel*x>^wb|2RkHkU$!+S4OU4ZOKPZjV>9OVsqNnv5jK8TRAE$A&^yRwK zj-MJ3Pl?)KA~fq#*K~W0l4$0=8GRx^9+?w z!QT8*-)w|S^B0)ZeY5gZPI2G(QtQf?DjuK(s^$rMA!C%P22vynZY4SuOE=wX2f8$R z)A}mzJi4WJnZ`!bHG1=$lwaxm!GOnRbR15F$nRC-M*H<*VfF|pQw(;tbSfp({>9^5 zw_M1-SJ9eGF~m(0dvp*P8uaA0Yw+EkP-SWqu zqal$hK8SmM7#Mrs0@OD+%_J%H*bMyZiWAZdsIBj#lkZ!l2c&IpLu(5^T0Ge5PHzR} zn;TXs$+IQ_&;O~u=Jz+XE0wbOy`=6>m9JVG} zJ~Kp1e5m?K3x@@>!D)piw^eMIHjD4RebtR`|IlckplP1;r21wTi8v((KqNqn%2CB< zifaQc&T}*M&0i|LW^LgdjIaX|o~I$`owHolRqeH_CFrqCUCleN130&vH}dK|^kC>) 
z-r2P~mApHotL4dRX$25lIcRh_*kJaxi^%ZN5-GAAMOxfB!6flLPY-p&QzL9TE%ho( zRwftE3sy5<*^)qYzKkL|rE>n@hyr;xPqncY6QJ8125!MWr`UCWuC~A#G1AqF1@V$kv>@NBvN&2ygy*{QvxolkRRb%Ui zsmKROR%{*g*WjUUod@@cS^4eF^}yQ1>;WlGwOli z+Y$(8I`0(^d|w>{eaf!_BBM;NpCoeem2>J}82*!em=}}ymoXk>QEfJ>G(3LNA2-46 z5PGvjr)Xh9>aSe>vEzM*>xp{tJyZox1ZRl}QjcvX2TEgNc^(_-hir@Es>NySoa1g^ zFow_twnHdx(j?Q_3q51t3XI7YlJ4_q&(0#)&a+RUy{IcBq?)eaWo*=H2UUVIqtp&lW9JTJiP&u zw8+4vo~_IJXZIJb_U^&=GI1nSD%e;P!c{kZALNCm5c%%oF+I3DrA63_@4)(v4(t~JiddILp7jmoy+>cD~ivwoctFfEL zP*#2Rx?_&bCpX26MBgp^4G>@h`Hxc(lnqyj!*t>9sOBcXN(hTwEDpn^X{x!!gPX?1 z*uM$}cYRwHXuf+gYTB}gDTcw{TXSOUU$S?8BeP&sc!Lc{{pEv}x#ELX>6*ipI1#>8 zKes$bHjiJ1OygZge_ak^Hz#k;=od1wZ=o71ba7oClBMq>Uk6hVq|ePPt)@FM5bW$I z;d2Or@wBjbTyZj|;+iHp%Bo!Vy(X3YM-}lasMItEV_QrP-Kk_J4C>)L&I3Xxj=E?| zsAF(IfVQ4w+dRRnJ>)}o^3_012YYgFWE)5TT=l2657*L8_u1KC>Y-R{7w^ShTtO;VyD{dezY;XD@Rwl_9#j4Uo!1W&ZHVe0H>f=h#9k>~KUj^iUJ%@wU{Xuy z3FItk0<;}6D02$u(RtEY#O^hrB>qgxnOD^0AJPGC9*WXw_$k%1a%-`>uRIeeAIf3! zbx{GRnG4R$4)3rVmg63gW?4yIWW_>;t3>4@?3}&ct0Tk}<5ljU>jIN1 z&+mzA&1B6`v(}i#vAzvqWH~utZzQR;fCQGLuCN|p0hey7iCQ8^^dr*hi^wC$bTk`8M(JRKtQuXlSf$d(EISvuY0dM z7&ff;p-Ym}tT8^MF5ACG4sZmAV!l;0h&Mf#ZPd--_A$uv2@3H!y^^%_&Iw$*p79Uc5@ZXLGK;edg%)6QlvrN`U7H@e^P*0Atd zQB%>4--B1!9yeF(3vk;{>I8+2D;j`zdR8gd8dHuCQ_6|F(5-?gd&{YhLeyq_-V--4 z(SP#rP=-rsSHJSHDpT1{dMAb7-=9K1-@co_!$dG^?c(R-W&a_C5qy2~m3@%vBGhgnrw|H#g9ABb7k{NE?m4xD?;EV+fPdE>S2g$U(&_zGV+TPvaot>W_ zf8yY@)yP8k$y}UHVgF*uxtjW2zX4Hc3;W&?*}K&kqYpi%FHarfaC$ETHpSoP;A692 zR*LxY1^BO1ry@7Hc9p->hd==U@cuo*CiTnozxen;3Gct=?{5P94TgQ(UJoBb`7z@BqY z;q&?V2D1Y%n;^Dh0+eD)>9<}=A|F5{q#epBu#sf@lRs`oFEpkE%mrfwqJNFCpJC$| zy6#N;GF8XgqX(m2yMM2yq@TxStIR7whUIs2ar$t%Avh;nWLwElVBSI#j`l2$lb-!y zK|!?0hJ1T-wL{4uJhOFHp4?@28J^Oh61DbeTeSWub(|dL-KfxFCp0CjQjV`WaPW|U z=ev@VyC>IS@{ndzPy||b3z-bj5{Y53ff}|TW8&&*pu#?qs?)#&M`ACfb;%m+qX{Or zb+FNNHU}mz!@!EdrxmP_6eb3Cah!mL0ArL#EA1{nCY-!jL8zzz7wR6wAw(8K|IpW; zUvH*b1wbuRlwlUt;dQhx&pgsvJcUpm67rzkNc}2XbC6mZAgUn?VxO6YYg=M!#e=z8 zjX5ZLyMyz(VdPVyosL0}ULO!Mxu>hh`-MItnGeuQ;wGaU0)gIq3ZD=pDc(Qtk}APj 
z#HtA;?idVKNF)&0r|&w#l7DbX%b91b2;l2=L8q#}auVdk{RuYn3SMDo1%WW0tD*62 zaIj65Y38;?-~@b82AF!?Nra2;PU)t~qYUhl!GDK3*}%@~N0GQH7zflSpfP-ydOwNe zOK~w((+pCD&>f!b!On);5m+zUBFJtQ)mV^prS3?XgPybC2%2LiE5w+S4B|lP z+_>3$`g=%P{IrN|1Oxz30R{kI`}ZL!r|)RS@8Do;ZD3_=PbBrrP~S@EdsD{V+`!4v z{MSF}j!6odl33rA+$odIMaK%ersg%xMz>JQ^R+!qNq$5S{KgmGN#gAApX*3ib)TDsVVi>4ypIX|Ik4d6E}v z=8+hs9J=k3@Eiga^^O|ESMQB-O6i+BL*~*8coxjGs{tJ9wXjGZ^Vw@j93O<&+bzAH z9+N^ALvDCV<##cGoo5fX;wySGGmbH zHsslio)cxlud=iP2y=nM>v8vBn*hJ0KGyNOy7dr8yJKRh zywBOa4Lhh58y06`5>ESYXqLt8ZM1axd*UEp$wl`APU}C9m1H8-ModG!(wfSUQ%}rT3JD*ud~?WJdM}x>84)Cra!^J9wGs6^G^ze~eV(d&oAfm$ z_gwq4SHe=<#*FN}$5(0d_NumIZYaqs|MjFtI_rJb^+ZO?*XQ*47mzLNSL7~Nq+nw8 zuw0KwWITC43`Vx9eB!0Fx*CN9{ea$xjCvtjeyy>yf!ywxvv6<*h0UNXwkEyRxX{!e$TgHZ^db3r;1qhT)+yt@|_!@ zQG2aT`;lj>qjY`RGfQE?KTt2mn=HmSR>2!E38n8PlFs=1zsEM}AMICb z86Dbx(+`!hl$p=Z)*W~+?_HYp+CJacrCS-Fllz!7E>8*!E(yCh-cWbKc7)mPT6xu= zfKpF3I+p%yFXkMIq!ALiXF89-aV{I6v+^k#!_xwtQ*Nl#V|hKg=nP=fG}5VB8Ki7) z;19!on-iq&Xyo#AowvpA)RRgF?YBdDc$J8*)2Wko;Y?V6XMOCqT(4F#U2n1jg*4=< z8$MfDYL|z731iEKB3WW#kz|c3qh7AXjyZ}wtSg9xA(ou-pLoxF{4qk^KS?!d3J0!! zqE#R9NYGUyy>DEs%^xW;oQ5Cs@fomcrsN}rI2Hg^6y9kwLPF`K3llX00aM_r)c?ay zevlHA#N^8N+AI=)vx?4(=?j^ba^{umw140V#g58#vtnh8i7vRs*UD=lge;T+I zl1byCNr5H%DF58I2(rk%8hQ;zuCXs=sipbQy?Hd;umv4!fav@LE4JQ^>J{aZ=!@Gc~p$JudMy%0{=5QY~S8YVP zaP6gRqfZ0>q9nR3p+Wa8icNyl0Zn4k*bNto-(+o@-D8cd1Ed7`}dN3%wezkFxj_#_K zyV{msOOG;n+qbU=jBZk+&S$GEwJ99zSHGz8hF1`Xxa^&l8aaD8OtnIVsdF0cz=Y)? 
zP$MEdfKZ}_&#AC)R%E?G)tjrKsa-$KW_-$QL}x$@$NngmX2bHJQG~77D1J%3bGK!- zl!@kh5-uKc@U4I_Er;~epL!gej`kdX>tSXVFP-BH#D-%VJOCpM(-&pOY+b#}lOe)Z z0MP5>av1Sy-dfYFy%?`p`$P|`2yDFlv(8MEsa++Qv5M?7;%NFQK0E`Ggf3@2aUwtBpCoh`D}QLY%QAnJ z%qcf6!;cjOTYyg&2G27K(F8l^RgdV-V!~b$G%E=HP}M*Q*%xJV3}I8UYYd)>*nMvw zemWg`K6Rgy+m|y!8&*}=+`STm(dK-#b%)8nLsL&0<8Zd^|# z;I2gR&e1WUS#v!jX`+cuR;+yi(EiDcRCouW0AHNd?;5WVnC_Vg#4x56#0FOwTH6_p z#GILFF0>bb_tbmMM0|sd7r%l{U!fI0tGza&?65_D7+x9G zf3GA{c|mnO(|>}y(}%>|2>p0X8wRS&Eb0g)rcICIctfD_I9Wd+hKuEqv?gzEZBxG-rG~e!-2hqaR$Y$I@k{rLyCccE}3d)7Fn3EvfsEhA|bnJ374&pZDq&i zr(9#eq(g8^tG??ZzVk(#jU+-ce`|yiQ1dgrJ)$|wk?XLEqv&M+)I*OZ*oBCizjHuT zjZ|mW=<1u$wPhyo#&rIO;qH~pu4e3X;!%BRgmX%?&KZ6tNl386-l#a>ug5nHU2M~{fM2jvY*Py< zbR&^o&!T19G6V-pV@CB)YnEOfmrdPG%QByD?=if99ihLxP6iA8$??wUPWzptC{u5H z38Q|!=IW`)5Gef4+pz|9fIRXt>nlW)XQvUXBO8>)Q=$@gtwb1iEkU4EOWI4`I4DN5 zTC-Pk6N>2%7Hikg?`Poj5lkM0T_i zoCXfXB&}{TG%IB)ENSfI_Xg3=lxYc6-P059>oK;L+vGMy_h{y9soj#&^q5E!pl(Oq zl)oCBi56u;YHkD)d`!iOAhEJ0A^~T;uE9~Yp0{E%G~0q|9f34F!`P56-ZF{2hSaWj zio%9RR%oe~he22r@&j_d(y&nAUL*ayBY4#CWG&gZ8ybs#UcF?8K#HzziqOYM-<`C& z1gD?j)M0bp1w*U>X_b1@ag1Fx=d*wlr zEAcpmI#5LtqcX95LeS=LXlzh*l;^yPl_6MKk)zPuTz_p8ynQ5;oIOUAoPED=+M6Q( z8YR!DUm#$zTM9tbNhxZ4)J0L&Hpn%U>wj3z<=g;`&c_`fGufS!o|1%I_sA&;14bRC z3`BtzpAB-yl!%zM{Aiok8*X%lDNrPiAjBnzHbF0=Ua*3Lxl(zN3Thj2x6nWi^H7Jlwd2fxIvnI-SiC%*j z2~wIWWKT^5fYipo-#HSrr;(RkzzCSt?THVEH2EPvV-4c#Gu4&1X% z<1zTAM7ZM(LuD@ZPS?c30Ur`;2w;PXPVevxT)Ti25o}1JL>MN5i1^(aCF3 zbp>RI?X(CkR9*Hnv!({Ti@FBm;`Ip%e*D2tWEOc62@$n7+gWb;;j}@G()~V)>s}Bd zw+uTg^ibA(gsp*|&m7Vm=heuIF_pIukOedw2b_uO8hEbM4l=aq?E-7M_J`e(x9?{5 zpbgu7h}#>kDQAZL;Q2t?^pv}Y9Zlu=lO5e18twH&G&byq9XszEeXt$V93dQ@Fz2DV zs~zm*L0uB`+o&#{`uVYGXd?)Fv^*9mwLW4)IKoOJ&(8uljK?3J`mdlhJF1aK;#vlc zJdTJc2Q>N*@GfafVw45B03)Ty8qe>Ou*=f#C-!5uiyQ^|6@Dzp9^n-zidp*O`YuZ|GO28 zO0bqi;)fspT0dS2;PLm(&nLLV&&=Ingn(0~SB6Fr^AxPMO(r~y-q2>gRWv7{zYW6c zfiuqR)Xc41A7Eu{V7$-yxYT-opPtqQIJzMVkxU)cV~N0ygub%l9iHT3eQtB>nH0c` 
zFy}Iwd9vocxlm!P)eh0GwKMZ(fEk92teSi*fezYw3qRF_E-EcCh-&1T)?beW?9Q_+pde8&UW*(avPF4P}M#z*t~KlF~#5TT!&nu z>FAKF8vQl>Zm(G9UKi4kTqHj`Pf@Z@Q(bmZkseb1^;9k*`a9lKXceKX#dMd@ds`t| z2~UPsbn2R0D9Nm~G*oc@(%oYTD&yK)scA?36B7mndR9l*hNg!3?6>CR+tF1;6sr?V zzz8FBrZ@g4F_!O2igIGZcWd zRe_0*{d6cyy9QQ(|Ct~WTM1pC3({5qHahk*M*O}IPE6icikx48VZ?!0Oc^FVoq`}eu~ zpRq0MYHaBA-`b_BVID}|oo-bem76;B2zo7j7yz(9JiSY6JTjKz#+w{9mc{&#x}>E? zSS3mY$_|scfP3Mo_F5x;r>y&Mquy*Q1b3eF^*hg3tap~%?@ASeyodYa=dF&k=ZyWy z3C+&C95h|9TAVM~-8y(&xcy0nvl}6B*)j0FOlSz%+bK-}S4;F?P`j55*+ZO0Ogk7D z5q30zE@Nup4lqQoG`L%n{T?qn9&WC94%>J`KU{gHIq?n_L;75kkKyib;^?yXUx6BO zju%DyU(l!Vj(3stJ>!pMZ*NZFd60%oSAD1JUXG0~2GCXpB0Am(YPyhzQda-e)b^+f zzFaEZdVTJRJXPJo%w z$?T;xq^&(XjmO>0bNGsT|1{1UqGHHhasPC;H!oX52(AQ7h9*^npOIRdQbNrS0X5#5G?L4V}WsAYcpq-+JNXhSl)XbxZ)L@5Q+?wm{GAU z9a7X8hAjAo;4r_eOdZfXGL@YpmT|#qECEcPTQ;nsjIkQ;!0}g?T>Zr*Fg}%BZVA)4 zCAzvWr?M&)KEk`t9eyFi_GlPV9a2kj9G(JgiZadd_&Eb~#DyZ%2Zcvrda_A47G&uW z^6TnBK|th;wHSo8ivpScU?AM5HDu2+ayzExMJc@?4{h-c`!b($ExB`ro#vkl<;=BA z961c*n(4OR!ebT*7UV7sqL;rZ3+Z)BYs<1I|9F|TOKebtLPxahl|ZXxj4j!gjj!3*+iSb5Zni&EKVt$S{0?2>A}d@3PSF3LUu)5 z*Y#a1uD6Y!$=_ghsPrOqX!OcIP`IW};tZzx1)h_~mgl;0=n zdP|Te_7)~R?c9s>W(-d!@nzQyxqakrME{Tn@>0G)kqV<4;{Q?Z-M)E-|IFLTc}WQr z1Qt;u@_dN2kru_9HMtz8MQx1aDYINH&3<+|HA$D#sl3HZ&YsjfQBv~S>4=u z7gA2*X6_cI$2}JYLIq`4NeXTz6Q3zyE717#>RD&M?0Eb|KIyF;xj;+3#DhC-xOj~! 
z$-Kx#pQ)_$eHE3Zg?V>1z^A%3jW0JBnd@z`kt$p@lch?A9{j6hXxt$(3|b>SZiBxOjA%LsIPii{=o(B`yRJ>OK;z_ELTi8xHX)il z--qJ~RWsZ%9KCNuRNUypn~<2+mQ=O)kd59$Lul?1ev3c&Lq5=M#I{ zJby%%+Top_ocqv!jG6O6;r0Xwb%vL6SP{O(hUf@8riADSI<|y#g`D)`x^vHR4!&HY`#TQMqM`Su}2(C|KOmG`wyK>uh@3;(prdL{2^7T3XFGznp{-sNLLJH@mh* z^vIyicj9yH9(>~I-Ev7p=yndfh}l!;3Q65}K}()(jp|tC;{|Ln1a+2kbctWEX&>Vr zXp5=#pw)@-O6~Q|><8rd0>H-}0Nsc|J6TgCum{XnH2@hFB09FsoZ_ow^Nv@uGgz3# z<6dRDt1>>-!kN58&K1HFrgjTZ^q<>hNI#n8=hP&pKAL4uDcw*J66((I?!pE0fvY6N zu^N=X8lS}(=w$O_jlE(;M9F={-;4R(K5qa=P#ZVW>}J&s$d0?JG8DZJwZcx3{CjLg zJA>q-&=Ekous)vT9J>fbnZYNUtvox|!Rl@e^a6ue_4-_v=(sNB^I1EPtHCFEs!>kK6B@-MS!(B zST${=v9q6q8YdSwk4}@c6cm$`qZ86ipntH8G~51qIlsYQ)+2_Fg1@Y-ztI#aa~tFD_QUxb zU-?g5B}wU@`tnc_l+B^mRogRghXs!7JZS=A;In1|f(1T(+xfIi zvjccLF$`Pkv2w|c5BkSj>>k%`4o6#?ygojkV78%zzz`QFE6nh{(SSJ9NzVdq>^N>X zpg6+8u7i(S>c*i*cO}poo7c9%i^1o&3HmjY!s8Y$5aO(!>u1>-eai0;rK8hVzIh8b zL53WCXO3;=F4_%CxMKRN^;ggC$;YGFTtHtLmX%@MuMxvgn>396~ zEp>V(dbfYjBX^!8CSg>P2c5I~HItbe(dl^Ax#_ldvCh;D+g6-%WD|$@S6}Fvv*eHc zaKxji+OG|_KyMe2D*fhP<3VP0J1gTgs6JZjE{gZ{SO-ryEhh;W237Q0 z{yrDobsM6S`bPMUzr|lT|99m6XDI$RzW4tQ$|@C2RjhBYPliEXFV#M*5G4;Kb|J8E z0IH}-d^S-53kFRZ)ZFrd2%~Sth-6BN?hnMa_PC4gdWyW3q-xFw&L^x>j<^^S$y_3_ zdZxouw%6;^mg#jG@7L!g9Kdw}{w^X9>TOtHgxLLIbfEG^Qf;tD=AXozE6I`XmOF=# zGt$Wl+7L<8^VI-eSK%F%dqXieK^b!Z3yEA$KL}X@>fD9)g@=DGt|=d(9W%8@Y@!{PI@`Nd zyF?Us(0z{*u6|X?D`kKSa}}Q*HP%9BtDEA^buTlI5ihwe)CR%OR46b+>NakH3SDbZmB2X>c8na&$lk zYg$SzY+EXtq2~$Ep_x<~+YVl<-F&_fbayzTnf<7?Y-un3#+T~ahT+eW!l83sofNt; zZY`eKrGqOux)+RMLgGgsJdcA3I$!#zy!f<$zL0udm*?M5w=h$Boj*RUk8mDPVUC1RC8A`@7PgoBIU+xjB7 z25vky+^7k_|1n1&jKNZkBWUu1VCmS}a|6_+*;fdUZAaIR4G!wv=bAZEXBhcjch6WH zdKUr&>z^P%_LIx*M&x{!w|gij?nigT8)Ol3VicXRL0tU}{vp2fi!;QkVc#I38op3O z=q#WtNdN{x)OzmH;)j{cor)DQ;2%m>xMu_KmTisaeCC@~rQwQTfMml7FZ_ zU2AR8yCY_CT$&IAn3n#Acf*VKzJD8-aphMg(12O9cv^AvLQ9>;f!4mjyxq_a%YH2+{~=3TMNE1 z#r3@ynnZ#p?RCkPK36?o{ILiHq^N5`si(T_cKvO9r3^4pKG0AgDEB@_72(2rvU^-; 
z%&@st2+HjP%H)u50t81p>(McL{`dTq6u-{JM|d=G1&h-mtjc2{W0%*xuZVlJpUSP-1=U6@5Q#g(|nTVN0icr-sdD~DWR=s}`$#=Wa zt5?|$`5`=TWZevaY9J9fV#Wh~Fw@G~0vP?V#Pd=|nMpSmA>bs`j2e{)(827mU7rxM zJ@ku%Xqhq!H)It~yXm=)6XaPk=$Rpk*4i4*aSBZe+h*M%w6?3&0>>|>GHL>^e4zR!o%aGzUn40SR+TdN%=Dbn zsRfXzGcH#vjc-}7v6yRhl{V5PhE-r~)dnmNz=sDt?*1knNZ>xI5&vBwrosF#qRL-Y z;{W)4W&cO0XMKy?{^d`Xh(2B?j0ioji~G~p5NQJyD6vouyoFE9w@_R#SGZ1DR4GnN z{b=sJ^8>2mq3W;*u2HeCaKiCzK+yD!^i6QhTU5npwO+C~A#5spF?;iuOE>o&p3m1C zmT$_fH8v+5u^~q^ic#pQN_VYvU>6iv$tqx#Sulc%|S7f zshYrWq7IXCiGd~J(^5B1nGMV$)lo6FCTm1LshfcOrGc?HW7g>pV%#4lFbnt#94&Rg{%Zbg;Rh?deMeOP(du*)HryI zCdhO$3|SeaWK<>(jSi%qst${Z(q@{cYz7NA^QO}eZ$K@%YQ^Dt4CXzmvx~lLG{ef8 zyckIVSufk>9^e_O7*w2z>Q$8me4T~NQDq=&F}Ogo#v1u$0xJV~>YS%mLVYqEf~g*j zGkY#anOI9{(f4^v21OvYG<(u}UM!-k;ziH%GOVU1`$0VuO@Uw2N{$7&5MYjTE?Er) zr?oZAc~Xc==KZx-pmoh9KiF_JKU7u0#b_}!dWgC>^fmbVOjuiP2FMq5OD9+4TKg^2 z>y6s|sQhI`=fC<>BnQYV433-b+jBi+N6unz%6EQR%{8L#=4sktI>*3KhX+qAS>+K#}y5KnJ8YuOuzG(Ea5;$*1P$-9Z+V4guyJ#s) zRPH(JPN;Es;H72%c8}(U)CEN}Xm>HMn{n!d(=r*YP0qo*^APwwU5YTTeHKy#85Xj< zEboiH=$~uIVMPg!qbx~0S=g&LZ*IyTJG$hTN zv%2>XF``@S9lnLPC?|myt#P)%7?%e_j*aU4TbTyxO|3!h%=Udp;THL+^oPp<6;TLlIOa$&xeTG_a*dbRDy+(&n1T=MU z+|G5{2UprrhN^AqODLo$9Z2h(3^wtdVIoSk@}wPajVgIoZipRft}^L)2Y@mu;X-F{LUw|s7AQD-0!otW#W9M@A~08`o%W;Bq-SOQavG*e-sy8) zwtaucR0+64B&Pm++-m56MQ$@+t{_)7l-|`1kT~1s!swfc4D9chbawUt`RUOdoxU|j z$NE$4{Ysr@2Qu|K8pD37Yv&}>{_I5N49a@0<@rGHEs}t zwh_+9T0oh@ptMbjy*kbz<&3>LGR-GNsT8{x1g{!S&V7{5tPYX(GF>6qZh>O&F)%_I zkPE-pYo3dayjNQAG+xrI&yMZy590FA1unQ*k*Zfm#f9Z5GljOHBj-B83KNIP1a?<^1vOhDJkma0o- zs(TP=@e&s6fRrU(R}{7eHL*(AElZ&80>9;wqj{|1YQG=o2Le-m!UzUd?Xrn&qd8SJ0mmEYtW;t(;ncW_j6 zGWh4y|KMK^s+=p#%fWxjXo434N`MY<8W`tNH-aM6x{@o?D3GZM&+6t4V3I*3fZd{a z0&D}DI?AQl{W*?|*%M^D5{E>V%;=-r&uQ>*e)cqVY52|F{ptA*`!iS=VKS6y4iRP6 zKUA!qpElT5vZvN}U5k-IpeNOr6KF`-)lN1r^c@HnT#RlZbi(;yuvm9t-Noh5AfRxL@j5dU-X37(?S)hZhRDbf5cbhDO5nSX@WtApyp` zT$5IZ*4*)h8wShkPI45stQH2Y7yD*CX^Dh@B%1MJSEn@++D$AV^ttKXZdQMU`rxiR 
z+M#45Z2+{N#uR-hhS&HAMFK@lYBWOzU^Xs-BlqQDyN4HwRtP2$kks@UhAr@wlJii%Rq?qy25?Egs z*a&iAr^rbJWlv+pYAVUq9lor}#Cm|D$_ev2d2Ko}`8kuP(ljz$nv3OCDc7zQp|j6W zbS6949zRvj`bhbO(LN3}Pq=$Ld3a_*9r_24u_n)1)}-gRq?I6pdHPYHgIsn$#XQi~ z%&m_&nnO9BKy;G%e~fa7i9WH#MEDNQ8WCXhqqI+oeE5R7hLZT_?7RWVzEGZNz4*Po ze&*a<^Q*ze72}UM&$c%FuuEIN?EQ@mnILwyt;%wV-MV+|d%>=;3f0(P46;Hwo|Wr0 z>&FS9CCb{?+lDpJMs`95)C$oOQ}BSQEv0Dor%-Qj0@kqlIAm1-qSY3FCO2j$br7_w zlpRfAWz3>Gh~5`Uh?ER?@?r0cXjD0WnTx6^AOFii;oqM?|M9QjHd*GK3WwA}``?dK15`ZvG>_nB2pSTGc{n2hYT6QF^+&;(0c`{)*u*X7L_ zaxqyvVm$^VX!0YdpSNS~reC+(uRqF2o>jqIJQkC&X>r8|mBHvLaduM^Mh|OI60<;G zDHx@&jUfV>cYj5+fAqvv(XSmc(nd@WhIDvpj~C#jhZ6@M3cWF2HywB1yJv2#=qoY| zIiaxLsSQa7w;4YE?7y&U&e6Yp+2m(sb5q4AZkKtey{904rT08pJpanm->Z75IdvW^ z!kVBy|CIUZn)G}92_MgoLgHa?LZJDp_JTbAEq8>6a2&uKPF&G!;?xQ*+{TmNB1H)_ z-~m@CTxDry_-rOM2xwJg{fcZ41YQDh{DeI$4!m8c;6XtFkFyf`fOsREJ`q+Bf4nS~ zKDYs4AE7Gugv?X)tu4<-M8ag{`4pfQ14z<(8MYQ4u*fl*DCpq66+Q1-gxNCQ!c$me zyTrmi7{W-MGP!&S-_qJ%9+e08_9`wWGG{i5yLJ;8qbt-n_0*Q371<^u@tdz|;>fPW zE=&q~;wVD_4IQ^^jyYX;2shIMiYdvIpIYRT>&I@^{kL9Ka2ECG>^l>Ae!GTn{r~o= z|I9=J#wNe)zYRqGZ7Q->L{dfewyC$ZYcLaoNormZ3*gfM=da*{heC)&46{yTS!t10 zn_o0qUbQOs$>YuY>YHi|NG^NQG<_@jD&WnZcW^NTC#mhVE7rXlZ=2>mZkx{bc=~+2 z{zVH=Xs0`*K9QAgq9cOtfQ^BHh-yr=qX8hmW*0~uCup89IJMvWy%#yt_nz@6dTS)L{O3vXye< zW4zUNb6d|Tx`XIVwMMgqnyk?c;Kv`#%F0m^<$9X!@}rI##T{iXFC?(ui{;>_9Din8 z7;(754q!Jx(~sb!6+6Lf*l{fqD7GW*v{>3wp+)@wq2abADBK!kI8To}7zooF%}g-z zJ1-1lp-lQI6w^bov9EfhpxRI}`$PTpJI3uo@ZAV729JJ2Hs68{r$C0U=!d$Bm+s(p z8Kgc(Ixf4KrN%_jjJjTx5`&`Ak*Il%!}D_V)GM1WF!k$rDJ-SudXd_Xhl#NWnET&e-P!rH~*nNZTzxj$?^oo3VWc-Ay^`Phze3(Ft!aNW-f_ zeMy&BfNCP^-FvFzR&rh!w(pP5;z1$MsY9Voozmpa&A}>|a{eu}>^2s)So>&kmi#7$ zJS_-DVT3Yi(z+ruKbffNu`c}s`Uo`ORtNpUHa6Q&@a%I%I;lm@ea+IbCLK)IQ~)JY zp`kdQ>R#J*i&Ljer3uz$m2&Un9?W=Ue|hHv?xlM`I&*-M;2{@so--0OAiraN1TLra z>EYQu#)Q@UszfJj&?kr%RraFyi*eG+HD_(!AWB;hPgB5Gd-#VDRxxv*VWMY0hI|t- zR=;TL%EKEg*oet7GtmkM zgH^y*1bfJ*af(_*S1^PWqBVVbejFU&#m`_69IwO!aRW>Rcp~+7w^ptyu>}WFYUf;) 
zZrgs;EIN9$Immu`$umY%$I)5INSb}aV-GDmPp!d_g_>Ar(^GcOY%2M)Vd7gY9llJR zLGm*MY+qLzQ+(Whs8-=ty2l)G9#82H*7!eo|B6B$q%ak6eCN%j?{SI9|K$u3)ORoz zw{bAGaWHrMb|X^!UL~_J{jO?l^}lI^|7jIn^p{n%JUq9{tC|{GM5Az3SrrPkuCt_W zq#u0JfDw{`wAq`tAJmq~sz`D_P-8qr>kmms>I|);7Tn zLl^n*Ga7l=U)bQmgnSo5r_&#Pc=eXm~W75X9Cyy0WDO|fbSn5 zLgpFAF4fa90T-KyR4%%iOq6$6BNs@3ZV<~B;7V=u zdlB8$lpe`w-LoS;0NXFFu@;^^bc?t@r3^XTe*+0;o2dt&>eMQeDit(SfDxYxuA$uS z**)HYK7j!vJVRNfrcokVc@&(ke5kJzvi};Lyl7@$!`~HM$T!`O`~MQ1k~ZH??fQr zNP)33uBWYnTntKRUT*5lu&8*{fv>syNgxVzEa=qcKQ86Vem%Lpae2LM=TvcJLs?`=o9%5Mh#k*_7zQD|U7;A%=xo^_4+nX{~b1NJ6@ z*=55;+!BIj1nI+)TA$fv-OvydVQB=KK zrGWLUS_Chm$&yoljugU=PLudtJ2+tM(xj|E>Nk?c{-RD$sGYNyE|i%yw>9gPItE{ zD|BS=M>V^#m8r?-3swQofD8j$h-xkg=F+KM%IvcnIvc)y zl?R%u48Jeq7E*26fqtLe_b=9NC_z|axW#$e0adI#r(Zsui)txQ&!}`;;Z%q?y2Kn! zXzFNe+g7+>>`9S0K1rmd)B_QVMD?syc3e0)X*y6(RYH#AEM9u?V^E0GHlAAR)E^4- zjKD+0K=JKtf5DxqXSQ!j?#2^ZcQoG5^^T+JaJa3GdFeqIkm&)dj76WaqGukR-*&`13ls8lU2ayVIR%;79HYAr5aEhtYa&0}l}eAw~qKjUyz4v*At z?})QplY`3cWB6rl7MI5mZx&#%I0^iJm3;+J9?RA(!JXjl?(XgmA-D#2cY-^?g1c*Q z3GVLh!8Jhe;QqecbMK#XIJxKMb=6dcs?1vbb?@ov-raj`hnYO92y8pv@>RVr=9Y-F zv`BK)9R6!m4Pfllu4uy0WBL+ZaUFFzbZZtI@J8{OoQ^wL-b$!FpGT)jYS-=vf~b-@ zIiWs7j~U2yI=G5;okQz%gh6}tckV5wN;QDbnu|5%%I(#)8Q#)wTq8YYt$#f9=id;D zJbC=CaLUyDIPNOiDcV9+=|$LE9v2;Qz;?L+lG{|g&iW9TI1k2_H;WmGH6L4tN1WL+ zYfSVWq(Z_~u~U=g!RkS|YYlWpKfZV!X%(^I3gpV%HZ_{QglPSy0q8V+WCC2opX&d@eG2BB#(5*H!JlUzl$DayI5_J-n zF@q*Fc-nlp%Yt;$A$i4CJ_N8vyM5fNN`N(CN53^f?rtya=p^MJem>JF2BEG|lW|E) zxf)|L|H3Oh7mo=9?P|Y~|6K`B3>T)Gw`0ESP9R`yKv}g|+qux(nPnU(kQ&&x_JcYg9+6`=; z-EI_wS~l{T3K~8}8K>%Ke`PY!kNt415_x?^3QOvX(QUpW&$LXKdeZM-pCI#%EZ@ta zv(q-(xXIwvV-6~(Jic?8<7ain4itN>7#AqKsR2y(MHMPeL)+f+v9o8Nu~p4ve*!d3 z{Lg*NRTZsi;!{QJknvtI&QtQM_9Cu%1QcD0f!Fz+UH4O#8=hvzS+^(e{iG|Kt7C#u zKYk7{LFc+9Il>d6)blAY-9nMd(Ff0;AKUo3B0_^J&ESV@4UP8PO0no7G6Gp_;Z;YnzW4T-mCE6ZfBy(Y zXOq^Of&?3#Ra?khzc7IJT3!%IKK8P(N$ST47Mr=Gv@4c!>?dQ-&uZihAL1R<_(#T8Y`Ih~soL6fi_hQmI%IJ5qN995<{<@_ z;^N8AGQE+?7#W~6X>p|t<4@aYC$-9R^}&&pLo+%Ykeo46-*Yc(%9>X>eZpb8(_p{6 
zwZzYvbi%^F@)-}5%d_z^;sRDhjqIRVL3U3yK0{Q|6z!PxGp?|>!%i(!aQODnKUHsk^tpeB<0Qt7`ZBlzRIxZMWR+|+ z3A}zyRZ%0Ck~SNNov~mN{#niO**=qc(faGz`qM16H+s;Uf`OD1{?LlH!K!+&5xO%6 z5J80-41C{6)j8`nFvDaeSaCu_f`lB z_Y+|LdJX=YYhYP32M556^^Z9MU}ybL6NL15ZTV?kfCFfpt*Pw5FpHp#2|ccrz#zoO zhs=+jQI4fk*H0CpG?{fpaSCmXzU8bB`;kCLB8T{_3t>H&DWj0q0b9B+f$WG=e*89l zzUE)b9a#aWsEpgnJqjVQETpp~R7gn)CZd$1B8=F*tl+(iPH@s9jQtE33$dBDOOr=% ziOpR8R|1eLI?Rn*d+^;_U#d%bi$|#obe0(-HdB;K>=Y=mg{~jTA_WpChe8QquhF`N z>hJ}uV+pH`l_@d>%^KQNm*$QNJ(lufH>zv9M`f+C-y*;hAH(=h;kp@eL=qPBeXrAo zE7my75EYlFB30h9sdt*Poc9)2sNP9@K&4O7QVPQ^m$e>lqzz)IFJWpYrpJs)Fcq|P z5^(gnntu!+oujqGpqgY_o0V&HL72uOF#13i+ngg*YvPcqpk)Hoecl$dx>C4JE4DWp z-V%>N7P-}xWv%9Z73nn|6~^?w$5`V^xSQbZceV<_UMM&ijOoe{Y^<@3mLSq_alz8t zr>hXX;zTs&k*igKAen1t1{pj94zFB;AcqFwV)j#Q#Y8>hYF_&AZ?*ar1u%((E2EfZ zcRsy@s%C0({v=?8oP=DML`QsPgzw3|9|C22Y>;=|=LHSm7~+wQyI|;^WLG0_NSfrf zamq!5%EzdQ&6|aTP2>X=Z^Jl=w6VHEZ@=}n+@yeu^ke2Yurrkg9up3g$0SI8_O-WQu$bCsKc(juv|H;vz6}%7ONww zKF%!83W6zO%0X(1c#BM}2l^ddrAu^*`9g&1>P6m%x{gYRB)}U`40r>6YmWSH(|6Ic zH~QNgxlH*;4jHg;tJiKia;`$n_F9L~M{GiYW*sPmMq(s^OPOKm^sYbBK(BB9dOY`0 z{0!=03qe*Sf`rcp5Co=~pfQyqx|umPHj?a6;PUnO>EZGb!pE(YJgNr{j;s2+nNV(K zDi#@IJ|To~Zw)vqGnFwb2}7a2j%YNYxe2qxLk)VWJIux$BC^oII=xv-_}h@)Vkrg1kpKokCmX({u=lSR|u znu_fA0PhezjAW{#Gu0Mdhe8F4`!0K|lEy+<1v;$ijSP~A9w%q5-4Ft|(l7UqdtKao zs|6~~nmNYS>fc?Nc=yzcvWNp~B0sB5ForO5SsN(z=0uXxl&DQsg|Y?(zS)T|X``&8 z*|^p?~S!vk8 zg>$B{oW}%rYkgXepmz;iqCKY{R@%@1rcjuCt}%Mia@d8Vz5D@LOSCbM{%JU#cmIp! z^{4a<3m%-p@JZ~qg)Szb-S)k{jv92lqB(C&KL(jr?+#ES5=pUH$(;CO9#RvDdErmW z3(|f{_)dcmF-p*D%qUa^yYngNP&Dh2gq5hr4J!B5IrJ?ODsw@*!0p6Fm|(ebRT%l) z#)l22@;4b9RDHl1ys$M2qFc;4BCG-lp2CN?Ob~Be^2wQJ+#Yz}LP#8fmtR%o7DYzoo1%4g4D+=HonK7b!3nvL0f1=oQp93dPMTsrjZRI)HX-T}ApZ%B#B;`s? 
z9Kng{|G?yw7rxo(T<* z1+O`)GNRmXq3uc(4SLX?fPG{w*}xDCn=iYo2+;5~vhWUV#e5e=Yfn4BoS@3SrrvV9 zrM-dPU;%~+3&>(f3sr$Rcf4>@nUGG*vZ~qnxJznDz0irB(wcgtyATPd&gSuX^QK@+ z)7MGgxj!RZkRnMSS&ypR94FC$;_>?8*{Q110XDZ)L);&SA8n>72s1#?6gL>gydPs` zM4;ert4-PBGB@5E` zBaWT=CJUEYV^kV%@M#3(E8>g8Eg|PXg`D`;K8(u{?}W`23?JgtNcXkUxrH}@H_4qN zw_Pr@g%;CKkgP(`CG6VTIS4ZZ`C22{LO{tGi6+uPvvHkBFK|S6WO{zo1MeK$P zUBe}-)3d{55lM}mDVoU@oGtPQ+a<=wwDol}o=o1z*)-~N!6t09du$t~%MlhM9B5~r zy|zs^LmEF#yWpXZq!+Nt{M;bE%Q8z7L8QJDLie^5MKW|I1jo}p)YW(S#oLf(sWn~* zII>pocNM5#Z+-n2|495>?H?*oyr0!SJIl(}q-?r`Q;Jbqqr4*_G8I7agO298VUr9x z8ZcHdCMSK)ZO@Yr@c0P3{`#GVVdZ{zZ$WTO zuvO4ukug&& ze#AopTVY3$B>c3p8z^Yyo8eJ+(@FqyDWlR;uxy0JnSe`gevLF`+ZN6OltYr>oN(ZV z>76nIiVoll$rDNkck6_eh%po^u16tD)JXcii|#Nn(7=R9mA45jz>v}S%DeMc(%1h> zoT2BlF9OQ080gInWJ3)bO9j$ z`h6OqF0NL4D3Kz?PkE8nh;oxWqz?<3_!TlN_%qy*T7soZ>Pqik?hWWuya>T$55#G9 zxJv=G&=Tm4!|p1#!!hsf*uQe}zWTKJg`hkuj?ADST2MX6fl_HIDL7w`5Dw1Btays1 zz*aRwd&>4*H%Ji2bt-IQE$>sbCcI1Poble0wL`LAhedGRZp>%>X6J?>2F*j>`BX|P zMiO%!VFtr_OV!eodgp-WgcA-S=kMQ^zihVAZc!vdx*YikuDyZdHlpy@Y3i!r%JI85$-udM6|7*?VnJ!R)3Qfm4mMm~Z#cvNrGUy|i0u zb|(7WsYawjBK0u1>@lLhMn}@X>gyDlx|SMXQo|yzkg-!wIcqfGrA!|t<3NC2k` zq;po50dzvvHD>_mG~>W0iecTf@3-)<$PM5W@^yMcu@U;)(^eu@e4jAX7~6@XrSbIE zVG6v2miWY^g8bu5YH$c2QDdLkg2pU8xHnh`EUNT+g->Q8Tp4arax&1$?CH($1W&*} zW&)FQ>k5aCim$`Ph<9Zt?=%|pz&EX@_@$;3lQT~+;EoD(ho|^nSZDh*M0Z&&@9T+e zHYJ;xB*~UcF^*7a_T)9iV5}VTYKda8n*~PSy@>h7c(mH~2AH@qz{LMQCb+-enMhX} z2k0B1JQ+6`?Q3Lx&(*CBQOnLBcq;%&Nf<*$CX2<`8MS9c5zA!QEbUz1;|(Ua%CiuL zF2TZ>@t7NKQ->O#!;0s;`tf$veXYgq^SgG>2iU9tCm5&^&B_aXA{+fqKVQ*S9=58y zddWqy1lc$Y@VdB?E~_B5w#so`r552qhPR649;@bf63_V@wgb!>=ij=%ptnsq&zl8^ zQ|U^aWCRR3TnoKxj0m0QL2QHM%_LNJ(%x6aK?IGlO=TUoS%7YRcY{!j(oPcUq{HP=eR1>0o^(KFl-}WdxGRjsT);K8sGCkK0qVe{xI`# z@f+_kTYmLbOTxRv@wm2TNBKrl+&B>=VaZbc(H`WWLQhT=5rPtHf)#B$Q6m1f8We^)f6ylbO=t?6Y;{?&VL|j$VXyGV!v8eceRk zl>yOWPbk%^wv1t63Zd8X^Ck#12$*|yv`v{OA@2;-5Mj5sk#ptfzeX(PrCaFgn{3*hau`-a+nZhuJxO;Tis51VVeKAwFML#hF9g26NjfzLs8~RiM_MFl1mgDOU 
z=ywk!Qocatj1Q1yPNB|FW>!dwh=aJxgb~P%%7(Uydq&aSyi?&b@QCBiA8aP%!nY@c z&R|AF@8}p7o`&~>xq9C&X6%!FAsK8gGhnZ$TY06$7_s%r*o;3Y7?CenJUXo#V-Oag z)T$d-V-_O;H)VzTM&v8^Uk7hmR8v0)fMquWHs6?jXYl^pdM#dY?T5XpX z*J&pnyJ<^n-d<0@wm|)2SW9e73u8IvTbRx?Gqfy_$*LI_Ir9NZt#(2T+?^AorOv$j zcsk+t<#!Z!eC|>!x&#l%**sSAX~vFU0|S<;-ei}&j}BQ#ekRB-;c9~vPDIdL5r{~O zMiO3g0&m-O^gB}<$S#lCRxX@c3g}Yv*l)Hh+S^my28*fGImrl<-nbEpOw-BZ;WTHL zgHoq&ftG|~ouV<>grxRO6Z%{!O+j`Cw_4~BIzrjpkdA5jH40{1kDy|pEq#7`$^m*? zX@HxvW`e}$O$mJvm+65Oc4j7W@iVe)rF&-}R>KKz>rF&*Qi3%F0*tz!vNtl@m8L9= zyW3%|X}0KsW&!W<@tRNM-R>~~QHz?__kgnA(G`jWOMiEaFjLzCdRrqzKlP1vYLG`Y zh6_knD3=9$weMn4tBD|5=3a9{sOowXHu(z5y^RYrxJK z|L>TUvbDuO?3=YJ55N5}Kj0lC(PI*Te0>%eLNWLnawD54geX5>8AT(oT6dmAacj>o zC`Bgj-RV0m3Dl2N=w3e0>wWWG5!mcal`Xu<(1=2$b{k(;kC(2~+B}a(w;xaHPk^@V zGzDR|pt%?(1xwNxV!O6`JLCM!MnvpbLoHzKziegT_2LLWAi4}UHIo6uegj#WTQLet z9Dbjyr{8NAk+$(YCw~_@Az9N|iqsliRYtR7Q|#ONIV|BZ7VKcW$phH9`ZAlnMTW&9 zIBqXYuv*YY?g*cJRb(bXG}ts-t0*|HXId4fpnI>$9A?+BTy*FG8f8iRRKYRd*VF_$ zoo$qc+A(d#Lx0@`ck>tt5c$L1y7MWohMnZd$HX++I9sHoj5VXZRZkrq`v@t?dfvC} z>0h!c4HSb8%DyeF#zeU@rJL2uhZ^8dt(s+7FNHJeY!TZJtyViS>a$~XoPOhHsdRH* zwW+S*rIgW0qSPzE6w`P$Jv^5dsyT6zoby;@z=^yWLG^x;e557RnndY>ph!qCF;ov$ ztSW1h3@x{zm*IMRx|3lRWeI3znjpbS-0*IL4LwwkWyPF1CRpQK|s42dJ{ddA#BDDqio-Y+mF-XcP-z4bi zAhfXa2=>F0*b;F0ftEPm&O+exD~=W^qjtv&>|%(4q#H=wbA>7QorDK4X3~bqeeXv3 zV1Q<>_Fyo!$)fD`fd@(7(%6o-^x?&+s=)jjbQ2^XpgyYq6`}ISX#B?{I$a&cRcW?X zhx(i&HWq{=8pxlA2w~7521v-~lu1M>4wL~hDA-j(F2;9ICMg+6;Zx2G)ulp7j;^O_ zQJIRUWQam(*@?bYiRTKR<;l_Is^*frjr-Dj3(fuZtK{Sn8F;d*t*t{|_lnlJ#e=hx zT9?&_n?__2mN5CRQ}B1*w-2Ix_=CF@SdX-cPjdJN+u4d-N4ir*AJn&S(jCpTxiAms zzI5v(&#_#YrKR?B?d~ge1j*g<2yI1kp`Lx>8Qb;aq1$HOX4cpuN{2ti!2dXF#`AG{ zp<iD=Z#qN-yEwLwE7%8w8&LB<&6{WO$#MB-|?aEc@S1a zt%_p3OA|kE&Hs47Y8`bdbt_ua{-L??&}uW zmwE7X4Y%A2wp-WFYPP_F5uw^?&f zH%NCcbw_LKx!c!bMyOBrHDK1Wzzc5n7A7C)QrTj_Go#Kz7%+y^nONjnnM1o5Sw(0n zxU&@41(?-faq?qC^kO&H301%|F9U-Qm(EGd3}MYTFdO+SY8%fCMTPMU3}bY7ML1e8 zrdOF?E~1uT)v?UX(XUlEIUg3*UzuT^g@QAxEkMb#N#q0*;r 
zF6ACHP{ML*{Q{M;+^4I#5bh#c)xDGaIqWc#ka=0fh*_Hlu%wt1rBv$B z%80@8%MhIwa0Zw$1`D;Uj1Bq`lsdI^g_18yZ9XUz2-u6&{?Syd zHGEh-3~HH-vO<)_2^r|&$(q7wG{@Q~un=3)Nm``&2T99L(P+|aFtu1sTy+|gwL*{z z)WoC4rsxoWhz0H$rG|EwhDT z0zcOAod_k_Ql&Y`YV!#&Mjq{2ln|;LMuF$-G#jX_2~oNioTHb4GqFatn@?_KgsA7T z(ouy$cGKa!m}6$=C1Wmb;*O2p*@g?wi-}X`v|QA4bNDU*4(y8*jZy-Ku)S3iBN(0r ztfLyPLfEPqj6EV}xope=?b0Nyf*~vDz-H-Te@B`{ib?~F<*(MmG+8zoYS77$O*3vayg#1kkKN+Bu9J9;Soev<%2S&J zr8*_PKV4|?RVfb#SfNQ;TZC$8*9~@GR%xFl1 z3MD?%`1PxxupvVO>2w#8*zV<-!m&Lis&B>)pHahPQ@I_;rY~Z$1+!4V1jde&L8y0! zha7@F+rOENF{~0$+a~oId0R|_!PhO=8)$>LcO)ca6YeOQs?ZG;`4O`x=Pd??Bl?Qf zgkaNj7X5@3_==zlQ-u6?omteA!_e-6gfDtw6CBnP2o1wo-7U!Y@89rU1HFb|bIr!I z=qIz=AW(}L^m z=I9RiS{DRtTYS6jsnvt1zs)W;kSVFOK|WMyZ@dxs+8{*W9-aTmS79J4R{Cis>EIqS zw+~gJqwz)(!z>)KDyhS{lM*xQ-8mNvo$A=IwGu+iS564tgX`|MeEuis!aN-=7!L&e zhNs;g1MBqDyx{y@AI&{_)+-?EEg|5C*!=OgD#$>HklRVU+R``HYZZq5{F9C0KKo!d z$bE2XC(G=I^YUxYST+Hk>0T;JP_iAvCObcrPV1Eau865w6d^Wh&B?^#h2@J#!M2xp zLGAxB^i}4D2^?RayxFqBgnZ-t`j+~zVqr+9Cz9Rqe%1a)c*keP#r54AaR2*TH^}7j zmJ48DN);^{7+5|+GmbvY2v#qJy>?$B(lRlS#kyodlxA&Qj#9-y4s&|eq$5} zgI;4u$cZWKWj`VU%UY#SH2M$8?PjO-B-rNPMr=8d=-D(iLW#{RWJ}@5#Z#EK=2(&LvfW&{P4_jsDr^^rg9w#B7h`mBwdL9y)Ni;= zd$jFDxnW7n-&ptjnk#<0zmNNt{;_30vbQW!5CQ7SuEjR1be!vxvO53!30iOermrU1 zXhXaen8=4Q(574KO_h$e$^1khO&tQL59=)Dc^8iPxz8+tC3`G$w|yUzkGd%Wg4(3u zJ<&7r^HAaEfG?F8?2I64j4kPpsNQk7qBJa9_hFT;*j;A%H%;QI@QWqJaiOl=;u>G8 zG`5Ow4K5ifd=OS|7F;EFc1+GzLld0RCQxG>Fn?~5Wl5VHJ=$DeR-2zwBgzSrQsGG0 zBqrILuB+_SgLxh~S~^QNHWW(2P;Z?d!Rd1lnEM=z23xPzyrbO_L0k43zruDkrJO*D zlzN(peBMLji`xfgYUirul-7c#3t(*=x6A^KSU-L|$(0pp9A*43#=Q!cu%9ZHP!$J| zSk8k=Z8cl811Vvn(4p8xx+EdKQV(sjC4_mEvlWeuIfwEVcF2LiC{H!oW)LSW=0ul| zT?$5PCc(pf-zKzUH`p7I7coVvCK;Dv-3_c?%~bPz`#ehbfrSrFf{RAz0I5e*W1S)kTW{0gf5X2v2k=S=W{>pr44tQ?o` zih8gE29VGR_SL~YJtcA)lRLozPg!<3Mh(`Hp)5{bclb)reTScXzJ>7{?i^yR@{(^% z#=$BYXPIX%fhgsofP-T`3b<5#V(TTS)^$vlhV&Kn=(LXOTAADIR1v8UqmW5c`n`S% zC8SOW$e?>&0dwKD%Jt{+67PfCLnqX0{8K^(q_^^2#puPYPkJsyXWMa~?V?p5{flYi 
z-1!uqI2x%puPG)r7b8y+Pc0Z5C%aA6`Q1_?W9k!YbiVVJVJwGLL?)P0M&vo{^IgEE zrX3eTgrJl_AeXYmiciYX9OP?NPN%-7Ji%z3U`-iXX=T~OI0M=ek|5IvIsvXM$%S&v zKw{`Kj(JVc+Pp^?vLKEyoycfnk)Hd>et78P^Z*{#rBY~_>V7>{gtB$0G99nbNBt+r zyXvEg_2=#jjK+YX1A>cj5NsFz9rjB_LB%hhx4-2I73gr~CW_5pD=H|e`?#CQ2)p4& z^v?Dlxm-_j6bO5~eeYFZGjW3@AGkIxY=XB*{*ciH#mjQ`dgppNk4&AbaRYKKY-1CT z>)>?+ME)AcCM7RRZQsH5)db7y!&jY-qHp%Ex9N|wKbN$!86i>_LzaD=f4JFc6Dp(a z%z>%=q(sXlJ=w$y^|tcTy@j%AP`v1n0oAt&XC|1kA`|#jsW(gwI0vi3a_QtKcL+yh z1Y=`IRzhiUvKeZXH6>>TDej)?t_V8Z7;WrZ_7@?Z=HRhtXY+{hlY?x|;7=1L($?t3 z6R$8cmez~LXopZ^mH9=^tEeAhJV!rGGOK@sN_Zc-vmEr;=&?OBEN)8aI4G&g&gdOb zfRLZ~dVk3194pd;=W|Z*R|t{}Evk&jw?JzVERk%JNBXbMDX82q~|bv%!2%wFP9;~-H?={C1sZ( zuDvY5?M8gGX*DyN?nru)UvdL|Rr&mXzgZ;H<^KYvzIlet!aeFM@I?JduKj=!(+ zM7`37KYhd*^MrKID^Y1}*sZ#6akDBJyKna%xK%vLlBqzDxjQ3}jx8PBOmXkvf@B{@ zc#J;~wQ<6{B;``j+B!#7s$zONYdXunbuKvl@zvaWq;`v2&iCNF2=V9Kl|77-mpCp= z2$SxhcN=pZ?V{GW;t6s)?-cNPAyTi&8O0QMGo#DcdRl#+px!h3ayc*(VOGR95*Anj zL0YaiVN2mifzZ){X+fl`Z^P=_(W@=*cIe~BJd&n@HD@;lRmu8cx7K8}wPbIK)GjF> zQGQ2h#21o6b2FZI1sPl}9_(~R|2lE^h}UyM5A0bJQk2~Vj*O)l-4WC4$KZ>nVZS|d zZv?`~2{uPYkc?254B9**q6tS|>We?uJ&wK3KIww|zzSuj>ncI4D~K z1Y6irVFE{?D-|R{!rLhZxAhs+Ka9*-(ltIUgC;snNek4_5xhO}@+r9Sl*5=7ztnXO zAVZLm$Kdh&rqEtdxxrE9hw`aXW1&sTE%aJ%3VL3*<7oWyz|--A^qvV3!FHBu9B-Jj z4itF)3dufc&2%V_pZsjUnN=;s2B9<^Zc83>tzo)a_Q$!B9jTjS->%_h`ZtQPz@{@z z5xg~s*cz`Tj!ls3-hxgnX}LDGQp$t7#d3E}>HtLa12z&06$xEQfu#k=(4h{+p%aCg zzeudlLc$=MVT+|43#CXUtRR%h5nMchy}EJ;n7oHfTq6wN6PoalAy+S~2l}wK;qg9o zcf#dX>ke;z^13l%bwm4tZcU1RTXnDhf$K3q-cK576+TCwgHl&?9w>>_(1Gxt@jXln zt3-Qxo3ITr&sw1wP%}B>J$Jy>^-SpO#3e=7iZrXCa2!N69GDlD{97|S*og)3hG)Lk zuqxK|PkkhxV$FP45%z*1Z?(LVy+ruMkZx|(@1R(0CoS6`7FWfr4-diailmq&Q#ehn zc)b&*&Ub;7HRtFVjL%((d$)M=^6BV@Kiusmnr1_2&&aEGBpbK7OWs;+(`tRLF8x?n zfKJB3tB^F~N`_ak3^exe_3{=aP)3tuuK2a-IriHcWv&+u7p z_yXsd6kyLV@k=(QoSs=NRiKNYZ>%4wAF;2#iu1p^!6>MZUPd;=2LY~l2ydrx10b#OSAlltILY%OKTp{e{ zzNogSk~SJBqi<_wRa#JqBW8Ok=6vb%?#H(hG}Dv98{JST5^SSh>_GQ@UK-0J`6l#E 
za}X#ud0W?cp-NQE@jAx>NUv65U~%YYS%BC0Cr$5|2_A)0tW;(nqoGJUHG5R`!-{1M-4T{<^pOE!Dvyuu1x7?Wt#YIgq zA$Vwj`St+M#ZxJXXGkepIF6`xL&XPu^qiFlZcX+@fOAdQ9d(h{^xCiAWJ0Ixp~3&E z(WwdT$O$7ez?pw>Jf{`!T-205_zJv+y~$w@XmQ;CiL8d*-x_z~0@vo4|3xUermJ;Q z9KgxjkN8Vh)xZ2xhX0N@{~@^d@BLoYFW%Uys83=`15+YZ%KecmWXjVV2}YbjBonSh zVOwOfI7^gvlC~Pq$QDHMQ6_Pd10OV{q_Zai^Yg({5XysuT`3}~3K*8u>a2FLBQ%#_YT6$4&6(?ZGwDE*C-p8>bM?hj*XOIoj@C!L5) zH1y!~wZ^dX5N&xExrKV>rEJJjkJDq*$K>qMi`Lrq08l4bQW~!Fbxb>m4qMHu6weTiV6_9(a*mZ23kr9AM#gCGE zBXg8#m8{ad@214=#w0>ylE7qL$4`xm!**E@pw484-VddzN}DK2qg&W~?%hcv3lNHx zg(CE<2)N=p!7->aJ4=1*eB%fbAGJcY65f3=cKF4WOoCgVelH$qh0NpIka5J-6+sY* zBg<5!R=I*5hk*CR@$rY6a8M%yX%o@D%{q1Jn=8wAZ;;}ol>xFv5nXvjFggCQ_>N2} zXHiC~pCFG*oEy!h_sqF$^NJIpQzXhtRU`LR0yU;MqrYUG0#iFW4mbHe)zN&4*Wf)G zV6(WGOq~OpEoq##E{rC?!)8ygAaAaA0^`<8kXmf%uIFfNHAE|{AuZd!HW9C^4$xW; zmIcO#ti!~)YlIU4sH(h&s6}PH-wSGtDOZ+%H2gAO(%2Ppdec9IMViuwwWW)qnqblH9xe1cPQ@C zS4W|atjGDGKKQAQlPUVUi1OvGC*Gh2i&gkh0up%u-9ECa7(Iw}k~0>r*WciZyRC%l z7NX3)9WBXK{mS|=IK5mxc{M}IrjOxBMzFbK59VI9k8Yr$V4X_^wI#R^~RFcme2)l!%kvUa zJ{zpM;;=mz&>jLvON5j>*cOVt1$0LWiV>x)g)KKZnhn=%1|2E|TWNfRQ&n?vZxQh* zG+YEIf33h%!tyVBPj>|K!EB{JZU{+k`N9c@x_wxD7z~eFVw%AyU9htoH6hmo0`%kb z55c#c80D%0^*6y|9xdLG$n4Hn%62KIp`Md9Jhyp8)%wkB8<%RlPEwC&FL z;hrH(yRr(Ke$%TZ09J=gGMC3L?bR2F4ZU!}pu)*8@l(d9{v^^(j>y+GF*nGran5*M z{pl5ig0CVsG1etMB8qlF4MDFRkLAg4N=l{Sc*F>K_^AZQc{dSXkvonBI)qEN1*U&? 
zKqMr?Wu)q9c>U~CZUG+-ImNrU#c`bS?RpvVgWXqSsOJrCK#HNIJ+k_1Iq^QNr(j|~ z-rz67Lf?}jj^9Ik@VIMBU2tN{Ts>-O%5f?=T^LGl-?iC%vfx{}PaoP7#^EH{6HP!( zG%3S1oaiR;OmlKhLy@yLNns`9K?60Zg7~NyT0JF(!$jPrm^m_?rxt~|J2)*P6tdTU z25JT~k4RH9b_1H3-y?X4=;6mrBxu$6lsb@xddPGKA*6O`Cc^>Ul`f9c&$SHFhHN!* zjj=(Jb`P}R%5X@cC%+1ICCRh1^G&u548#+3NpYTVr54^SbFhjTuO-yf&s%r4VIU!lE!j(JzHSc9zRD_fw@CP0pkL(WX6 zn+}LarmQP9ZGF9So^+jr<(LGLlOxGiCsI^SnuC{xE$S;DA+|z+cUk=j^0ipB(WTZ} zR0osv{abBd)HOjc(SAV&pcP@37SLnsbtADj?bT#cPZq|?W1Ar;4Vg5m!l{@{TA~|g zXYOeU`#h-rT@(#msh%%kH>D=`aN}2Rysez?E@R6|@SB(_gS0}HC>83pE`obNA9vsH zSu^r>6W-FSxJA}?oTuH>-y9!pQg|*<7J$09tH=nq4GTx+5($$+IGlO^bptmxy#=)e zuz^beIPpUB_YK^?eb@gu(D%pJJwj3QUk6<3>S>RN^0iO|DbTZNheFX?-jskc5}Nho zf&1GCbE^maIL$?i=nXwi)^?NiK`Khb6A*kmen^*(BI%Kw&Uv4H;<3ib-2UwG{7M&* zn$qyi8wD9cKOuxWhRmFupwLuFn!G5Vj6PZ#GCNJLlTQuQ?bqAYd7Eva5YR~OBbIim zf(6yXS4pei1Bz4w4rrB6Ke~gKYErlC=l9sm*Zp_vwJe7<+N&PaZe|~kYVO%uChefr%G4-=0eSPS{HNf=vB;p~ z5b9O1R?WirAZqcdRn9wtct>$FU2T8p=fSp;E^P~zR!^C!)WHe=9N$5@DHk6(L|7s@ zcXQ6NM9Q~fan1q-u8{ez;RADoIqwkf4|6LfsMZK6h{ZUGYo>vD%JpY<@w;oIN-*sK zxp4@+d{zxe>Z-pH#_)%|d(AC`fa!@Jq)5K8hd71!;CEG|ZI{I2XI`X~n|ae;B!q{I zJDa#T+fRviR&wAN^Sl{z8Ar1LQOF&$rDs18h0{yMh^pZ#hG?c5OL8v07qRZ-Lj5(0 zjFY(S4La&`3IjOT%Jqx4z~08($iVS;M10d@q~*H=Py)xnKt(+G-*o33c7S3bJ8cmwgj45` zU|b7xCoozC!-7CPOR194J-m9N*g`30ToBo!Io?m>T)S{CusNZx0J^Hu6hOmvv;0~W zFHRYJgyRhP1sM_AQ%pkD!X-dPu_>)`8HunR4_v$4T78~R<})-@K2LBt03PBLnjHzuYY)AK?>0TJe9 zmmOjwSL%CTaLYvYlJ~|w?vc*R+$@vEAYghtgGhZ2LyF+UdOn+v^yvD9R%xbU$fUjK{{VQ4VL&&UqAFa>CZuX4kX zJ)njewLWfKXneB+r}Y$`ezzwDoRT3r{9(@=I3-z>8tT)n3whDyi(r*lAnxQJefj_x z-8lc=r!Vua{b}v;LT)oXW>~6Q03~RAp~R}TZq9sGbeUBMS)?ZrJqiu|E&ZE)uN1uL zXcAj3#aEz zzbcCF)+;Hia#OGBvOatkPQfE{*RtBlO1QFVhi+3q0HeuFa*p+Dj)#8Mq9yGtIx%0A znV5EmN(j!&b%kNz4`Vr-)mX_?$ng&M^a6loFO(G3SA!~eBUEY!{~>C|Ht1Q4cw)X5~dPiEYQJNg?B2&P>bU7N(#e5cr8qc7A{a7J9cdMcRx)N|?;$L~O|E)p~ zIC}oi3iLZKb>|@=ApsDAfa_<$0Nm<3nOPdr+8Y@dnb|u2S<7CUmTGKd{G57JR*JTo zb&?qrusnu}jb0oKHTzh42P00C{i^`v+g=n|Q6)iINjWk4mydBo 
zf0g=ikV*+~{rIUr%MXdz|9ebUP)<@zR8fgeR_rChk0<^^3^?rfr;-A=x3M?*8|RPz z@}DOF`aXXuZGih9PyAbp|DULSw8PJ`54io)ga6JG@Hgg@_Zo>OfJ)8+TIfgqu%877 z@aFykK*+|%@rSs-t*oAzH6Whyr=TpuQ}B0ptSsMg9p8@ZE5A6LfMk1qdsf8T^zkdC3rUhB$`s zBdanX%L3tF7*YZ4^A8MvOvhfr&B)QOWCLJ^02kw5;P%n~5e`sa6MG{E2N^*2ZX@ge zI2>ve##O?I}sWX)UqK^_bRz@;5HWp5{ziyg?QuEjXfMP!j zpr(McSAQz>ME?M-3NSoCn$91#_iNnULp6tD0NN7Z0s#G~-~xWZFWN-%KUVi^yz~-` zn;AeGvjLJ~{1p#^?$>zM4vu=3mjBI$(_tC~NC0o@6<{zS_*3nGfUsHr3Gdgn%XedF zQUP=j5Mb>9=#f7aPl;cm$=I0u*WP}aVE!lCYw2Ht{Z_j9mp1h>dHGKkEZP6f^6O@J zndJ2+rWjxp|3#<2oO=8v!oHMX{|Vb|^G~pU_A6=ckBQvt>o+dpgYy(D=VCj65GE&jJj{&-*iq?z)PHNee&-@Mie~#LD*={ex8h(-)<@|55 zUr(}L?mz#;d|mrD%zrh<-*=;5*7K$B`zPjJ%m2pwr*G6tf8tN%a

_x$+l{{cH8$W#CT diff --git a/furhat_skills/Conversation/gradle/wrapper/gradle-wrapper.properties b/furhat_skills/Conversation/gradle/wrapper/gradle-wrapper.properties index 273a260..a595206 100755 --- a/furhat_skills/Conversation/gradle/wrapper/gradle-wrapper.properties +++ b/furhat_skills/Conversation/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,5 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-6.9.4-bin.zip diff --git a/furhat_skills/Conversation/gradlew b/furhat_skills/Conversation/gradlew index cccdd3d..1b6c787 100755 --- a/furhat_skills/Conversation/gradlew +++ b/furhat_skills/Conversation/gradlew @@ -1,78 +1,129 @@ -#!/usr/bin/env sh +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ############################################################################## -## -## Gradle start up script for UN*X -## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# ############################################################################## # Attempt to set APP_HOME + # Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. 
-while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` +APP_BASE_NAME=${0##*/} # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! 
-x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -81,7 +132,7 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" + JAVACMD=java which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the @@ -89,84 +140,95 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=$((i+1)) + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. 
+ shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=$(save "$@") - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then - cd "$(dirname "$0")" -fi +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
+# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' exec "$JAVACMD" "$@" diff --git a/furhat_skills/Conversation/gradlew.bat b/furhat_skills/Conversation/gradlew.bat index f955316..ac1b06f 100755 --- a/furhat_skills/Conversation/gradlew.bat +++ b/furhat_skills/Conversation/gradlew.bat @@ -1,84 +1,89 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. 
- -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt index e702bb7..3aaa272 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt @@ -1,7 +1,8 @@ package furhatos.app.templateadvancedskill.flow -import furhatos.app.templateadvancedskill.flow.main.Idle import furhatos.app.templateadvancedskill.flow.main.DocumentWaitingToStart +import furhatos.app.templateadvancedskill.flow.main.Greeting +import furhatos.app.templateadvancedskill.flow.main.Idle import furhatos.app.templateadvancedskill.setting.* import furhatos.flow.kotlin.State import furhatos.flow.kotlin.furhat @@ -17,7 +18,7 @@ val Init: State = state { onEntry { /** start interaction */ when { - furhat.isVirtual() -> goto(DocumentWaitingToStart) // Convenient to bypass the need for user when running Virtual Furhat + furhat.isVirtual() -> goto(Greeting) // Convenient to bypass the need for user when running Virtual Furhat users.hasAny() -> { furhat.attend(users.random) goto(DocumentWaitingToStart) diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt index bff3dac..8de29f4 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt @@ -6,8 +6,12 @@ import okhttp3.MediaType.Companion.toMediaType import okhttp3.RequestBody.Companion.toRequestBody import org.json.JSONObject import 
java.net.ConnectException -import okio.IOException +import java.io.IOException import furhatos.nlu.common.* +import furhatos.app.templateadvancedskill.language.AppLanguage +import furhatos.app.templateadvancedskill.language.LangDetect +import furhatos.app.templateadvancedskill.language.LanguageManager +import furhatos.app.templateadvancedskill.language.setAppLanguage import furhatos.app.templateadvancedskill.flow.Parent import furhatos.gestures.Gestures import furhatos.app.templateadvancedskill.params.LOCAL_BACKEND_URL @@ -17,64 +21,113 @@ import furhatos.app.templateadvancedskill.nlu.UncertainResponseIntent import java.net.SocketTimeoutException data class Transcription(val content: String) - data class EngageRequest(val document: String, val answer: String) +/** Helper: choose EN/NO text based on language */ +private fun localized(en: String, no: String, lang: AppLanguage): String = + if (lang == AppLanguage.NO) no else en + +/** Helper: use the most recently set conversation language */ +private fun currentConversationLanguage(): AppLanguage = LanguageManager.current + // Document Q&A state, inheriting from Parent. fun documentInfoQnA(documentName: String): State = state(parent = Parent) { + var conversationCount = 0 var lastQuestion = "" var lastAnswer = "" var previousQuestions = mutableListOf() var previousAnswers = mutableListOf() - var userMood = "neutral" // Track user's mood + var userMood = "neutral" var lastGestureTime = 0L onEntry { - // Lock the attended user during this conversation. furhat.gesture(Gestures.Smile) - furhat.ask("Hello! I'm here to help you learn about $documentName. What would you like to know?") + + val lang = currentConversationLanguage() + val intro = localized( + en = "Hello! I'm here to help you learn about $documentName. What would you like to know?", + no = "Hei! Jeg er her for å hjelpe deg med $documentName. 
Hva vil du vite?", + lang = lang + ) + furhat.ask(intro) } onExit { - // Release the attention lock when leaving this state. furhat.gesture(Gestures.Wink) } onResponse { + val lang = currentConversationLanguage() furhat.gesture(Gestures.Smile) - furhat.say("Thank you for the interesting conversation! Goodbye!") + furhat.say( + localized( + en = "Thank you for the interesting conversation! Goodbye!", + no = "Takk for en interessant samtale! Ha det bra!", + lang = lang + ) + ) goto(Idle) } onResponse { - // Only end conversation if it's a clear "no" without additional context + val lang = currentConversationLanguage() if (it.text.matches(Regex("(?i)^(no|nope|nah| no goodbye)$"))) { furhat.gesture(Gestures.Nod) - furhat.say("Alright, thank you for the conversation. Goodbye!") + furhat.say( + localized( + en = "Alright, thank you for the conversation. Goodbye!", + no = "Greit, takk for praten. Ha det bra!", + lang = lang + ) + ) goto(Idle) } else { - // If it's a "no" with additional context, treat it as a regular response raise(it) } } onResponse { - // Handle uncertain responses by encouraging further discussion + val lang = currentConversationLanguage() + furhat.gesture(Gestures.Thoughtful) furhat.say { random { - +"That's an interesting perspective. Let me share what I know about this topic." - +"I understand your uncertainty. Let me provide some more information that might help." - +"That's a good point to explore further. Let me elaborate on this topic." + +localized( + en = "That's an interesting perspective. Let me share what I know about this topic.", + no = "Det er et interessant perspektiv. La meg fortelle det jeg vet om dette temaet.", + lang = lang + ) + +localized( + en = "I understand your uncertainty. Let me provide some more information that might help.", + no = "Jeg forstår at du er usikker. La meg gi litt mer informasjon som kan hjelpe.", + lang = lang + ) + +localized( + en = "That's a good point to explore further. 
Let me elaborate on this topic.", + no = "Det er et godt poeng å utforske videre. La meg utdype dette temaet.", + lang = lang + ) } } - // Add an engaging follow-up question + furhat.ask { random { - +"What specific aspect of this topic interests you the most?" - +"Would you like to explore a particular angle of this discussion?" - +"Is there a specific part you'd like me to focus on?" + +localized( + en = "What specific aspect of this topic interests you the most?", + no = "Hvilket aspekt ved dette temaet interesserer deg mest?", + lang = lang + ) + +localized( + en = "Would you like to explore a particular angle of this discussion?", + no = "Vil du utforske en bestemt vinkling av denne diskusjonen?", + lang = lang + ) + +localized( + en = "Is there a specific part you'd like me to focus on?", + no = "Er det en bestemt del du vil at jeg skal fokusere på?", + lang = lang + ) } } } @@ -82,37 +135,42 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { onResponse { val userQuestion = it.text.trim() conversationCount++ - - // Update user mood based on question content + + // Detect language from the *current* question and switch ASR/TTS + val detectedLanguage = LangDetect.detect(userQuestion) + setAppLanguage(detectedLanguage) + + val preferredLanguage = when (detectedLanguage) { + AppLanguage.EN -> "English" + AppLanguage.NO -> "Norwegian" + } + + // Mood detection stays as-is userMood = when { userQuestion.contains(Regex("(great|wonderful|amazing|excellent)", RegexOption.IGNORE_CASE)) -> "positive" userQuestion.contains(Regex("(bad|terrible|awful|horrible)", RegexOption.IGNORE_CASE)) -> "negative" else -> "neutral" } - - // Natural thinking gesture - only if the question is complex + if (userQuestion.split(" ").size > 5) { furhat.gesture(Gestures.GazeAway, priority = 1) } - - // Call the backend /ask endpoint to get an answer - val answer = callDocumentAgent(userQuestion) - - // Clean up the answer - remove URLs and extra whitespace, but 
keep the complete response + + // Call backend with preferred_language + val answer = callDocumentAgent(userQuestion, preferredLanguage) + val cleanAnswer = answer .replace(Regex("https?://\\S+"), "") .replace(Regex("\\s+"), " ") .trim() - - // Store the Q&A pair for context + previousQuestions.add(userQuestion) previousAnswers.add(cleanAnswer) lastQuestion = userQuestion lastAnswer = cleanAnswer - - // Add natural gestures while speaking, but less frequently + val currentTime = System.currentTimeMillis() - if (currentTime - lastGestureTime > 5000) { // Increased to 5 seconds between gestures + if (currentTime - lastGestureTime > 5000) { when (userMood) { "positive" -> furhat.gesture(Gestures.Smile, priority = 2) "negative" -> furhat.gesture(Gestures.ExpressSad, priority = 2) @@ -120,88 +178,135 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { } lastGestureTime = currentTime } - - // Speak the cleaned answer with natural pauses + furhat.say(cleanAnswer) - - // Generate a contextual follow-up based on conversation history and user mood + + // Localized follow-ups val followUpPrompt = when { - conversationCount == 1 -> { - // First follow-up: Focus on specific aspects mentioned in the answer - val keyAspects = cleanAnswer.split(".").take(2).joinToString(" ") - when (userMood) { - "positive" -> "What would you like to know more about?" - "negative" -> "Would you like me to explain that differently?" - else -> "What interests you most about that?" 
- } + conversationCount == 1 -> when (userMood) { + "positive" -> localized( + en = "What would you like to know more about?", + no = "Hva vil du vite mer om?", + lang = detectedLanguage + ) + "negative" -> localized( + en = "Would you like me to explain that differently?", + no = "Vil du at jeg skal forklare det på en annen måte?", + lang = detectedLanguage + ) + else -> localized( + en = "What interests you most about that?", + no = "Hva synes du er mest interessant med det?", + lang = detectedLanguage + ) } - conversationCount == 2 -> { - // Second follow-up: Connect to previous question - when (userMood) { - "positive" -> "Want to explore that further?" - "negative" -> "Would you like me to clarify anything?" - else -> "What would you like to know more about?" - } + + conversationCount == 2 -> when (userMood) { + "positive" -> localized( + en = "Want to explore that further?", + no = "Vil du utforske det videre?", + lang = detectedLanguage + ) + "negative" -> localized( + en = "Would you like me to clarify anything?", + no = "Vil du at jeg skal forklare noe nærmere?", + lang = detectedLanguage + ) + else -> localized( + en = "What would you like to know more about?", + no = "Hva vil du vite mer om?", + lang = detectedLanguage + ) } + else -> { - // Later follow-ups: Use conversation history for context try { val engagePrompt = callEngageUser(documentName, cleanAnswer) if (engagePrompt.isNotEmpty()) { - // Keep the API response simple and natural engagePrompt } else { - // Simple fallback based on mood when (userMood) { - "positive" -> "What would you like to explore next?" - "negative" -> "Would you like me to explain something else?" - else -> "What interests you most?" 
+ "positive" -> localized( + en = "What would you like to explore next?", + no = "Hva vil du utforske videre?", + lang = detectedLanguage + ) + "negative" -> localized( + en = "Would you like me to explain something else?", + no = "Vil du at jeg skal forklare noe annet?", + lang = detectedLanguage + ) + else -> localized( + en = "What interests you most?", + no = "Hva synes du er mest interessant?", + lang = detectedLanguage + ) } } } catch (e: Exception) { - // Simple fallback based on mood when (userMood) { - "positive" -> "What would you like to explore next?" - "negative" -> "Would you like me to explain something else?" - else -> "What interests you most?" + "positive" -> localized( + en = "What would you like to explore next?", + no = "Hva vil du utforske videre?", + lang = detectedLanguage + ) + "negative" -> localized( + en = "Would you like me to explain something else?", + no = "Vil du at jeg skal forklare noe annet?", + lang = detectedLanguage + ) + else -> localized( + en = "What interests you most?", + no = "Hva synes du er mest interessant?", + lang = detectedLanguage + ) } } } } - - // Ask the follow-up question with appropriate gesture + when (userMood) { "positive" -> furhat.gesture(Gestures.Smile, priority = 2) "negative" -> furhat.gesture(Gestures.ExpressSad, priority = 2) else -> furhat.gesture(Gestures.Nod, priority = 2) } + furhat.ask(followUpPrompt) } onNoResponse { + val lang = currentConversationLanguage() furhat.gesture(Gestures.ExpressSad) - furhat.ask("I didn't catch that. Could you please repeat your question?") + furhat.ask( + localized( + en = "I didn't catch that. Could you please repeat your question?", + no = "Jeg oppfattet ikke det. Kan du gjenta spørsmålet?", + lang = lang + ) + ) reentry() } } -// Helper function to call the /ask endpoint. -private fun callDocumentAgent(question: String): String { +// Helper function to call the /ask endpoint (now with preferred_language). 
+private fun callDocumentAgent(question: String, preferredLanguage: String): String { val baseUrl = AWS_BACKEND_URL val client = OkHttpClient.Builder() .connectTimeout(60, TimeUnit.SECONDS) .readTimeout(60, TimeUnit.SECONDS) .writeTimeout(60, TimeUnit.SECONDS) .build() + return try { val requestBody = JSONObject() .put("content", question) - .put("max_tokens", 2000) // Increased token limit - .put("temperature", 0.7) // Add temperature for more controlled generation - .put("top_p", 0.9) // Add top_p for better response quality + .put("preferred_language", preferredLanguage) + .put("max_tokens", 2000) + .put("temperature", 0.7) + .put("top_p", 0.9) .toString() .toRequestBody("application/json; charset=utf-8".toMediaType()) - + val request = Request.Builder() .url("$baseUrl/ask") .post(requestBody) @@ -212,48 +317,61 @@ private fun callDocumentAgent(question: String): String { println("Error response from backend: ${response.code} - ${response.message}") throw IOException("Unexpected response: $response") } - + val jsonResponse = response.body?.string() ?: throw IOException("Empty response") val jsonObject = JSONObject(jsonResponse) - - // Log the response length for debugging + val responseText = jsonObject.getString("response") println("Response length: ${responseText.length} characters") - - // Check for potential truncation - if (responseText.endsWith("...") || - responseText.endsWith(".") == false || - responseText.length > 1900) { // Close to max_tokens limit + + if (responseText.endsWith("...") || + !responseText.endsWith(".") || + responseText.length > 1900 + ) { println("Warning: Response might be truncated") - // You might want to handle this case differently, e.g., by requesting continuation } - + responseText } + } catch (e: ConnectException) { - println("Connection error: ${e.message}") - "I'm sorry, I cannot process your request right now. Please try again in a moment." 
+ val lang = currentConversationLanguage() + localized( + en = "I'm sorry, I cannot process your request right now. Please try again in a moment.", + no = "Beklager, jeg kan ikke behandle forespørselen din akkurat nå. Prøv igjen om litt.", + lang = lang + ) } catch (e: SocketTimeoutException) { - println("Timeout error: ${e.message}") - "I'm sorry, the request took too long to process. Please try asking your question again." + val lang = currentConversationLanguage() + localized( + en = "I'm sorry, the request took too long to process. Please try asking your question again.", + no = "Beklager, forespørselen tok for lang tid. Prøv å stille spørsmålet på nytt.", + lang = lang + ) } catch (e: Exception) { - println("Error processing question: ${e.message}") - "I apologize, but I encountered an error processing your question. Could you please rephrase it?" + val lang = currentConversationLanguage() + localized( + en = "I apologize, but I encountered an error processing your question. Could you please rephrase it?", + no = "Beklager, det oppstod en feil da jeg skulle behandle spørsmålet ditt. Kan du formulere det på en annen måte?", + lang = lang + ) } } -// Helper function to call the /engage endpoint. +// Helper function to call the /engage endpoint (unchanged logic, language handled by backend). 
private fun callEngageUser(documentName: String, answer: String): String { - val baseUrl = AWS_BACKEND_URL + val baseUrl = AWS_BACKEND_URL // or switch based on config if needed val client = OkHttpClient.Builder() .connectTimeout(30, TimeUnit.SECONDS) .readTimeout(30, TimeUnit.SECONDS) .writeTimeout(30, TimeUnit.SECONDS) .build() + return try { val map = JSONObject() map.put("document", documentName) map.put("answer", answer) + val requestBody = map.toString() .toRequestBody("application/json; charset=utf-8".toMediaType()) @@ -266,15 +384,11 @@ private fun callEngageUser(documentName: String, answer: String): String { if (!response.isSuccessful) throw IOException("Unexpected response: $response") val jsonResponse = response.body?.string() ?: throw IOException("Empty response") val jsonObject = JSONObject(jsonResponse) - try { - jsonObject.getString("prompt") - } catch (e: Exception) { - "" // Return empty string to trigger fallback question - } + jsonObject.optString("prompt", "") } } catch (e: ConnectException) { - "" // Return empty string to trigger fallback question + "" } catch (e: Exception) { - "" // Return empty string to trigger fallback question + "" } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt index 88f03ce..baa61a7 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt @@ -2,45 +2,60 @@ package furhatos.app.templateadvancedskill.flow.main import furhatos.flow.kotlin.* import furhatos.app.templateadvancedskill.flow.Parent -import furhatos.app.templateadvancedskill.params +import furhatos.app.templateadvancedskill.params.AWS_BACKEND_URL 
import okhttp3.MediaType.Companion.toMediaType import okhttp3.OkHttpClient import okhttp3.Request -import okhttp3.RequestBody +import okhttp3.RequestBody.Companion.toRequestBody import org.json.JSONObject import java.io.IOException -import okhttp3.* -import furhatos.app.templateadvancedskill.params.LOCAL_BACKEND_URL -import furhatos.app.templateadvancedskill.params.AWS_BACKEND_URL import java.net.ConnectException import java.net.SocketTimeoutException import java.util.concurrent.TimeUnit - val DocumentWaitingToStart: State = state(parent = Parent) { + onEntry { - furhat.ask("I'm ready to assist with your document questions. Could you please tell me what subject you're interested in, or simply the name of the document?") + furhat.ask( + "I'm ready to assist with your document questions. " + + "Could you please tell me what subject you're interested in, " + + "or simply the name of the document?" + ) } - // When any response is detected, transition to SelectDocument. + // When any response is detected, transition to document-specific Q&A. onResponse { -// goto(SelectDocument) val userInput = it.text.trim() + // Call the API endpoint /get_docs to perform document retrieval/classification. val bestDocName = callGetDocs(userInput) - // Transition to the Q&A state for the matched document. - goto(documentInfoQnA(bestDocName)) + + if (bestDocName.isNullOrBlank()) { + furhat.say( + "I'm having trouble finding a matching document right now. " + + "Could you try rephrasing the title or subject?" + ) + reentry() + } else { + goto(documentInfoQnA(bestDocName)) + } } onNoResponse { - furhat.ask("I didn't catch that. Please tell me the subject or the name of the document you're interested in.") + furhat.ask( + "I didn't catch that. Please tell me the subject or the name of the document you're interested in." 
+ ) reentry() } } -fun callGetDocs(userInput: String): String { - // Your FastAPI server's address (adjust if needed) +/** + * Calls the FastAPI /get_docs endpoint with the user input and + * returns the best matching document name (as provided by the backend). + */ +fun callGetDocs(userInput: String): String? { val url = "$AWS_BACKEND_URL/get_docs" + val client = OkHttpClient.Builder() .connectTimeout(30, TimeUnit.SECONDS) .readTimeout(30, TimeUnit.SECONDS) @@ -49,7 +64,8 @@ fun callGetDocs(userInput: String): String { // Build JSON payload. val jsonBody = """{"content":"$userInput"}""" - val body = RequestBody.create("application/json".toMediaType(), jsonBody) + val body = jsonBody.toRequestBody("application/json".toMediaType()) + val request = Request.Builder() .url(url) .post(body) @@ -62,9 +78,11 @@ fun callGetDocs(userInput: String): String { } val respString = response.body?.string() ?: throw IOException("Empty response body") val json = JSONObject(respString) + // Expecting backend to respond with {"response": ""} json.getString("response") } } catch (e: ConnectException) { + // Fallback value – you may want to handle this more gracefully in your flow "I'm sorry, I cannot connect to the server right now. Please try again later." } catch (e: SocketTimeoutException) { "I'm sorry, the server is taking too long to respond. Please try again later." 
diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt index 3a8270b..c5b199e 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt @@ -1,94 +1,23 @@ package furhatos.app.templateadvancedskill.flow.main -import furhatos.app.templateadvancedskill.flow.Parent -import furhatos.app.templateadvancedskill.flow.how_are_you.HowAreYou -import furhatos.app.templateadvancedskill.nlu.HowAreYouIntent -import furhatos.app.templateadvancedskill.nlu.NiceToMeetYouIntent +import furhatos.app.templateadvancedskill.language.AppLanguage +import furhatos.app.templateadvancedskill.language.I18n +import furhatos.app.templateadvancedskill.language.setAppLanguage import furhatos.flow.kotlin.* -import furhatos.gestures.Gestures -import furhatos.nlu.Response -import furhatos.nlu.common.Greeting -import furhatos.util.Language -/** - * Example state of a simple flow to greet a user. - * - */ -// define the state as a function to be able to pass arguments to it -fun GreetUser(response: Response<*>? = null): State = state(Parent) { +val Greeting : State = state { + onEntry { - if (response != null) raise(response) // raise any response that was passed on and handle the response here - else furhat.listen() // or start a listen to collect a response in this state - } - // Handle partial responses where the user said a greeting and something else. - onPartialResponse { - furhat.say { - random { - +"Hi!" - +"Hello!" - +"Hi there!" - } - } - // Raising the secondary intent will cause our triggers to handle the second part of the intent - // Also raising the response (it) allows for acting on information in the response - e.g. 
what user spoke - raise(it, it.secondaryIntent) - } - onResponse { - furhat.say { - random { - +"Hi!" - +"Hello!" - +"Hi there!" - } - } - goto(DocumentWaitingToStart) - } - onResponse { - furhat.say { - +"I feel" - random { - +"good" - +"pretty good" - } - Gestures.BigSmile - } - call(HowAreYou) // We'll return the pleasantries, but then end the conversation. - goto(DocumentWaitingToStart) - } - onResponse { - furhat.say { - random { - +"Nice too meet you too. " - +"My pleasure. " - +"Nice to see you as well. " - } - +Gestures.BigSmile - } - call(HowAreYou) // We'll return the pleasantries, but then end the conversation. - goto(DocumentWaitingToStart) - } - onResponse { - goto(DocumentWaitingToStart) - } - onNoResponse { - goto(DocumentWaitingToStart) - } + // Default ASR/TTS to English at startup + setAppLanguage(AppLanguage.EN) + + // Bilingual intro (English + Norwegian) + furhat.say(I18n.t("intro_bilingual")) -} + // If you have a specific document/topic, plug its name here + val docName = "this topic" + furhat.say(I18n.t("greet", docName)) -/** Run this to test the intents of this state from the run terminal in IntelliJ. **/ -fun main(args: Array) { - println("Type to test the intents of this state. (please ignore the initial error messages)") - while (true) { - println("Enter your user response...") - val utterance = readlnOrNull() - val results = GreetUser(null).getIntentClassifier(lang = Language.ENGLISH_US).classify(utterance!!) 
- if (results.isEmpty()) { - println("No match") - } else { - results.forEach { - println("Matched ${it.intents} with ${it.conf} confidence") - } - } + goto(DocumentWaitingToStart) } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/idle.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/idle.kt index 7f30bed..c7a24dc 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/idle.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/idle.kt @@ -4,6 +4,7 @@ import furhatos.flow.kotlin.State import furhatos.flow.kotlin.furhat import furhatos.flow.kotlin.onUserEnter import furhatos.flow.kotlin.state +import furhatos.app.templateadvancedskill.flow.main.Greeting val Idle: State = state { onEntry { @@ -12,7 +13,7 @@ val Idle: State = state { onUserEnter { furhat.attend(it) - goto(DocumentWaitingToStart) + goto(Greeting) } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/AppLanguage.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/AppLanguage.kt new file mode 100644 index 0000000..a61440a --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/AppLanguage.kt @@ -0,0 +1,6 @@ +package furhatos.app.templateadvancedskill.language + +enum class AppLanguage { + EN, + NO +} \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/I18n.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/I18n.kt new file mode 100644 index 0000000..5d58fe7 --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/I18n.kt @@ -0,0 +1,28 @@ +package 
furhatos.app.templateadvancedskill.language + +object I18n { + private val strings = mapOf( + AppLanguage.EN to mapOf( + "intro_bilingual" to "Hi! Hei! You can talk to me in English or Norwegian. Just start talking.", + "greet" to "I’m here to help you learn about %s. What would you like to know?", + "fallback" to "I didn’t catch that. Could you please repeat your question?", + "error_processing" to "I encountered an error processing your request. Please try again.", + "waiting_doc" to "I’ll start by looking at the document and then we can talk about it.", + "goodbye" to "Thank you for the interesting conversation! Goodbye!" + ), + AppLanguage.NO to mapOf( + "intro_bilingual" to "Hi! Hei! Du kan snakke med meg på engelsk eller norsk. Bare start å snakke.", + "greet" to "Jeg er her for å hjelpe deg med %s. Hva vil du vite?", + "fallback" to "Jeg oppfattet ikke det. Kan du gjenta spørsmålet?", + "error_processing" to "Jeg fikk en feil da jeg behandlet forespørselen din. Prøv igjen.", + "waiting_doc" to "Jeg starter med å se på dokumentet, så kan vi snakke om det etterpå.", + "goodbye" to "Takk for en hyggelig samtale! Ha det bra!" + ) + ) + + fun t(key: String, vararg args: Any?): String { + val langMap = strings[LanguageManager.current] ?: strings[AppLanguage.EN]!! + val raw = langMap[key] ?: strings[AppLanguage.EN]!![key]!! 
+ return if (args.isNotEmpty()) raw.format(*args) else raw + } +} \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt new file mode 100644 index 0000000..c37ac36 --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt @@ -0,0 +1,22 @@ +package furhatos.app.templateadvancedskill.language + +object LangDetect { + fun detect(text: String): AppLanguage { + val t = text.lowercase() + + // Check for Norwegian special characters + val norwegianChars = listOf('æ', 'ø', 'å') + if (norwegianChars.any { it in t }) { + return AppLanguage.NO + } + + // Common Norwegian words + val norwegianWords = listOf("ikke", "hvordan", "hva", "hvorfor", "forklar", "beskriv", "omtrent") + if (norwegianWords.any { t.contains(it) }) { + return AppLanguage.NO + } + + // Default: English + return AppLanguage.EN + } +} \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt new file mode 100644 index 0000000..9a81a5f --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt @@ -0,0 +1,35 @@ +package furhatos.app.templateadvancedskill.language + +import furhatos.flow.kotlin.FlowControlRunner +import furhatos.flow.kotlin.furhat +import furhatos.util.Language + +/** + * Tracks the current language of the robot (English or Norwegian). + */ +object LanguageManager { + var current: AppLanguage = AppLanguage.EN +} + +/** + * Extension function that can be called from any Furhat state (because "this" is a FlowControlRunner). 
+ * + * Sets Furhat's: + * - ASR language + * - TTS voice + * + * Usage inside a state: + * setAppLanguage(AppLanguage.NO) + */ +fun FlowControlRunner.setAppLanguage(lang: AppLanguage) { + if (LanguageManager.current == lang) return // No change needed + LanguageManager.current = lang + + when (lang) { + AppLanguage.EN -> + furhat.setVoice(Language.ENGLISH_US, "Matthew", true) + + AppLanguage.NO -> + furhat.setVoice(Language.NORWEGIAN, "Hans", true) + } +} \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt new file mode 100644 index 0000000..e69de29 diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt new file mode 100644 index 0000000..e69de29 diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/RAG/rag_flow.py b/my_furhat_backend/RAG/rag_flow.py index 825401e..930d1d1 100755 --- a/my_furhat_backend/RAG/rag_flow.py +++ b/my_furhat_backend/RAG/rag_flow.py @@ -98,9 +98,19 @@ def _initialize_embeddings(self): The initialized embedding model """ if self.hf: + # Auto-detect device: prefer MPS (macOS), then CUDA, then CPU + import torch + if torch.backends.mps.is_available(): + device = 'mps' + elif torch.cuda.is_available(): + device = 'cuda' + else: + device = 'cpu' + logger.info(f"Using device: {device} for embeddings") + return HuggingFaceEmbeddings( model_name="sentence-transformers/all-MiniLM-L6-v2", - model_kwargs={'device': 'cuda'}, + 
model_kwargs={'device': device}, encode_kwargs={'normalize_embeddings': True} ) else: diff --git a/my_furhat_backend/agents/document_agent.py b/my_furhat_backend/agents/document_agent.py index 428137c..143cb62 100755 --- a/my_furhat_backend/agents/document_agent.py +++ b/my_furhat_backend/agents/document_agent.py @@ -24,6 +24,15 @@ import uuid import shutil import sys +import langgraph.checkpoint.base as _checkpoint_base + +# Temporary compatibility shim for langgraph <-> langgraph-checkpoint mismatch. +# Older versions of langgraph-checkpoint (<2.0.13) do not expose +# `EXCLUDED_METADATA_KEYS`, but langgraph>=0.3.25 expects it during import. +# When missing, provide a conservative default so the runtime can proceed. +if not hasattr(_checkpoint_base, "EXCLUDED_METADATA_KEYS"): + _checkpoint_base.EXCLUDED_METADATA_KEYS = set() # type: ignore[attr-defined] + from langgraph.graph import StateGraph, START, END from langchain_core.messages import HumanMessage, AIMessage, ToolMessage, BaseMessage, SystemMessage from typing_extensions import TypedDict, Annotated, List @@ -46,6 +55,9 @@ from my_furhat_backend.utils.gpu_utils import print_gpu_status, clear_gpu_cache from my_furhat_backend.models.llm_factory import HuggingFaceLLM +# Set up logging +logger = logging.getLogger(__name__) + # Set up cache directories CACHE_DIR = config["HF_HOME"] os.makedirs(CACHE_DIR, exist_ok=True) @@ -232,32 +244,21 @@ class DocumentAgent: The workflow is implemented as a state graph with checkpointed memory for resumption. """ - def __init__(self, model_id: str = "Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"): - """ - Initialize the DocumentAgent. 
- - Args: - model_id (str): ID of the model to use for the chatbot - """ - print_gpu_status() - - self.memory = MemorySaver() - - # Initialize RAG with caching - self.rag_instance = RAG( - hf=True, - persist_directory=config["VECTOR_STORE_PATH"], - path_to_document=os.path.join(config["DOCUMENTS_PATH"], "NorwAi annual report 2023.pdf") - ) - - # Initialize chatbot with optimized settings - # Only pass the model_id and essential parameters + def __init__( + self, + model: str = "llama3.1:instruct", # Ollama model tag + base_url: str = "http://localhost:11434", + **kwargs + ): + # Default chatbot backend = Ollama self.chatbot = create_chatbot( - "llama", - model_id=model_id, - n_ctx=4096, # Reduced context window - n_batch=512, # Increased batch size - n_gpu_layers=32 # Use more GPU layers + "ollama", + model=model, + base_url=base_url, + num_ctx=8192, # typical context for Ollama + temperature=0.7, + top_p=0.9, + **kwargs ) self.llm = self.chatbot.llm @@ -278,6 +279,30 @@ def __init__(self, model_id: str = "Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"): self.graph = StateGraph(State) + # Initialize memory checkpointer for state persistence + self.memory = MemorySaver() + + # Initialize RAG instance for document retrieval and context gathering + # RAG is needed for: + # 1. Retrieving document context in the engage() method + # 2. Getting document context in retrieve_context() method + # 3. 
Listing available documents in check_uncertainty() method + # Note: RAG can work without documents (empty vector store), but requires langchain-huggingface + self.rag_instance = None + try: + # Check if langchain-huggingface is available (required for RAG) + from langchain_huggingface import HuggingFaceEmbeddings + self.rag_instance = RAG( + hf=True, # Use HuggingFace embeddings + persist_directory=config.get("VECTOR_STORE_PATH"), # Use configured vector store path + path_to_document=None # Can be set later or via config if needed (works fine without documents) + ) + logger.info("RAG instance initialized successfully (vector store may be empty if no documents loaded)") + except ImportError as e: + logger.warning(f"langchain-huggingface not available, RAG features disabled: {e}") + except Exception as e: + logger.warning(f"Failed to initialize RAG instance: {e}. Some features may not work.") + # Initialize caches with larger sizes self.question_cache = QuestionCache() self.context_cache = {} @@ -415,7 +440,11 @@ def retrieval_node(self, state: State) -> dict: return {"messages": messages} # Retrieve context from RAG - context = self.rag_instance.get_document_context(input_text) + if self.rag_instance is None: + logger.warning("RAG instance not initialized, returning empty context") + context = [] + else: + context = self.rag_instance.get_document_context(input_text) self.context_cache[input_text] = context # Create a ToolMessage with the retrieval results @@ -1086,6 +1115,9 @@ def engage(self, document_name: str, answer: str) -> str: Returns: str: A conversational follow-up question """ + if self.rag_instance is None: + raise ValueError("RAG instance is not initialized. 
Cannot retrieve document context.") + # Get document context from cache or retrieve it if document_name not in self.context_cache: self.context_cache[document_name] = self.rag_instance.get_document_context(document_name) @@ -1232,7 +1264,7 @@ def _determine_next_node(self, state: State) -> str: user_input = state.get("input", "").lower() # Check for document name mentions - if any(doc.lower() in user_input for doc in self.rag_instance.get_list_docs()): + if self.rag_instance is not None and any(doc.lower() in user_input for doc in self.rag_instance.get_list_docs()): # Clear conversation memory when switching documents self.conversation_memory = [] return "retrieval" diff --git a/my_furhat_backend/config/settings.py b/my_furhat_backend/config/settings.py index 49957b0..741c2f8 100755 --- a/my_furhat_backend/config/settings.py +++ b/my_furhat_backend/config/settings.py @@ -4,18 +4,22 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -MOUNT_DIR = Path("/mnt") +# Use local cache directory instead of /mnt (which is read-only on macOS) +PROJECT_ROOT = Path(BASE_DIR).parent +CACHE_DIR = PROJECT_ROOT / ".cache" +MOUNT_DIR = Path("/mnt") # Keep for reference, but use CACHE_DIR for local dev config = dotenv_values(".env") +# Default to local cache directory, but allow override via environment variables config.update({ - "HF_HOME": os.getenv("HF_HOME", str(MOUNT_DIR / "hf_cache")), - "TORCH_HOME": os.getenv("TORCH_HOME", str(MOUNT_DIR / "torch_cache")), - "VECTOR_STORE_PATH": os.getenv("VECTOR_STORE_PATH", str(MOUNT_DIR / "vector_store")), - "DOCUMENTS_PATH": os.getenv("DOCUMENTS_PATH", str(MOUNT_DIR / "documents")), - "MODEL_PATH": os.getenv("MODEL_PATH", str(MOUNT_DIR / "models")), - "GGUF_MODELS_PATH": os.getenv("GGUF_MODELS_PATH", str(MOUNT_DIR / "models/gguf")), + "HF_HOME": os.getenv("HF_HOME", str(CACHE_DIR / "hf_cache")), + "TORCH_HOME": os.getenv("TORCH_HOME", str(CACHE_DIR / "torch_cache")), + "VECTOR_STORE_PATH": 
os.getenv("VECTOR_STORE_PATH", str(CACHE_DIR / "vector_store")), + "DOCUMENTS_PATH": os.getenv("DOCUMENTS_PATH", str(CACHE_DIR / "documents")), + "MODEL_PATH": os.getenv("MODEL_PATH", str(CACHE_DIR / "models")), + "GGUF_MODELS_PATH": os.getenv("GGUF_MODELS_PATH", str(CACHE_DIR / "models/gguf")), "CUDA_VISIBLE_DEVICES": os.getenv("CUDA_VISIBLE_DEVICES", "0"), "PYTORCH_CUDA_ALLOC_CONF": os.getenv("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512") }) diff --git a/my_furhat_backend/main.py b/my_furhat_backend/main.py new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/memory/summarizer.py b/my_furhat_backend/memory/summarizer.py new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/models/chatbot_factory.py b/my_furhat_backend/models/chatbot_factory.py index 3503aea..f227bdb 100755 --- a/my_furhat_backend/models/chatbot_factory.py +++ b/my_furhat_backend/models/chatbot_factory.py @@ -158,6 +158,98 @@ def chatbot(self, state: dict) -> dict: state["messages"] = messages return state +class Chatbot_Ollama(BaseChatbot): + """ + Ollama-based chatbot implementation, with optional preferred_language. + Uses the Ollama backend added to llm_factory (create_llm('ollama', ...)). + """ + def __init__( + self, + model_instance=None, + model: str = "llama3.1:instruct", + base_url: str = "http://localhost:11434", + **kwargs + ): + if model_instance is not None: + self.llm = model_instance + else: + self.llm = create_llm("ollama", model=model, base_url=base_url, **kwargs) + + # (Optional) allow external control at runtime + def set_language(self, lang: str | None): + self._forced_language = lang # mutable, optional override + + def _infer_language(self, text: str) -> str | None: + """ + Super-light heuristic: detect Norwegian vs English vs fallback. + Swap to a real detector if you like. 
+ """ + if not text: + return None + t = text.lower() + # quick Norwegian markers + no_markers = ("å", "ø", "æ", "ikke", "også", "hvordan", "hva", "hvorfor", "forklar", "beskriv") + if any(m in t for m in no_markers): + return "Norwegian" + # quick French/Spanish markers (examples) + fr_markers = ("pourquoi", "comment", "était", "être", "œ") + es_markers = ("por qué", "cómo", "está", "ser") + if any(m in t for m in fr_markers): return "French" + if any(m in t for m in es_markers): return "Spanish" + # default: None -> let model pick, or your app can set state["preferred_language"] + return None + + def chatbot(self, state: dict) -> dict: + messages = state.get("messages", []) + if not messages: + raise ValueError("No messages found in state.") + + # 1) Runtime language decision (priority: explicit state -> forced -> inferred) + lang = state.get("preferred_language", None) + if lang is None: + lang = getattr(self, "_forced_language", None) + + if lang is None: + # infer from last HumanMessage + for m in reversed(messages): + if isinstance(m, HumanMessage): + lang = self._infer_language(m.content) + break + + # 2) Inject a per-turn SystemMessage if we have a language + if lang: + already_has_lang = any( + isinstance(m, SystemMessage) and "respond in" in (m.content or "").lower() + for m in messages + ) + if not already_has_lang: + messages = [SystemMessage( + content=( + f"Please respond in {lang}. " + "If structured output is required (e.g., JSON), DO NOT translate keys." 
+ ) + )] + messages + + # 3) Format & query as usual + prompt = format_chatml(messages) + response = self.llm.query(prompt) + + # 4) Normalize to text and append + if isinstance(response, AIMessage): + response_text = response.content + elif isinstance(response, str): + response_text = response + elif isinstance(response, dict): + response_text = response.get("content", "") + else: + response_text = str(response) + + messages.append(AIMessage(content=response_text)) + state["messages"] = messages + # Optionally persist the decided language back into state for next turn + state["preferred_language"] = lang or state.get("preferred_language") + return state + def create_chatbot(chatbot_type: str, **kwargs) -> BaseChatbot: """ Factory function to create a chatbot instance. @@ -176,7 +268,9 @@ def create_chatbot(chatbot_type: str, **kwargs) -> BaseChatbot: return Chatbot_HuggingFace(**kwargs) elif chatbot_type.lower() == "llama": return Chatbot_LlamaCpp(**kwargs) + elif chatbot_type.lower() == "ollama": + return Chatbot_Ollama(**kwargs) else: raise ValueError(f"Unsupported chatbot type: {chatbot_type}") -__all__ = ["create_chatbot", "Chatbot_HuggingFace", "Chatbot_LlamaCpp", "BaseChatbot"] +__all__ = ["create_chatbot", "Chatbot_HuggingFace", "Chatbot_LlamaCpp", "Chatbot_Ollama", "BaseChatbot"] diff --git a/my_furhat_backend/models/llm_factory.py b/my_furhat_backend/models/llm_factory.py index 0552423..97af264 100755 --- a/my_furhat_backend/models/llm_factory.py +++ b/my_furhat_backend/models/llm_factory.py @@ -16,6 +16,7 @@ from abc import ABC, abstractmethod import multiprocessing from langchain_community.chat_models import ChatLlamaCpp +from langchain_community.chat_models import ChatOllama from my_furhat_backend.config.settings import config from my_furhat_backend.utils.gpu_utils import setup_gpu, move_model_to_device, print_gpu_status, clear_gpu_cache from transformers import pipeline @@ -327,6 +328,85 @@ def bind_tools(self, tools: list, tool_schema: dict | str = 
None) -> None: """ self.chat_llm.bind_tools(tools) +class OllamaLLM(BaseLLM): + """ + LLM implementation using an Ollama server (http://localhost:11434 by default). + Suitable for multilingual models like llama3.1, mixtral, qwen2.5, etc. + Mirrors the query/bind_tools interface of other backends. + """ + + def __init__( + self, + model: str = "llama3.1:instruct", + base_url: str = "http://localhost:11434", + **kwargs + ): + """ + Args: + model: Ollama model name/tag (e.g., 'llama3.1:instruct', 'mixtral:8x7b-instruct', 'qwen2.5:14b-instruct'). + base_url: Ollama server URL. + **kwargs: Generation/runtime options (temperature, top_p, num_ctx, num_gpu, repeat_penalty, etc.). + """ + self.model = model + self.base_url = base_url + self.gen_kwargs = { + # Reasonable multilingual/chat defaults; override via **kwargs + "temperature": 0.8, + "top_p": 0.9, + "num_ctx": 8192, # increase if you need longer prompts + # You can pass num_gpu, num_thread, repeat_penalty, stop, etc. + } + self.gen_kwargs.update(kwargs) + + # Eager check that Ollama server is reachable (optional but helpful) + try: + r = requests.get(f"{self.base_url}/api/tags", timeout=2) + r.raise_for_status() + except Exception as e: + print(f"[OllamaLLM] Warning: Could not reach Ollama at {self.base_url}: {e}") + + # LangChain wrapper; keeps your interface consistent with ChatLlamaCpp + # ChatOllama accepts model/base_url and a dict of 'options' for runtime + self.chat_llm = ChatOllama( + model=self.model, + base_url=self.base_url, + # map gen kwargs into options LangChain forwards to Ollama + options=self.gen_kwargs + ) + + def __del__(self): + try: + clear_gpu_cache() + except Exception: + pass + + def bind_tools(self, tools: list, tool_schema: dict | str = None) -> None: + """ + Bind tool specs to this chat model (LangChain will convert tools into + the proper JSON schema format for tool calling). 
+ """ + try: + self.chat_llm = self.chat_llm.bind_tools(tools) + except Exception as e: + print(f"[OllamaLLM] bind_tools error: {e}") + + def query(self, text: str, tool: bool = False) -> str: + """ + Send a prompt to the Ollama model. If you've bound tools, LangChain will handle tool calling. + """ + try: + print_gpu_status() + # For Chat models, we can use .invoke with a simple human message + # If you prefer plain text, LangChain accepts string directly. + out = self.chat_llm.invoke(text) + print_gpu_status() + # ChatOllama returns a BaseMessage or string depending on version; + # extract text robustly: + return getattr(out, "content", str(out)) + except Exception as e: + print(f"[OllamaLLM] query error: {e}") + return "" + def create_llm(llm_type: str, **kwargs) -> BaseLLM: """ Factory function to create an instance of a language model. @@ -345,7 +425,9 @@ def create_llm(llm_type: str, **kwargs) -> BaseLLM: return HuggingFaceLLM(**kwargs) elif llm_type == "llama": return LlamaCcpLLM(**kwargs) + elif llm_type == "ollama": + return OllamaLLM(**kwargs) else: raise ValueError(f"Unsupported LLM type: {llm_type}") -__all__ = ["create_llm", "HuggingFaceLLM", "LlamaCcpLLM", "BaseLLM"] +__all__ = ["create_llm", "HuggingFaceLLM", "LlamaCcpLLM", "OllamaLLM", "BaseLLM"] diff --git a/my_furhat_backend/perception/face.py b/my_furhat_backend/perception/face.py new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/perception/language.py b/my_furhat_backend/perception/language.py new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/perception/session_state.py b/my_furhat_backend/perception/session_state.py new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/perception/voice.py b/my_furhat_backend/perception/voice.py new file mode 100644 index 0000000..e69de29 diff --git a/my_furhat_backend/perception/websocket_handler.py b/my_furhat_backend/perception/websocket_handler.py new file mode 100644 index 
0000000..e69de29 diff --git a/my_furhat_backend/pyproject.toml b/my_furhat_backend/pyproject.toml new file mode 100644 index 0000000..68acbb4 --- /dev/null +++ b/my_furhat_backend/pyproject.toml @@ -0,0 +1,47 @@ +[project] +name = "my-furhat-backend" +version = "0.1.0" +description = "AI Agent" +requires-python = ">=3.11,<4.0" +dependencies = [ + "requests>=2.32.3,<3.0.0", + "python-dotenv>=1.0.1,<2.0.0", + "pydantic>=2.10.6,<3.0.0", + "transformers @ git+https://github.com/huggingface/transformers.git", + # "langchain-huggingface>=0.1.2,<0.2.0", # Commented out: requires sentence-transformers which is incompatible with transformers 5.0.0.dev0 + "pydantic-settings>=2.7.1,<3.0.0", + "huggingface-hub>=1.0.0,<2.0.0", + "accelerate @ git+https://github.com/huggingface/accelerate.git", + "torch>=2.6.0,<3.0.0", + "gguf>=0.10.0", + "protobuf>=5.29.3,<6.0.0", + "fastapi>=0.115.8,<0.116.0", + "uvicorn>=0.34.0,<0.35.0", + "langgraph>=0.3.25,<0.4.0", + "langchain>=0.3.23,<0.4.0", + "langchain-community>=0.3.21,<0.4.0", + "langchain-chroma>=0.2.2,<0.3.0", + "pypdf>=5.4.0,<6.0.0" +] + +[project.optional-dependencies] +dev = ["pytest>=8.0.0,<9.0.0"] + +[tool.setuptools] +package-dir = {"my_furhat_backend" = "."} +packages = [ + "my_furhat_backend", + "my_furhat_backend.agents", + "my_furhat_backend.api", + "my_furhat_backend.api_clients", + "my_furhat_backend.config", + "my_furhat_backend.llm_tools", + "my_furhat_backend.models", + "my_furhat_backend.RAG", + "my_furhat_backend.utils", +] + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + diff --git a/pyproject.toml b/pyproject.toml index e931edc..17820cb 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ dependencies = [ "transformers @ git+https://github.com/huggingface/transformers.git", "langchain-huggingface (>=0.1.2,<0.2.0)", "pydantic-settings (>=2.7.1,<3.0.0)", - "huggingface-hub (>=0.28.1,<0.29.0)", + "huggingface-hub (>=1.0.0,<2.0.0)", "accelerate @ 
git+https://github.com/huggingface/accelerate.git", "torch (>=2.6.0,<3.0.0)", "gguf (>=0.10.0)", diff --git a/requirements_poetry.txt b/requirements_poetry.txt index b57d772..0c34262 100755 --- a/requirements_poetry.txt +++ b/requirements_poetry.txt @@ -39,7 +39,7 @@ httpcore==1.0.7 httptools==0.6.4 httpx==0.28.1 httpx-sse==0.4.0 -huggingface-hub==0.28.1 +huggingface-hub==1.0.0 humanfriendly==10.0 hypothesis==6.125.2 idna==3.10 @@ -53,14 +53,14 @@ joblib==1.4.2 jsonpatch==1.33 jsonpointer==3.0.0 kubernetes==32.0.1 -langchain==0.3.19 +langchain==0.3.23 langchain-chroma==0.2.2 -langchain-community==0.3.18 -langchain-core==0.3.40 -langchain-huggingface==0.1.2 -langchain-text-splitters==0.3.6 +langchain-community==0.3.21 +langchain-core==0.3.51 +# langchain-huggingface==0.1.2 # Commented out: requires sentence-transformers which is incompatible with transformers 5.0.0.dev0 +langchain-text-splitters==0.3.8 langchainhub==0.1.21 -langgraph==0.2.70 +langgraph==0.3.25 langgraph-checkpoint==2.0.12 langgraph-sdk==0.1.51 langsmith==0.3.11 @@ -74,7 +74,7 @@ monotonic==1.6 mpmath==1.3.0 msgpack==1.1.0 multidict==6.1.0 --e git+https://github.com/jeevanp03/my_furhat_backend.git@ac9626a550b801fa72f7108ba657166b68d9f953#egg=my_furhat_backend +-e ./my_furhat_backend mypy-extensions==1.0.0 networkx==3.4.2 numpy>=1.26.4,<2.2.3 @@ -105,7 +105,7 @@ pydantic==2.10.6 pydantic-settings==2.8.1 pydantic_core==2.27.2 Pygments==2.19.1 -pypdf==5.3.0 +pypdf==5.4.0 pyperclip==1.9.0 PyPika==0.48.9 pyproject_hooks==1.2.0 @@ -123,7 +123,7 @@ s3transfer==0.11.2 safetensors==0.5.3 scikit-learn==1.6.1 scipy==1.15.2 -sentence-transformers==3.4.1 +# sentence-transformers # Commented out: incompatible with transformers 5.0.0.dev0 (requires transformers<5.0.0) sentencepiece==0.2.0 shellingham==1.5.4 six==1.17.0 @@ -136,10 +136,9 @@ tavily-python==0.5.1 tenacity==9.0.0 threadpoolctl==3.5.0 tiktoken==0.9.0 -tokenizers==0.21.0 +tokenizers==0.22.1 torch==2.6.0 tqdm==4.67.1 -transformers>=4.41.0,<5.0.0 
typer==0.15.1 types-requests==2.32.0.20241016 typing-inspect==0.9.0 diff --git a/tests/test_language.py b/tests/test_language.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_memory_summarizer.py b/tests/test_memory_summarizer.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_perception_ws.py b/tests/test_perception_ws.py new file mode 100644 index 0000000..e69de29 From 31a8214fef17fe0de96c2a28ea1b88ae10814c38 Mon Sep 17 00:00:00 2001 From: johngreenough Date: Thu, 20 Nov 2025 16:52:57 +0100 Subject: [PATCH 2/3] updated, tested memory & perception --- furhat_skills/Conversation/build.gradle | 2 + .../flow/main/UserExtensions.kt | 25 ++ .../flow/main/conversation.kt | 47 +++ .../flow/main/greeting.kt | 75 +++- .../app/templateadvancedskill/nlu/intents.kt | 32 ++ .../perception/PerceptionClient.kt | 164 +++++++++ .../perception/UserProfile.kt | 47 +++ .../perception/UserState.kt | 33 ++ middleware/main.py | 125 +------ my_furhat_backend/__init__.py | 15 + my_furhat_backend/config/settings.py | 12 +- my_furhat_backend/memory/__init__.py | 3 + my_furhat_backend/memory/summarizer.py | 25 ++ my_furhat_backend/perception/__init__.py | 4 + my_furhat_backend/perception/face.py | 116 ++++++ my_furhat_backend/perception/language.py | 23 ++ my_furhat_backend/perception/session_state.py | 39 ++ my_furhat_backend/perception/voice.py | 105 ++++++ .../perception/websocket_handler.py | 338 ++++++++++++++++++ pyproject.toml | 4 + tests/test_language.py | 27 ++ tests/test_memory_summarizer.py | 45 +++ tests/test_perception_ws.py | 81 +++++ 23 files changed, 1271 insertions(+), 116 deletions(-) create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt create mode 100644 my_furhat_backend/memory/__init__.py create mode 100644 my_furhat_backend/perception/__init__.py diff --git a/furhat_skills/Conversation/build.gradle b/furhat_skills/Conversation/build.gradle index 
6204e8d..2800c6d 100755 --- a/furhat_skills/Conversation/build.gradle +++ b/furhat_skills/Conversation/build.gradle @@ -48,6 +48,8 @@ dependencies { implementation 'com.furhatrobotics.assets:StandardLibraryCollection:1.2.0' // Additional dependencies for HTTP calls, JSON processing, and coroutines implementation "com.squareup.okhttp3:okhttp:4.10.0" + implementation "com.fasterxml.jackson.core:jackson-annotations:2.15.2" + implementation "com.fasterxml.jackson.module:jackson-module-kotlin:2.15.2" implementation "org.json:json:20210307" implementation "org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.4" } diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt new file mode 100644 index 0000000..6e476e8 --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt @@ -0,0 +1,25 @@ +package furhatos.app.templateadvancedskill.flow.main + +import furhatos.app.templateadvancedskill.perception.PerceptionClient +import furhatos.records.User +import java.util.concurrent.ConcurrentHashMap + +private object PerceptionClientRegistry { + private val clients = ConcurrentHashMap() + + fun get(userId: String): PerceptionClient? = clients[userId] + + fun set(userId: String, client: PerceptionClient?) { + if (client == null) { + clients.remove(userId) + } else { + clients[userId] = client + } + } +} + +var User.perceptionClient: PerceptionClient? 
+ get() = PerceptionClientRegistry.get(id) + set(value) { + PerceptionClientRegistry.set(id, value) + } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt index 8de29f4..6070a2d 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt @@ -19,6 +19,9 @@ import furhatos.app.templateadvancedskill.params.AWS_BACKEND_URL import java.util.concurrent.TimeUnit import furhatos.app.templateadvancedskill.nlu.UncertainResponseIntent import java.net.SocketTimeoutException +import furhatos.app.templateadvancedskill.nlu.MyNameIsIntent +import furhatos.app.templateadvancedskill.perception.UserState + data class Transcription(val content: String) data class EngageRequest(val document: String, val answer: String) @@ -54,6 +57,8 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { } onExit { + // Close perception client when leaving the Q&A state + users.current.perceptionClient?.close() furhat.gesture(Gestures.Wink) } @@ -87,6 +92,33 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { } } + onResponse { + val name = it.intent.name?.toText() ?: return@onResponse + + val profile = UserState.currentProfile + if (profile != null) { + profile.name = name + } + + val client = users.current.perceptionClient + client?.sendNameUpdate( + userId = profile?.id, + name = name + ) + + val lang = currentConversationLanguage() + furhat.say( + localized( + en = "Nice to meet you, $name!", + no = "Hyggelig å møte deg, $name!", + lang = lang + ) + ) + + // Go back to the normal questioning flow + reentry() + } + onResponse { val lang = currentConversationLanguage() @@ -181,6 +213,21 @@ 
fun documentInfoQnA(documentName: String): State = state(parent = Parent) { furhat.say(cleanAnswer) + val client = users.current.perceptionClient + val profile = UserState.currentProfile + + val langCode = when (detectedLanguage) { + AppLanguage.EN -> "en" + AppLanguage.NO -> "no" + } + + client?.sendTurn( + userId = profile?.id, + language = langCode, + userText = userQuestion, + robotText = cleanAnswer + ) + // Localized follow-ups val followUpPrompt = when { conversationCount == 1 -> when (userMood) { diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt index c5b199e..9285442 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt @@ -3,21 +3,86 @@ package furhatos.app.templateadvancedskill.flow.main import furhatos.app.templateadvancedskill.language.AppLanguage import furhatos.app.templateadvancedskill.language.I18n import furhatos.app.templateadvancedskill.language.setAppLanguage +import furhatos.app.templateadvancedskill.perception.PerceptionClient import furhatos.flow.kotlin.* +import furhatos.flow.kotlin.Furhat +import furhatos.flow.kotlin.furhat.audiofeed.AudioFeedListener +import furhatos.flow.kotlin.furhat.camerafeed.CameraFeedListener +import furhatos.flow.kotlin.furhat.camerafeed.FaceData +import furhatos.flow.kotlin.users +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.SupervisorJob +import java.awt.image.BufferedImage -val Greeting : State = state { +private object PerceptionStreaming { + @Volatile + var activeClient: PerceptionClient? 
= null + @Volatile + private var listenersAttached: Boolean = false + + fun start(client: PerceptionClient) { + activeClient = client + } + + fun stop() { + activeClient = null + } + + fun ensureListeners(furhat: Furhat) { + if (listenersAttached) return + furhat.cameraFeed.addListener(PerceptionCameraListener) + furhat.audioFeed.addListener(PerceptionAudioListener) + listenersAttached = true + } +} + +private object PerceptionCameraListener : CameraFeedListener { + override fun cameraImage(image: BufferedImage, imageData: ByteArray, faces: List) { + val client = PerceptionStreaming.activeClient ?: return + val payload = byteArrayOf(0x01) + imageData + client.sendBinary(payload) + } +} + +private object PerceptionAudioListener : AudioFeedListener { + override fun audioData(data: ByteArray) { + val client = PerceptionStreaming.activeClient ?: return + val payload = byteArrayOf(0x02) + data + client.sendBinary(payload) + } +} + +private const val PERCEPTION_WS_URL = "ws://localhost:8000/ws/perception" +private val perceptionScope = CoroutineScope(SupervisorJob() + Dispatchers.IO) + +val Greeting: State = state { onEntry { - // Default ASR/TTS to English at startup - setAppLanguage(AppLanguage.EN) + val sessionId = "furhat-session-" + System.currentTimeMillis() + + val client = PerceptionClient( + backendWsUrl = PERCEPTION_WS_URL, + sessionId = sessionId, + robotId = "furhat-ntnu-01" + ) - // Bilingual intro (English + Norwegian) + users.current.perceptionClient = client + client.connect(perceptionScope) + PerceptionStreaming.start(client) + PerceptionStreaming.ensureListeners(furhat) + + setAppLanguage(AppLanguage.EN) furhat.say(I18n.t("intro_bilingual")) - // If you have a specific document/topic, plug its name here val docName = "this topic" furhat.say(I18n.t("greet", docName)) goto(DocumentWaitingToStart) } + + onExit { + users.current.perceptionClient?.close() + PerceptionStreaming.stop() + } } \ No newline at end of file diff --git 
a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt index 2b49b27..734deee 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt @@ -2,6 +2,8 @@ package furhatos.app.templateadvancedskill.nlu import furhatos.nlu.Intent import furhatos.util.Language +import furhatos.nlu.common.PersonName +import furhatos.app.templateadvancedskill.language.AppLanguage /** * Define intents to match a user utterance and assign meaning to what they said. @@ -90,4 +92,34 @@ class UncertainResponseIntent : Intent() { "I'm not sure what to make of it" ) } +} + +/** + * capture the user's name in both English and Norwegian. + */ +class MyNameIsIntent( + val name: PersonName? = null +) : Intent() { + + override fun getExamples(lang: Language): List { + return when (lang) { + Language.ENGLISH_US -> listOf( + "My name is @name", + "I am @name", + "I'm @name", + "You can call me @name", + "It's @name" + ) + Language.NORWEGIAN -> listOf( + "Jeg heter @name", + "Mitt navn er @name", + "Jeg er @name", + "Du kan kalle meg @name" + ) + else -> listOf( + "My name is @name", + "I am @name" + ) + } + } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt index e69de29..dcc7647 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt @@ -0,0 +1,164 @@ +package furhatos.app.templateadvancedskill.perception + +import 
com.fasterxml.jackson.module.kotlin.jacksonObjectMapper +import com.fasterxml.jackson.module.kotlin.readValue +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import okhttp3.* +import okio.ByteString.Companion.toByteString + +/** + * Client for the /ws/perception WebSocket endpoint. + */ +class PerceptionClient( + private val backendWsUrl: String, // e.g. "ws://localhost:8000/ws/perception" + private val sessionId: String, + private val robotId: String = "furhat-ntnu-01" +) { + + private val client = OkHttpClient() + private val mapper = jacksonObjectMapper() + + @Volatile + private var webSocket: WebSocket? = null + + /** + * Connect to the backend WebSocket in the given coroutine scope. + * This should be called once per Furhat interaction session. + */ + fun connect(scope: CoroutineScope) { + val request = Request.Builder() + .url(backendWsUrl) + .build() + + val listener = object : WebSocketListener() { + + override fun onOpen(ws: WebSocket, response: Response) { + webSocket = ws + sendHello() + } + + override fun onMessage(ws: WebSocket, text: String) { + handleIncomingMessage(text) + } + + override fun onFailure(ws: WebSocket, t: Throwable, response: Response?) { + // Log as needed; for now just print + println("Perception WS failure: ${t.message}") + } + + override fun onClosed(ws: WebSocket, code: Int, reason: String) { + println("Perception WS closed: $code / $reason") + } + } + + scope.launch(Dispatchers.IO) { + client.newWebSocket(request, listener) + } + } + + /** + * Send "hello" message once connection opens. + */ + private fun sendHello() { + val msg = mapOf( + "type" to "hello", + "payload" to mapOf( + "session_id" to sessionId, + "robot_id" to robotId, + "timestamp" to System.currentTimeMillis() + ) + ) + sendJson(msg) + } + + /** + * Send a dialogue turn (user text + robot text) to the backend. + * Can be called from onResponse / after you know what Furhat said. 
+ */ + fun sendTurn( + userId: String?, + language: String?, + userText: String?, + robotText: String? + ) { + val msg = mapOf( + "type" to "turn", + "payload" to mapOf( + "session_id" to sessionId, + "user_id" to userId, + "language" to (language ?: "en"), + "user_text" to (userText ?: ""), + "robot_text" to robotText, + "timestamp" to System.currentTimeMillis() + // turn_index is handled by backend session_state + ) + ) + sendJson(msg) + } + + /** + * Notify backend when you’ve captured the user's name. + */ + fun sendNameUpdate( + userId: String?, + name: String + ) { + val msg = mapOf( + "type" to "name_update", + "payload" to mapOf( + "session_id" to sessionId, + "user_id" to userId, + "name" to name, + "timestamp" to System.currentTimeMillis() + ) + ) + sendJson(msg) + } + + /** + * Close the WebSocket when the interaction ends. + */ + fun close() { + webSocket?.close(1000, "Session ended") + webSocket = null + } + + // -------------------- Internals -------------------- // + + private fun sendJson(body: Any) { + try { + val json: String = mapper.writeValueAsString(body) + webSocket?.send(json) + } catch (e: Exception) { + println("Perception WS send error: ${e.message}") + } + } + + fun sendBinary(payload: ByteArray) { + try { + webSocket?.send(payload.toByteString()) + } catch (e: Exception) { + println("Perception WS binary send error: ${e.message}") + } + } + + private fun handleIncomingMessage(text: String) { + try { + // We only care about identity_update for now + val root: Map = mapper.readValue(text) + val type = root["type"] as? 
String ?: return + if (type != "identity_update") return + + // Re-serialize payload to map it into IdentityUpdatePayload + val payloadObj = root["payload"] + val payloadJson = mapper.writeValueAsString(payloadObj) + val payload: IdentityUpdatePayload = mapper.readValue(payloadJson) + + UserState.applyIdentityUpdate(payload) + } catch (e: Exception) { + println("Perception WS parse error: ${e.message}") + } + } +} \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt index e69de29..ab1e4b1 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserProfile.kt @@ -0,0 +1,47 @@ +package furhatos.app.templateadvancedskill.perception + +import com.fasterxml.jackson.annotation.JsonProperty + +/** + * Local representation of the current user as known by the backend. + */ +data class UserProfile( + val id: String, + var name: String? = null, + var primaryLanguage: String = "en", + var languages: MutableMap = mutableMapOf("en" to 1.0), + var confidence: Double = 1.0, + var lastSeen: Long = System.currentTimeMillis() +) + +/** + * Shape of the 'payload' in the 'identity_update' messages from the backend. + * Must match the JSON sent by perception_ws_handler.py. + */ +data class IdentityUpdatePayload( + @JsonProperty("session_id") + val sessionId: String, + + @JsonProperty("user_id") + val userId: String, + + val name: String?, + + @JsonProperty("primary_language") + val primaryLanguage: String, + + val languages: Map, + + val confidence: Double, + + @JsonProperty("last_seen") + val lastSeen: Long +) + +/** + * Full identity_update message from backend: { "type": "...", "payload": { ... 
} } + */ +data class IdentityUpdateMessage( + val type: String, + val payload: IdentityUpdatePayload +) \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt index e69de29..1cdd73f 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt @@ -0,0 +1,33 @@ +package furhatos.app.templateadvancedskill.perception + +object UserState { + + @Volatile + var currentProfile: UserProfile? = null + + /** + * Merge an IdentityUpdatePayload into the local UserProfile. + */ + fun applyIdentityUpdate(update: IdentityUpdatePayload) { + val existing = currentProfile + if (existing == null || existing.id != update.userId) { + currentProfile = UserProfile( + id = update.userId, + name = update.name, + primaryLanguage = update.primaryLanguage, + languages = update.languages.toMutableMap(), + confidence = update.confidence, + lastSeen = update.lastSeen + ) + } else { + if (update.name != null) { + existing.name = update.name + } + existing.primaryLanguage = update.primaryLanguage + existing.languages.clear() + existing.languages.putAll(update.languages) + existing.confidence = update.confidence + existing.lastSeen = update.lastSeen + } + } +} \ No newline at end of file diff --git a/middleware/main.py b/middleware/main.py index 5f713d2..0588eee 100755 --- a/middleware/main.py +++ b/middleware/main.py @@ -31,7 +31,7 @@ retrieving relevant document context, and generating responses using an LLM. 
""" -from fastapi import FastAPI, HTTPException +from fastapi import FastAPI, HTTPException, WebSocket from pydantic import BaseModel import uvicorn import asyncio @@ -45,6 +45,9 @@ classify_text ) +from my_furhat_backend.db.session import init_db +from my_furhat_backend.perception.websocket_handler import perception_ws_handler + # Initialize the FastAPI application. app = FastAPI() @@ -54,62 +57,32 @@ class Transcription(BaseModel): - """ - Pydantic model representing the transcription received from the client. - - Attributes: - content (str): The text content of the transcription. - """ content: str class EngageRequest(BaseModel): - """ - Pydantic model for the engage endpoint containing the document and the answer. - - Attributes: - document (str): Identifier or content of the document to be used. - answer (str): The answer generated or provided that will be used to create further prompts. - """ document: str answer: str # For demonstration purposes, using a simple in-memory store for the latest response. -# In production, consider using a more robust solution (e.g., session management or a database). latest_response = None # Instantiate the DocumentAgent to handle LLM processing. agent = DocumentAgent() +@app.on_event("startup") +def on_startup(): + init_db() + logger.info("Database initialized.") + + @app.post("/ask", response_model=dict) async def ask_question(transcription: Transcription): - """ - Process a transcription by running it through the LLM agent synchronously and return the generated response. - - This endpoint accepts a POST request containing the transcription (user's spoken text), - processes it synchronously using the DocumentAgent's `run` method, and returns the generated response - along with metadata about the processing. The processing is offloaded to a separate thread to avoid blocking. - - Parameters: - transcription (Transcription): A Pydantic model instance containing the transcribed text. 
- - Returns: - dict: A JSON response containing: - - status: Success status - - response: The generated response text - - metadata: Processing metadata including timestamp and lengths - - Raises: - HTTPException: If an error occurs during processing - """ global latest_response try: - # Offload the agent processing to a separate thread to avoid blocking. latest_response = await asyncio.to_thread(agent.run, transcription.content) - - # Add metadata to the response response = { "status": "success", "response": latest_response, @@ -127,76 +100,30 @@ async def ask_question(transcription: Transcription): @app.post("/transcribe", response_model=dict) async def transcribe(transcription: Transcription): - """ - Asynchronously process transcribed text from the Furhat frontend and store the agent's response. - - This endpoint accepts a POST request containing the transcription (user's spoken text), - processes it using the DocumentAgent's `run` method (offloaded to a thread), and stores the generated response - in a global variable for later retrieval via the /response endpoint. If an error occurs during processing, - a 500 HTTPException is raised. - - Parameters: - transcription (Transcription): A Pydantic model instance with the transcribed text. - - Returns: - dict: A JSON response indicating that the transcription was received. - """ global latest_response try: - # Process the transcription asynchronously and update the global latest_response. latest_response = await asyncio.to_thread(agent.run, transcription.content) except Exception as e: raise HTTPException(status_code=500, detail=str(e)) - # Acknowledge that the transcription has been received. return {"status": "transcription received"} @app.get("/response", response_model=dict) async def get_response(): - """ - Retrieve the latest response generated by the LLM agent. - - This endpoint accepts a GET request and returns the most recent response stored from a transcription. 
- If no response is available, it returns a message indicating that no response has been generated yet. - - Returns: - dict: A JSON response with a 'response' key containing the agent's answer as a string. - """ if latest_response is None: - # Return a default message if no response has been generated yet. return {"response": "No response generated yet."} return {"response": latest_response} @app.post("/get_docs", response_model=dict) async def get_docs(transcription: Transcription): - """ - Retrieve a document based on the provided transcription by using document retrieval and classification. - - This endpoint obtains a list of documents using `get_list_docs()`. If no documents are found, - it returns a message indicating such. If exactly one document is found, it returns that document. - If multiple documents are found, it uses a TextClassifier to rank the documents based on the transcription - and returns the top-ranked document. - - Parameters: - transcription (Transcription): A Pydantic model instance containing the transcribed text - to be used for ranking the documents. - - Returns: - dict: A JSON response with a 'response' key that contains the relevant document or an appropriate message. - """ try: - # Retrieve a list of available documents. docs = await asyncio.to_thread(get_list_docs) if not docs: return {"response": "No documents found."} if len(docs) == 1: - # If only one document exists, return it directly. return {"response": docs[0]} - # If multiple documents are found, rank them using the text classifier. ranked_docs = await asyncio.to_thread(classify_text, transcription.content, docs) - # Assuming ranked_docs is a dictionary with documents as keys and scores as values, - # select the top-ranked document (first key in the dictionary). top_doc = list(ranked_docs.keys())[0] if ranked_docs else "No document ranked." 
return {"response": top_doc} except Exception as e: @@ -205,31 +132,8 @@ async def get_docs(transcription: Transcription): @app.post("/engage", response_model=dict) async def engage(engage_request: EngageRequest): - """ - Process an engagement request by retrieving document context, generating a summary, and producing a followup prompt. - - This endpoint accepts a POST request containing a document identifier and an answer, - and generates an engaging follow-up prompt based on the document context and previous answer. - The processing is offloaded to a separate thread to avoid blocking. - - Parameters: - engage_request (EngageRequest): A Pydantic model instance containing: - - document: Identifier or content of the document - - answer: The previous answer to generate follow-up from - - Returns: - dict: A JSON response containing: - - status: Success status - - prompt: The generated follow-up prompt - - metadata: Processing metadata including timestamp and context information - - Raises: - HTTPException: If an error occurs during processing - """ try: - # Get engaging prompt from document agent prompt = await asyncio.to_thread(agent.engage, engage_request.document, engage_request.answer) - response = { "status": "success", "prompt": prompt, @@ -244,6 +148,11 @@ async def engage(engage_request: EngageRequest): logger.error(f"Error in engage endpoint: {e}") raise HTTPException(status_code=500, detail=str(e)) -# Run the application using Uvicorn if this module is executed directly. 
+ +@app.websocket("/ws/perception") +async def ws_perception(websocket: WebSocket): + await perception_ws_handler(websocket) + + if __name__ == "__main__": - uvicorn.run(app, host="0.0.0.0", port=8000) + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/my_furhat_backend/__init__.py b/my_furhat_backend/__init__.py index 8b13789..a002c47 100755 --- a/my_furhat_backend/__init__.py +++ b/my_furhat_backend/__init__.py @@ -1 +1,16 @@ +""" +my_furhat_backend package initialization. +Provides compatibility shims required by downstream dependencies. +""" + +from __future__ import annotations + +try: + import langgraph.checkpoint.base as _checkpoint_base + + if not hasattr(_checkpoint_base, "EXCLUDED_METADATA_KEYS"): + _checkpoint_base.EXCLUDED_METADATA_KEYS = set() # type: ignore[attr-defined] +except Exception: + # LangGraph is optional during docs/tests; ignore if unavailable. + pass diff --git a/my_furhat_backend/config/settings.py b/my_furhat_backend/config/settings.py index 741c2f8..4478417 100755 --- a/my_furhat_backend/config/settings.py +++ b/my_furhat_backend/config/settings.py @@ -3,13 +3,13 @@ from pathlib import Path -BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Use local cache directory instead of /mnt (which is read-only on macOS) PROJECT_ROOT = Path(BASE_DIR).parent CACHE_DIR = PROJECT_ROOT / ".cache" MOUNT_DIR = Path("/mnt") # Keep for reference, but use CACHE_DIR for local dev - +# Load .env file variables (if present) config = dotenv_values(".env") # Default to local cache directory, but allow override via environment variables @@ -21,7 +21,13 @@ "MODEL_PATH": os.getenv("MODEL_PATH", str(CACHE_DIR / "models")), "GGUF_MODELS_PATH": os.getenv("GGUF_MODELS_PATH", str(CACHE_DIR / "models/gguf")), "CUDA_VISIBLE_DEVICES": os.getenv("CUDA_VISIBLE_DEVICES", "0"), - "PYTORCH_CUDA_ALLOC_CONF": os.getenv("PYTORCH_CUDA_ALLOC_CONF", 
"max_split_size_mb:512") + "PYTORCH_CUDA_ALLOC_CONF": os.getenv("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512"), + # NEW: database URL for user-recognition + memory + # For dev, SQLite in the local .cache dir; override in .env for Postgres, etc. + "DATABASE_URL": os.getenv( + "DATABASE_URL", + f"sqlite:///{CACHE_DIR / 'furhat_memory.db'}" + ), }) # Set environment variables from config diff --git a/my_furhat_backend/memory/__init__.py b/my_furhat_backend/memory/__init__.py new file mode 100644 index 0000000..edd6adf --- /dev/null +++ b/my_furhat_backend/memory/__init__.py @@ -0,0 +1,3 @@ +from .summarizer import update_summary_for_conversation + +__all__ = ["update_summary_for_conversation"] \ No newline at end of file diff --git a/my_furhat_backend/memory/summarizer.py b/my_furhat_backend/memory/summarizer.py index e69de29..3cc2872 100644 --- a/my_furhat_backend/memory/summarizer.py +++ b/my_furhat_backend/memory/summarizer.py @@ -0,0 +1,25 @@ +from sqlalchemy.orm import Session +from my_furhat_backend.db import crud +from my_furhat_backend.db.models import Conversation + + +def _naive_summary(turns): + """ + Very simple summary: first + last user utterances trimmed. + Replace with a proper LLM call later. + """ + if not turns: + return "" + first = (turns[0].user_text or "").strip() + last = (turns[-1].user_text or "").strip() + if not first and not last: + return "" + if first == last: + return first[:200] + return f"Started with: {first[:100]}... | Ended with: {last[:100]}..." 
+ + +def update_summary_for_conversation(db: Session, conversation: Conversation): + turns = crud.get_recent_turns(db, conversation, limit=50) + summary = _naive_summary(turns) + crud.update_conversation_summary(db, conversation, summary) \ No newline at end of file diff --git a/my_furhat_backend/perception/__init__.py b/my_furhat_backend/perception/__init__.py new file mode 100644 index 0000000..6636987 --- /dev/null +++ b/my_furhat_backend/perception/__init__.py @@ -0,0 +1,4 @@ +from .session_state import SessionState +from . import language, websocket_handler, face, voice + +__all__ = ["SessionState", "language", "websocket_handler", "face", "voice"] \ No newline at end of file diff --git a/my_furhat_backend/perception/face.py b/my_furhat_backend/perception/face.py index e69de29..b8ceb87 100644 --- a/my_furhat_backend/perception/face.py +++ b/my_furhat_backend/perception/face.py @@ -0,0 +1,116 @@ +# my_furhat_backend/perception/face.py + +from __future__ import annotations + +import io +from typing import Optional, Tuple, List + +import cv2 +import numpy as np +from PIL import Image +from sqlalchemy.orm import Session + +from my_furhat_backend.db.models import User + +# --------- Load InsightFace model once at import ---------- + +try: + from insightface.app import FaceAnalysis + + _face_app: Optional[FaceAnalysis] = FaceAnalysis( + name="buffalo_l", # solid general-purpose model + providers=["CUDAExecutionProvider", "CPUExecutionProvider"], + ) + _face_app.prepare(ctx_id=0, det_size=(640, 640)) +except Exception as e: + print(f"[face] Failed to initialize InsightFace: {e}") + _face_app = None + + +def _bytes_to_bgr(image_bytes: bytes) -> Optional[np.ndarray]: + """Decode image bytes into an OpenCV BGR array.""" + try: + image = Image.open(io.BytesIO(image_bytes)).convert("RGB") + return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) + except Exception as e: + print(f"[face] Failed to decode image: {e}") + return None + + +def 
extract_face_embedding(frame_bytes: bytes) -> Optional[np.ndarray]: + """ + Extract a face embedding from an image. + + Returns: + np.ndarray of shape (512,) or similar, or None if failed/no face. + """ + if _face_app is None: + return None + + bgr = _bytes_to_bgr(frame_bytes) + if bgr is None: + return None + + faces = _face_app.get(bgr) + if not faces: + return None + + # For now, just take the largest detected face + faces.sort(key=lambda f: (f.bbox[2] - f.bbox[0]) * (f.bbox[3] - f.bbox[1]), reverse=True) + emb = faces[0].normed_embedding # already L2-normalized + return np.array(emb, dtype=np.float32) + + +def serialize_embedding(embedding: np.ndarray) -> bytes: + """Convert numpy embedding to raw float32 bytes.""" + return embedding.astype(np.float32).tobytes() + + +def deserialize_embedding(blob: bytes) -> np.ndarray: + """Convert raw float32 bytes back to numpy vector.""" + return np.frombuffer(blob, dtype=np.float32) + + +def _cosine_similarity(a: np.ndarray, b: np.ndarray) -> float: + if a.size == 0 or b.size == 0: + return -1.0 + a_norm = a / (np.linalg.norm(a) + 1e-9) + b_norm = b / (np.linalg.norm(b) + 1e-9) + return float(np.dot(a_norm, b_norm)) + + +def match_face_embedding( + db: Session, + embedding: np.ndarray, + threshold: float = 0.45, +) -> Optional[Tuple[User, float]]: + """ + Match a face embedding against stored users. + + Returns: + (User, similarity) if similarity >= threshold, else None. 
+ """ + if embedding is None or embedding.size == 0: + return None + + users: List[User] = db.query(User).filter(User.face_embedding.isnot(None)).all() + if not users: + return None + + best_user = None + best_score = -1.0 + + for user in users: + try: + stored = deserialize_embedding(user.face_embedding) + score = _cosine_similarity(embedding, stored) + if score > best_score: + best_score = score + best_user = user + except Exception as e: + print(f"[face] Failed to compare embedding for user {user.id}: {e}") + + if best_user is not None and best_score >= threshold: + return best_user, best_score + + return None \ No newline at end of file diff --git a/my_furhat_backend/perception/language.py b/my_furhat_backend/perception/language.py index e69de29..128ab07 100644 --- a/my_furhat_backend/perception/language.py +++ b/my_furhat_backend/perception/language.py @@ -0,0 +1,23 @@ +from typing import Dict + + +def detect_language(text: str) -> str: + """ + VERY simple placeholder – always returns 'en' for now. + Later: plug in a real language-id model or reuse ASR metadata. + """ + return "en" + + +def update_language_distribution(existing: Dict[str, float], lang_code: str, weight: float = 1.0) -> Dict[str, float]: + """ + Update a language distribution dict with a new observation. + existing: e.g. {"en": 0.7, "no": 0.3} + lang_code: e.g. 
"en" + """ + dist = dict(existing) if existing else {} + dist[lang_code] = dist.get(lang_code, 0.0) + weight + total = sum(dist.values()) or 1.0 + for k in dist: + dist[k] /= total + return dist \ No newline at end of file diff --git a/my_furhat_backend/perception/session_state.py b/my_furhat_backend/perception/session_state.py index e69de29..6034277 100644 --- a/my_furhat_backend/perception/session_state.py +++ b/my_furhat_backend/perception/session_state.py @@ -0,0 +1,39 @@ +from dataclasses import dataclass, field +from typing import Optional, Dict + + +@dataclass +class SessionState: + session_id: str + user_id: Optional[str] = None + # language distribution ("en" -> prob, etc.) + language_dist: Dict[str, float] = field(default_factory=dict) + turn_index: int = 0 + + +# Simple in-memory store: session_id -> SessionState +_sessions: Dict[str, SessionState] = {} + + +def get_or_create_session(session_id: str) -> SessionState: + if session_id not in _sessions: + _sessions[session_id] = SessionState(session_id=session_id) + return _sessions[session_id] + + +def update_session_user(session_id: str, user_id: str) -> SessionState: + state = get_or_create_session(session_id) + state.user_id = user_id + return state + + +def update_session_language_dist(session_id: str, language_dist: Dict[str, float]) -> SessionState: + state = get_or_create_session(session_id) + state.language_dist = language_dist + return state + + +def increment_turn_index(session_id: str) -> int: + state = get_or_create_session(session_id) + state.turn_index += 1 + return state.turn_index \ No newline at end of file diff --git a/my_furhat_backend/perception/voice.py b/my_furhat_backend/perception/voice.py index e69de29..d1947f7 100644 --- a/my_furhat_backend/perception/voice.py +++ b/my_furhat_backend/perception/voice.py @@ -0,0 +1,105 @@ +# my_furhat_backend/perception/voice.py + +from __future__ import annotations + +import io +from typing import Optional, Tuple, List + +import numpy as np 
+import soundfile as sf +from sqlalchemy.orm import Session + +from my_furhat_backend.db.models import User + +# --------- Load Resemblyzer encoder once ---------- + +try: + from resemblyzer import VoiceEncoder, preprocess_wav + + _voice_encoder: Optional[VoiceEncoder] = VoiceEncoder() +except Exception as e: + print(f"[voice] Failed to initialize VoiceEncoder: {e}") + _voice_encoder = None + + +def _bytes_to_mono_float32(audio_bytes: bytes) -> Optional[np.ndarray]: + """Decode audio bytes into a mono float32 numpy array.""" + try: + audio, sr = sf.read(io.BytesIO(audio_bytes), dtype="float32") + if audio.ndim > 1: # stereo → mono + audio = np.mean(audio, axis=1) + return preprocess_wav(audio, source_sr=sr) + except Exception as e: + print(f"[voice] Failed to decode audio: {e}") + return None + + +def extract_voice_embedding(audio_bytes: bytes) -> Optional[np.ndarray]: + """ + Extract a voice embedding from an audio clip (a few seconds). + + Returns: + np.ndarray or None. + """ + if _voice_encoder is None: + return None + + wav = _bytes_to_mono_float32(audio_bytes) + if wav is None or len(wav) < 16000: # < 1 s at 16k + return None + + emb = _voice_encoder.embed_utterance(wav) + return np.array(emb, dtype=np.float32) + + +def serialize_embedding(embedding: np.ndarray) -> bytes: + return embedding.astype(np.float32).tobytes() + + +def deserialize_embedding(blob: bytes) -> np.ndarray: + return np.frombuffer(blob, dtype=np.float32) + + +def _cosine_similarity(a: np.ndarray, b: np.ndarray) -> float: + if a.size == 0 or b.size == 0: + return -1.0 + a_norm = a / (np.linalg.norm(a) + 1e-9) + b_norm = b / (np.linalg.norm(b) + 1e-9) + return float(np.dot(a_norm, b_norm)) + + +def match_voice_embedding( + db: Session, + embedding: np.ndarray, + threshold: float = 0.75, +) -> Optional[Tuple[User, float]]: + """ + Match a voice embedding against stored users. + + Returns: + (User, similarity) if similarity >= threshold, else None. 
+ """ + if embedding is None or embedding.size == 0: + return None + + users: List[User] = db.query(User).filter(User.voice_embedding.isnot(None)).all() + if not users: + return None + + best_user = None + best_score = -1.0 + + for user in users: + try: + stored = deserialize_embedding(user.voice_embedding) + score = _cosine_similarity(embedding, stored) + if score > best_score: + best_score = score + best_user = user + except Exception as e: + print(f"[voice] Failed to compare embedding for user {user.id}: {e}") + + if best_user is not None and best_score >= threshold: + return best_user, best_score + + return None \ No newline at end of file diff --git a/my_furhat_backend/perception/websocket_handler.py b/my_furhat_backend/perception/websocket_handler.py index e69de29..193eb19 100644 --- a/my_furhat_backend/perception/websocket_handler.py +++ b/my_furhat_backend/perception/websocket_handler.py @@ -0,0 +1,338 @@ +from __future__ import annotations + +import json +import uuid +from datetime import datetime +from typing import Any, Dict, Optional + +from fastapi import WebSocket, WebSocketDisconnect +from sqlalchemy.orm import Session + +from my_furhat_backend.db.session import SessionLocal +from my_furhat_backend.db import crud +from my_furhat_backend.perception.session_state import ( + get_or_create_session, + update_session_user, + update_session_language_dist, + increment_turn_index, +) +from my_furhat_backend.perception.language import detect_language, update_language_distribution +from my_furhat_backend.perception import face as face_mod +from my_furhat_backend.perception import voice as voice_mod + + +VIDEO_PREFIX = 0x01 +AUDIO_PREFIX = 0x02 + + +def _generate_new_user_id() -> str: + return f"user-{uuid.uuid4().hex[:12]}" + + +async def _send_error(websocket: WebSocket, session_id: str, message: str): + await websocket.send_text( + json.dumps( + { + "type": "error", + "payload": { + "session_id": session_id, + "code": "BAD_REQUEST", + "message": message, + }, 
+ } + ) + ) + + +async def _send_identity_update( + websocket: WebSocket, + session_id: str, + user_id: str, + name: Optional[str], + languages_dist: Dict[str, float], +): + primary_language = max(languages_dist, key=languages_dist.get) if languages_dist else "en" + payload = { + "type": "identity_update", + "payload": { + "session_id": session_id, + "user_id": user_id, + "name": name, + "primary_language": primary_language, + "languages": languages_dist, + "confidence": float(languages_dist.get(primary_language, 1.0)), + "last_seen": int(datetime.utcnow().timestamp() * 1000), + }, + } + await websocket.send_text(json.dumps(payload)) + + +def _ensure_user_for_session(db: Session, session_id: str): + """ + Ensure there is a User associated with this session. + """ + state = get_or_create_session(session_id) + if state.user_id: + user = crud.get_user_by_id(db, state.user_id) + if user: + return user + + # No user yet → create one + user_id = _generate_new_user_id() + user = crud.create_user( + db, + user_id=user_id, + primary_language="en", + languages_json={"en": 1.0}, + ) + update_session_user(session_id, user_id) + update_session_language_dist(session_id, user.languages_json or {"en": 1.0}) + return user + + +def _update_identity_from_face(db: Session, session_id: str, frame_bytes: bytes): + state = get_or_create_session(session_id) + + emb = face_mod.extract_face_embedding(frame_bytes) + if emb is None: + return None + + match = face_mod.match_face_embedding(db, emb) + serialized = face_mod.serialize_embedding(emb) + + if match: + user, score = match + update_session_user(session_id, user.id) + crud.update_user_embeddings(db, user, face_embedding=serialized) + return user + + user = _ensure_user_for_session(db, session_id) + crud.update_user_embeddings(db, user, face_embedding=serialized) + return user + + +def _update_identity_from_voice(db: Session, session_id: str, audio_bytes: bytes): + state = get_or_create_session(session_id) + + emb = 
voice_mod.extract_voice_embedding(audio_bytes) + if emb is None: + return None + + match = voice_mod.match_voice_embedding(db, emb) + serialized = voice_mod.serialize_embedding(emb) + + if match: + user, score = match + update_session_user(session_id, user.id) + crud.update_user_embeddings(db, user, voice_embedding=serialized) + return user + + user = _ensure_user_for_session(db, session_id) + crud.update_user_embeddings(db, user, voice_embedding=serialized) + return user + + +async def _handle_text_message( + websocket: WebSocket, + db: Session, + text: str, + current_session_id: Optional[str], +) -> Optional[str]: + """ + Handle JSON text messages: hello, turn, name_update. + Returns updated current_session_id. + """ + try: + msg = json.loads(text) + except json.JSONDecodeError: + await _send_error(websocket, current_session_id or "unknown", "Invalid JSON") + return current_session_id + + msg_type = msg.get("type") + payload: Dict[str, Any] = msg.get("payload") or {} + + # --- HELLO --- + if msg_type == "hello": + session_id = payload.get("session_id") + if not session_id: + await _send_error(websocket, "unknown", "Missing session_id in hello payload") + return current_session_id + + get_or_create_session(session_id) + # Default language distribution + update_session_language_dist(session_id, {"en": 1.0}) + return session_id + + # For all other types, we need session_id + session_id = payload.get("session_id") or current_session_id + if not session_id: + await _send_error(websocket, "unknown", f"Missing session_id in {msg_type} payload") + return current_session_id + + # Make sure session exists + state = get_or_create_session(session_id) + + # --- TURN --- + if msg_type == "turn": + # If we already have a recognized user, reuse it; else create. 
+ if state.user_id: + user = crud.get_user_by_id(db, state.user_id) + if not user: + user = _ensure_user_for_session(db, session_id) + else: + user = _ensure_user_for_session(db, session_id) + + user_text = payload.get("user_text") or "" + lang = payload.get("language") or detect_language(user_text or "") + + # Update language distribution + new_lang_dist = update_language_distribution(state.language_dist, lang) + update_session_language_dist(session_id, new_lang_dist) + + crud.update_user_languages( + db, + user, + new_lang_dist, + primary_language=max(new_lang_dist, key=new_lang_dist.get), + ) + + # Create conversation + turn + conv = crud.get_or_create_conversation( + db, + session_id=session_id, + user=user, + language=lang, + ) + turn_idx = increment_turn_index(session_id) + crud.create_turn( + db, + conversation=conv, + turn_index=turn_idx, + user_text=user_text, + robot_text=payload.get("robot_text"), + ) + + await _send_identity_update( + websocket, + session_id=session_id, + user_id=user.id, + name=user.name, + languages_dist=new_lang_dist, + ) + + # --- NAME_UPDATE --- + elif msg_type == "name_update": + if state.user_id: + user = crud.get_user_by_id(db, state.user_id) + if not user: + user = _ensure_user_for_session(db, session_id) + else: + user = _ensure_user_for_session(db, session_id) + + name = payload.get("name") + if name: + crud.update_user_name(db, user, name) + + lang_dist = state.language_dist or user.languages_json or {"en": 1.0} + update_session_language_dist(session_id, lang_dist) + + await _send_identity_update( + websocket, + session_id=session_id, + user_id=user.id, + name=user.name, + languages_dist=lang_dist, + ) + + else: + await _send_error(websocket, session_id, f"Unknown message type: {msg_type}") + + return session_id + + +async def _handle_binary_message( + websocket: WebSocket, + db: Session, + data: bytes, + current_session_id: Optional[str], +): + """ + Handle binary streaming frames: + - 0x01 + JPEG bytes => video frame + - 
0x02 + audio bytes => audio chunk + """ + if not data: + return + + stream_type = data[0] + payload_bytes = data[1:] + + if not current_session_id: + # We require a hello first to set session_id + await _send_error(websocket, "unknown", "Binary data received before hello/session_id") + return + + session_id = current_session_id + state = get_or_create_session(session_id) + + if stream_type == VIDEO_PREFIX: + user = _update_identity_from_face(db, session_id, payload_bytes) + elif stream_type == AUDIO_PREFIX: + user = _update_identity_from_voice(db, session_id, payload_bytes) + else: + # Unknown binary subtype; ignore + return + + if not user: + return + + # Use existing or default language distribution + lang_dist = state.language_dist or user.languages_json or {"en": 1.0} + update_session_language_dist(session_id, lang_dist) + + await _send_identity_update( + websocket, + session_id=session_id, + user_id=user.id, + name=user.name, + languages_dist=lang_dist, + ) + + +async def perception_ws_handler(websocket: WebSocket): + """ + Single WebSocket entry point that handles: + - Text JSON messages for hello/turn/name_update. + - Binary streaming frames from the Furhat camera/mic. + + Designed to run continuously while the skill is active. + """ + await websocket.accept() + db: Session = SessionLocal() + current_session_id: Optional[str] = None + + try: + while True: + message = await websocket.receive() + + if "text" in message and message["text"] is not None: + current_session_id = await _handle_text_message( + websocket, + db, + message["text"], + current_session_id, + ) + + elif "bytes" in message and message["bytes"] is not None: + await _handle_binary_message( + websocket, + db, + message["bytes"], + current_session_id, + ) + + # Otherwise ignore (e.g. 
pings) + except WebSocketDisconnect: + pass + finally: + db.close() \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 17820cb..0114e26 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,3 +34,7 @@ ai-agent = "my_furhat_backend.main:main" [build-system] requires = ["poetry-core>=1.5.0"] build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +addopts = "--ignore=my_furhat_backend/agents --strict-markers" +pythonpath = ["."] diff --git a/tests/test_language.py b/tests/test_language.py index e69de29..1dfe9e6 100644 --- a/tests/test_language.py +++ b/tests/test_language.py @@ -0,0 +1,27 @@ +import pytest + +from my_furhat_backend.perception.language import ( + detect_language, + update_language_distribution, +) + + +def test_detect_language_returns_default_en(): + assert detect_language("Hei på deg!") == "en" + assert detect_language("") == "en" + + +def test_update_language_distribution_normalizes_scores(): + existing = {"en": 2.0, "no": 1.0} + + updated = update_language_distribution(existing, "no", weight=3.0) + + assert pytest.approx(sum(updated.values()), rel=1e-6) == 1.0 + assert updated["no"] > updated["en"] + + +def test_update_language_distribution_handles_empty_seed(): + updated = update_language_distribution({}, "en") + + assert updated == {"en": 1.0} + diff --git a/tests/test_memory_summarizer.py b/tests/test_memory_summarizer.py index e69de29..dfc8862 100644 --- a/tests/test_memory_summarizer.py +++ b/tests/test_memory_summarizer.py @@ -0,0 +1,45 @@ +from dataclasses import dataclass +from unittest.mock import MagicMock, patch + +from my_furhat_backend.memory import summarizer + + +@dataclass +class DummyTurn: + user_text: str | None + + +def test_naive_summary_empty_turns(): + assert summarizer._naive_summary([]) == "" + + +def test_naive_summary_uses_first_and_last_turns(): + turns = [ + DummyTurn("Hello there"), + DummyTurn("Some middle question"), + DummyTurn("Thanks, bye"), + ] + + summary = 
summarizer._naive_summary(turns) + + assert "Hello there" in summary + assert "Thanks, bye" in summary + assert "Some middle" not in summary + + +def test_update_summary_for_conversation_calls_crud(): + db = MagicMock() + conversation = MagicMock() + turn = DummyTurn("Hi") + + with patch.object(summarizer.crud, "get_recent_turns", return_value=[turn]) as get_recent, patch.object( + summarizer.crud, "update_conversation_summary" + ) as update_summary: + summarizer.update_summary_for_conversation(db, conversation) + + get_recent.assert_called_once_with(db, conversation, limit=50) + update_summary.assert_called_once() + args, kwargs = update_summary.call_args + assert args[0] is db + assert args[1] is conversation + diff --git a/tests/test_perception_ws.py b/tests/test_perception_ws.py index e69de29..6d8d80c 100644 --- a/tests/test_perception_ws.py +++ b/tests/test_perception_ws.py @@ -0,0 +1,81 @@ +import asyncio +import json +import pytest + +from my_furhat_backend.perception import session_state +from my_furhat_backend.perception.websocket_handler import ( + _generate_new_user_id, + _handle_text_message, + _send_identity_update, +) + + +@pytest.fixture(autouse=True) +def reset_session_state(): + session_state._sessions.clear() # type: ignore[attr-defined] + yield + session_state._sessions.clear() # type: ignore[attr-defined] + + +class DummyWebSocket: + def __init__(self): + self.sent_text: list[str] = [] + + async def send_text(self, text: str): + self.sent_text.append(text) + + +def test_generate_new_user_id_produces_unique_ids(): + ids = {_generate_new_user_id() for _ in range(10)} + + assert all(identifier.startswith("user-") for identifier in ids) + assert len(ids) == 10 + + +def test_send_identity_update_formats_payload(): + ws = DummyWebSocket() + + asyncio.run( + _send_identity_update( + ws, + session_id="session-123", + user_id="user-xyz", + name="Ava", + languages_dist={"en": 0.7, "no": 0.3}, + ) + ) + + assert len(ws.sent_text) == 1 + payload = 
json.loads(ws.sent_text[0]) + assert payload["type"] == "identity_update" + assert payload["payload"]["session_id"] == "session-123" + assert payload["payload"]["user_id"] == "user-xyz" + assert payload["payload"]["primary_language"] == "en" + + +def test_handle_text_message_initializes_session_language_distribution(): + ws = DummyWebSocket() + text = json.dumps( + { + "type": "hello", + "payload": {"session_id": "session-abc"}, + } + ) + + session_id = asyncio.run(_handle_text_message(ws, db=None, text=text, current_session_id=None)) + + assert session_id == "session-abc" + state = session_state.get_or_create_session("session-abc") + assert state.language_dist == {"en": 1.0} + + +def test_handle_text_message_without_session_sends_error(): + ws = DummyWebSocket() + text = json.dumps({"type": "hello", "payload": {}}) + + asyncio.run(_handle_text_message(ws, db=None, text=text, current_session_id=None)) + + assert ws.sent_text, "Expected an error payload to be sent" + payload = json.loads(ws.sent_text[-1]) + assert payload["type"] == "error" + From ba5629669baa18463c8d88ebab08bfc200246e2c Mon Sep 17 00:00:00 2001 From: johngreenough Date: Wed, 10 Dec 2025 15:34:58 +0100 Subject: [PATCH 3/3] Document updates and recent skill/backend changes --- README.md | 12 + .../app/templateadvancedskill/flow/init.kt | 4 + .../flow/main/UserExtensions.kt | 7 + .../flow/main/conversation.kt | 1036 +++++++++-- .../flow/main/documentWaitingToStart.kt | 58 +- .../flow/main/greeting.kt | 54 +- .../language/LangDetect.kt | 91 +- .../language/LanguageManager.kt | 28 +- .../app/templateadvancedskill/nlu/intents.kt | 175 +- .../app/templateadvancedskill/parms.kt | 64 +- .../perception/PerceptionClient.kt | 81 +- .../perception/UserMemory.kt | 27 + .../perception/UserState.kt | 21 + .../trivia/TriviaStats.kt | 144 ++ middleware/main.py | 137 +- my_furhat_backend/RAG/rag_flow.py | 501 +++--- my_furhat_backend/agents/document_agent.py | 1584 +++-------------- 
.../agents/test_2_conversational_agent.py | 7 + .../agents/test_conversational_agent.py | 8 + my_furhat_backend/config/settings.py | 59 + my_furhat_backend/llm_tools/tools.py | 26 +- my_furhat_backend/memory/summarizer.py | 11 +- my_furhat_backend/models/chatbot_factory.py | 52 +- my_furhat_backend/models/classifier.py | 80 +- my_furhat_backend/models/llm_factory.py | 364 ++-- my_furhat_backend/perception/face.py | 17 +- my_furhat_backend/perception/language.py | 12 +- my_furhat_backend/perception/session_state.py | 13 + my_furhat_backend/perception/voice.py | 29 +- .../perception/websocket_handler.py | 48 +- my_furhat_backend/utils/gpu_utils.py | 122 +- my_furhat_backend/utils/ollama_bootstrap.py | 117 ++ my_furhat_backend/utils/qa_pairs.py | 111 ++ my_furhat_backend/utils/util.py | 8 +- pyproject.toml | 3 +- tests/rag_llm_eval.py | 588 ++++++ tests/test_biometrics.py | 56 + tests/test_perception_ws.py | 77 + tests/test_session_state.py | 40 + tests/test_websocket_handler.py | 143 ++ 40 files changed, 3799 insertions(+), 2216 deletions(-) create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserMemory.kt create mode 100644 furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/trivia/TriviaStats.kt mode change 100755 => 100644 my_furhat_backend/RAG/rag_flow.py mode change 100755 => 100644 my_furhat_backend/agents/document_agent.py create mode 100644 my_furhat_backend/utils/ollama_bootstrap.py create mode 100644 my_furhat_backend/utils/qa_pairs.py create mode 100644 tests/rag_llm_eval.py create mode 100644 tests/test_biometrics.py create mode 100644 tests/test_session_state.py create mode 100644 tests/test_websocket_handler.py diff --git a/README.md b/README.md index 9adda8b..778e06b 100755 --- a/README.md +++ b/README.md @@ -261,6 +261,18 @@ The project includes several GPU optimizations: --- +## Recent Additions and Defaults (skill + backend) + +- **Perception websocket**: Robot streams 
frames to `/ws/perception` (binary prefix `0x01` video JPEG, `0x02` audio). Text messages carry `hello`, `turn`, `name_update`. Server soft-fails if face/voice libs are missing; user IDs are created on-demand for stats. +- **Trivia flow**: Kotlin skill calls `/quiz/question`, `/trivia/turn` (LLM phrasing/feedback), and `/memory/trivia` (stats). Local cache + backend persistence (`trivia_stats` table). +- **Language handling**: Heuristic EN/NO detector client-side; explicit language pinning via `LanguageManager` with Polly voices (`Kendra-Neural` EN, `Ida-Neural` NO). Backend language hinting in prompts; placeholder server-side lang detect. +- **URLs/IPs**: Default `BACKEND_URL` in the skill points to laptop IP (override via `BACKEND_URL` env). Robot IP tracked in params. Perception WS URL derives from `BACKEND_URL` (`ws://<host>:8000/ws/perception`). +- **RAG**: Lightweight BM25 (no vector DB) over `DOCUMENTS_PATH` with preference for `qa_pairs.json` when present; falls back to PDFs/txt. Chunk size 800 / overlap 150. +- **LLM defaults**: Backend uses Ollama (`llama3.2:latest`) by default; HF/LlamaCpp paths are guarded behind torch/transformers availability. System prompt in `config/settings.py` defines the Kaia persona and strict language policy. +- **Storage/layout**: Backend defaults to local `.cache` for models/caches/docs/vector store/DB (SQLite `furhat_memory.db`). Tables: `users`, `conversations`, `turns`, `trivia_stats`. +- **Ingestion helper**: `ingestion/web_ingest.py` fetches PDFs via DuckDuckGo HTML + regex (best-effort), storing into a folder you can set as `DOCUMENTS_PATH`. + + ## Requirements & Installation This project uses a hybrid dependency management approach: some dependencies are installed via pip into your environment, and others are managed by Poetry (tracked in the `poetry.lock` file).
diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt index 3aaa272..2026427 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/init.kt @@ -14,6 +14,10 @@ val Init: State = state { init { /** Set our default interaction parameters */ users.setSimpleEngagementPolicy(DISTANCE_TO_ENGAGE, MAX_NUMBER_OF_USERS) + // Add a small end-of-speech silence buffer so the robot waits + // ~1.5–2 seconds after the user stops talking before responding. + // This helps avoid interrupting users who pause briefly mid-utterance. + furhat.param.endSilTimeout = 2000 } onEntry { /** start interaction */ diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt index 6e476e8..fdcd0eb 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/UserExtensions.kt @@ -4,6 +4,10 @@ import furhatos.app.templateadvancedskill.perception.PerceptionClient import furhatos.records.User import java.util.concurrent.ConcurrentHashMap +/** + * Simple per-user registry so we can hang a PerceptionClient off a User record + * without modifying the Furhat SDK types. Keeps mapping in memory only. + */ private object PerceptionClientRegistry { private val clients = ConcurrentHashMap<String, PerceptionClient>() @@ -18,6 +22,9 @@ private object PerceptionClientRegistry { } } +/** + * Extension property to access a per-user PerceptionClient. + */ var User.perceptionClient: PerceptionClient?
get() = PerceptionClientRegistry.get(id) set(value) { diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt index 6070a2d..ced4410 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/conversation.kt @@ -1,6 +1,16 @@ package furhatos.app.templateadvancedskill.flow.main +/** + * Core conversation flow for the trivia experience. + * Highlights: + * - Trivia is driven via backend endpoints (/quiz/question, /trivia/turn). + * - Language handling uses LangDetect + explicit pinning via LanguageManager. + * - Perception/identity updates are consumed via PerceptionClient (see UserState). + * - Trivia stats are cached locally and synced to backend memory endpoints. + */ + import furhatos.flow.kotlin.* +import furhatos.flow.kotlin.users import okhttp3.* import okhttp3.MediaType.Companion.toMediaType import okhttp3.RequestBody.Companion.toRequestBody @@ -14,18 +24,23 @@ import furhatos.app.templateadvancedskill.language.LanguageManager import furhatos.app.templateadvancedskill.language.setAppLanguage import furhatos.app.templateadvancedskill.flow.Parent import furhatos.gestures.Gestures -import furhatos.app.templateadvancedskill.params.LOCAL_BACKEND_URL -import furhatos.app.templateadvancedskill.params.AWS_BACKEND_URL +import furhatos.app.templateadvancedskill.params.BACKEND_URL import java.util.concurrent.TimeUnit import furhatos.app.templateadvancedskill.nlu.UncertainResponseIntent import java.net.SocketTimeoutException import furhatos.app.templateadvancedskill.nlu.MyNameIsIntent import furhatos.app.templateadvancedskill.perception.UserState +import furhatos.app.templateadvancedskill.perception.UserProfile +import 
furhatos.app.templateadvancedskill.trivia.TriviaStats +import furhatos.app.templateadvancedskill.trivia.loadTriviaStats +import furhatos.app.templateadvancedskill.trivia.recordTriviaResult data class Transcription(val content: String) data class EngageRequest(val document: String, val answer: String) +data class QuizQuestionPayload(val index: Int, val question: String, val answer: String) + /** Helper: choose EN/NO text based on language */ private fun localized(en: String, no: String, lang: AppLanguage): String = if (lang == AppLanguage.NO) no else en @@ -33,6 +48,332 @@ private fun localized(en: String, no: String, lang: AppLanguage): String = /** Helper: use the most recently set conversation language */ private fun currentConversationLanguage(): AppLanguage = LanguageManager.current +/** Helper: Normalize a yes/no style answer. */ +private fun matchesKeyword(text: String, keywords: Set<String>): Boolean { + val normalized = text.trim().lowercase() + .removeSuffix(".") + .removeSuffix("!") + .removeSuffix("?") + + if (normalized.isEmpty()) return false + + return keywords.any { keyword -> + normalized == keyword || + normalized.startsWith("$keyword ") || + normalized.endsWith(" $keyword") || + normalized.contains(" $keyword ") + } +} + +private fun isAffirmative(text: String): Boolean = + matchesKeyword( + text, + setOf( + "yes", + "yeah", + "yep", + "yup", + "sure", + "ok", + "okay", + "ready", + "of course", + "absolutely", + "let's go", + "lets go", + "yes please", + // Norwegian affirmatives + "ja", + "javisst", + "klart", + "selvfølgelig", + "jepp", + "joda", + "jo", + "ja takk" + ) + ) + +private fun isNegative(text: String): Boolean = + matchesKeyword( + text, + setOf( + "no", + "nope", + "nah", + "not now", + "maybe later", + "another time", + // Norwegian negatives + "nei", + "neida", + "ikke nå", + "ikke nå takk", + "nei takk" + ) + ) + +private fun wantsToStop(text: String): Boolean = + matchesKeyword( + text, + setOf( + "stop", + "done", + "finish",
"enough", + "thanks", + "thank you", + "bye", + "goodbye", + "i'm done", + "im done" + ) + ) + +private val norwegianLanguageRequests = listOf( + "speak norwegian", + "norwegian please", + "in norwegian", + "switch to norwegian", + "på norsk", + "snakk norsk", + "kan du snakke norsk", + "vennligst norsk" +) + +private val englishLanguageRequests = listOf( + "speak english", + "english please", + "in english", + "switch to english", + "på engelsk", + "snakk engelsk", + "kan du snakke engelsk" +) + +private fun explicitLanguageRequest(text: String): AppLanguage? { + val lower = text.lowercase() + return when { + norwegianLanguageRequests.any { lower.contains(it) } -> AppLanguage.NO + englishLanguageRequests.any { lower.contains(it) } -> AppLanguage.EN + else -> null + } +} + +private fun languageSwitchAcknowledgement(lang: AppLanguage): String = + localized( + en = "Sure, I'll continue in English.", + no = "Selvfølgelig, jeg fortsetter på norsk.", + lang = lang + ) + +private fun statsProgressLine(stats: TriviaStats, lang: AppLanguage): String = + localized( + en = "So far you've answered ${stats.correctAnswers} out of ${stats.totalQuestions} correctly.", + no = "Så langt har du ${stats.correctAnswers} av ${stats.totalQuestions} riktige.", + lang = lang + ) + +private fun statsFinalLine(stats: TriviaStats, lang: AppLanguage): String = + localized( + en = "You finished with ${stats.correctAnswers} correct out of ${stats.totalQuestions}.", + no = "Du endte med ${stats.correctAnswers} riktige av ${stats.totalQuestions}.", + lang = lang + ) + +private fun namePrompt(lang: AppLanguage): String = + localized( + en = "Before we finish, what name should I remember you by so I can keep your score?", + no = "Før vi avslutter, hvilket navn skal jeg huske deg som for å lagre scoren din?", + lang = lang + ) + +private val namePrefixPatterns = listOf( + "remember me as", + "remember me by", + "call me", + "my name is", + "name is", + "i am", + "i'm", + "im", + "it's", + "it is", 
+) + +private val rememberMePatterns = listOf( + "do you remember me", + "remember me", + "do you know me", + "have we met", + "do you recall me" +) + +private val scorePatterns = listOf( + "what's my score", + "whats my score", + "what is my score", + "how many did i get", + "how many correct", + "how am i doing", + "how did i do", + "my stats", + "my score" +) + +private fun extractNameFromUtterance(raw: String): String? { + var candidate = raw.trim() + if (candidate.isEmpty()) return null + + val lower = candidate.lowercase() + for (pattern in namePrefixPatterns) { + val idx = lower.indexOf(pattern) + if (idx != -1) { + candidate = candidate.substring(idx + pattern.length).trim() + break + } + } + + candidate = candidate + .replace(Regex("^(?:the name\\s+)", RegexOption.IGNORE_CASE), "") + .trim() + .trim('\'', '"') + + candidate = candidate + .replace(Regex("[^\\p{L}\\p{M}\\-\\s']"), " ") + .replace(Regex("\\s+"), " ") + .trim() + + return candidate.takeIf { it.length >= 2 } +} + +private fun isRememberMeQuery(text: String): Boolean { + val lower = text.lowercase() + return rememberMePatterns.any { lower.contains(it) } +} + +private fun isScoreQuery(text: String): Boolean { + val lower = text.lowercase() + return scorePatterns.any { lower.contains(it) } +} + +/** Call backend to get a random quiz question from qa_pairs.json. */ +private fun fetchQuizQuestion(): QuizQuestionPayload? 
{ + val baseUrl = BACKEND_URL + val client = OkHttpClient.Builder() + .connectTimeout(30, TimeUnit.SECONDS) + .readTimeout(30, TimeUnit.SECONDS) + .writeTimeout(30, TimeUnit.SECONDS) + .build() + + val request = Request.Builder() + .url("$baseUrl/quiz/question") + .get() + .build() + + return try { + client.newCall(request).execute().use { response -> + if (!response.isSuccessful) { + println("fetchQuizQuestion error: ${response.code} - ${response.message}") + return null + } + val body = response.body?.string() ?: return null + val json = JSONObject(body) + val index = json.optInt("index", -1) + val question = json.optString("question", "") + val answer = json.optString("answer", "") + if (question.isBlank() || answer.isBlank()) { + null + } else { + QuizQuestionPayload(index = index, question = question, answer = answer) + } + } + } catch (e: Exception) { + println("fetchQuizQuestion exception: ${e.message}") + null + } +} + +private fun prepareTriviaQuestionUtterance( + lang: AppLanguage, + preferredLanguage: String +): Pair { + val quiz = fetchQuizQuestion() ?: run { + return Pair( + null, + localized( + en = "I couldn't reach the trivia service right now. Please make sure the backend is running and we can try again.", + no = "Jeg klarte ikke å nå quiz-tjenesten nå. Sørg for at backend kjører, så kan vi prøve igjen.", + lang = lang + ) + ) + } + + val llmUtterance = callTriviaTurn( + phase = "ask", + question = quiz.question, + answer = quiz.answer, + userAnswer = null, + preferredLanguage = preferredLanguage + ) + + val fallback = localized( + en = "Here is your question: ${quiz.question}", + no = "Her er spørsmålet ditt: ${quiz.question}", + lang = lang + ) + + val toAsk = if (llmUtterance.isNotBlank()) llmUtterance else fallback + return Pair(quiz, toAsk) +} + +/** Call backend /trivia/turn to get a localized trivia utterance from the LLM. 
*/ +private fun callTriviaTurn( + phase: String, + question: String, + answer: String, + userAnswer: String?, + preferredLanguage: String +): String { + val baseUrl = BACKEND_URL + val client = OkHttpClient.Builder() + .connectTimeout(30, TimeUnit.SECONDS) + .readTimeout(30, TimeUnit.SECONDS) + .writeTimeout(30, TimeUnit.SECONDS) + .build() + + return try { + val json = JSONObject() + .put("phase", phase) + .put("question", question) + .put("answer", answer) + .put("preferred_language", preferredLanguage) + + if (userAnswer != null) { + json.put("user_answer", userAnswer) + } + + val body = json.toString() + .toRequestBody("application/json; charset=utf-8".toMediaType()) + + val request = Request.Builder() + .url("$baseUrl/trivia/turn") + .post(body) + .build() + + client.newCall(request).execute().use { response -> + if (!response.isSuccessful) { + return "" + } + val respString = response.body?.string() ?: return "" + val jsonResp = JSONObject(respString) + return jsonResp.optString("utterance", "") + } + } catch (e: Exception) { + "" + } +} + // Document Q&A state, inheriting from Parent. fun documentInfoQnA(documentName: String): State = state(parent = Parent) { @@ -45,15 +386,26 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { var lastGestureTime = 0L onEntry { + // For now, the entry into this state is used for the trivia intro flow. + // We still keep the previous behaviour available if needed elsewhere. furhat.gesture(Gestures.Smile) val lang = currentConversationLanguage() - val intro = localized( - en = "Hello! I'm here to help you learn about $documentName. What would you like to know?", - no = "Hei! Jeg er her for å hjelpe deg med $documentName. Hva vil du vite?", - lang = lang - ) - furhat.ask(intro) + val profile = UserState.currentProfile + val name = profile?.name + + val greeting = when (lang) { + AppLanguage.EN -> { + if (name != null) "Hi $name, would you like to do some trivia?" 
+ else "Hi there, would you like to do some trivia?" + } + AppLanguage.NO -> { + if (name != null) "Hei $name, har du lyst til å ta en liten quiz?" + else "Hei, har du lyst til å ta en liten quiz?" + } + } + + furhat.ask(greeting) } onExit { @@ -165,169 +517,599 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { } onResponse { - val userQuestion = it.text.trim() - conversationCount++ - - // Detect language from the *current* question and switch ASR/TTS - val detectedLanguage = LangDetect.detect(userQuestion) - setAppLanguage(detectedLanguage) + val userText = it.text.trim() - val preferredLanguage = when (detectedLanguage) { - AppLanguage.EN -> "English" - AppLanguage.NO -> "Norwegian" + val explicitRequest = explicitLanguageRequest(userText) + if (explicitRequest != null) { + val current = currentConversationLanguage() + if (current != explicitRequest) { + setAppLanguage(explicitRequest) + } + val lang = explicitRequest + val ack = languageSwitchAcknowledgement(lang) + val followUp = when { + conversationCount == 0 -> localized( + en = "Would you like to try a trivia question?", + no = "Har du lyst til å prøve et quizspørsmål?", + lang = lang + ) + lastQuestion.isNotBlank() -> localized( + en = "Here's the current question again: $lastQuestion", + no = "Her er spørsmålet igjen: $lastQuestion", + lang = lang + ) + else -> localized( + en = "How would you like to continue?", + no = "Hvordan vil du fortsette?", + lang = lang + ) + } + furhat.ask("$ack $followUp") + return@onResponse } - // Mood detection stays as-is - userMood = when { - userQuestion.contains(Regex("(great|wonderful|amazing|excellent)", RegexOption.IGNORE_CASE)) -> "positive" - userQuestion.contains(Regex("(bad|terrible|awful|horrible)", RegexOption.IGNORE_CASE)) -> "negative" - else -> "neutral" + // Detect language from the *current* utterance and switch ASR/TTS + var lang = currentConversationLanguage() + val detectedLanguage = LangDetect.detect(userText) + if 
(detectedLanguage != lang) { + setAppLanguage(detectedLanguage) + lang = detectedLanguage } - if (userQuestion.split(" ").size > 5) { - furhat.gesture(Gestures.GazeAway, priority = 1) + // FIRST TURN: accept/decline trivia + if (conversationCount == 0) { + conversationCount++ + + if (isNegative(userText)) { + val reply = localized( + en = "No problem. If you change your mind later, just tell me you want a quiz.", + no = "Ikke noe problem. Si fra hvis du vil ha en quiz senere.", + lang = lang + ) + furhat.say(reply) + goto(Idle) + return@onResponse + } + + if (!isAffirmative(userText)) { + val clarify = localized( + en = "Just to be sure – would you like to try one Norwegian trivia question?", + no = "Bare så jeg er sikker – vil du prøve et norsk quizspørsmål?", + lang = lang + ) + furhat.ask(clarify) + return@onResponse + } + + // User agreed → fetch a question from backend + val quiz = fetchQuizQuestion() + if (quiz == null) { + val errorMsg = localized( + en = "I couldn't load a trivia question right now. Let's talk about the documents instead.", + no = "Jeg klarte ikke å laste et quizspørsmål nå. La oss heller snakke om dokumentene.", + lang = lang + ) + furhat.say(errorMsg) + goto(Idle) + return@onResponse + } + + lastQuestion = quiz.question + lastAnswer = quiz.answer + + val intro = localized( + en = "Great! Here's your question:", + no = "Supert! 
Her kommer spørsmålet:", + lang = lang + ) + furhat.say(intro) + furhat.ask(quiz.question) + return@onResponse } - // Call backend with preferred_language - val answer = callDocumentAgent(userQuestion, preferredLanguage) + // SECOND TURN: user answers the quiz question + if (conversationCount == 1) { + conversationCount++ - val cleanAnswer = answer - .replace(Regex("https?://\\S+"), "") - .replace(Regex("\\s+"), " ") - .trim() + val correct = lastAnswer + val userAnswer = userText.trim() - previousQuestions.add(userQuestion) - previousAnswers.add(cleanAnswer) - lastQuestion = userQuestion - lastAnswer = cleanAnswer - - val currentTime = System.currentTimeMillis() - if (currentTime - lastGestureTime > 5000) { - when (userMood) { - "positive" -> furhat.gesture(Gestures.Smile, priority = 2) - "negative" -> furhat.gesture(Gestures.ExpressSad, priority = 2) - else -> furhat.gesture(Gestures.Nod, priority = 2) + val isCorrect = userAnswer.equals(correct, ignoreCase = true) + + if (isCorrect) { + val msg = localized( + en = "Oh, that was correct! The answer is indeed: $correct.", + no = "Det var riktig! Svaret er: $correct.", + lang = lang + ) + furhat.say(msg) + } else { + val msg = localized( + en = "Nice try, but that wasn't quite right. The correct answer is: $correct.", + no = "Godt forsøkt, men det var ikke helt riktig. Det rette svaret er: $correct.", + lang = lang + ) + furhat.say(msg) } - lastGestureTime = currentTime + + val follow = localized( + en = "Thanks for playing! If you want to explore the NorwAI documents, just ask me a question.", + no = "Takk for at du var med! Hvis du vil utforske NorwAI-dokumentene, er det bare å stille et spørsmål.", + lang = lang + ) + furhat.say(follow) + + goto(Idle) + return@onResponse } + } + + onNoResponse { + val lang = currentConversationLanguage() + furhat.gesture(Gestures.ExpressSad) + furhat.ask( + localized( + en = "I didn't catch that. Could you please repeat your question?", + no = "Jeg oppfattet ikke det. 
Kan du gjenta spørsmålet?", + lang = lang + ) + ) + reentry() + } +} - furhat.say(cleanAnswer) +/** + * Quiz state driven by QA pairs loaded on the backend. + * + * Flow: + * - Robot gives a short intro. + * - Backend provides a random question–answer pair from qa_pairs.json. + * - Robot asks the question and waits for the user(s) to answer. + * - Robot tells the user if the answer was correct and reveals the correct answer. + * - Robot offers another question. + */ +val QuizFromQaPairs: State = state(parent = Parent) { + + var currentQuestion: String = "" + var currentAnswer: String = "" + var expectingAnotherQuestionChoice: Boolean = false + var awaitingStartConfirmation: Boolean = true + var awaitingNameCapture: Boolean = false + + onEntry { + awaitingStartConfirmation = true + expectingAnotherQuestionChoice = false + currentQuestion = "" + currentAnswer = "" + awaitingNameCapture = false + + val lang = currentConversationLanguage() + furhat.gesture(Gestures.Smile) + furhat.ask( + localized( + en = "I'm glad you're here. Would you like to try a trivia question?", + no = "Så hyggelig at du er her. Har du lyst til å prøve et quizspørsmål?", + lang = lang + ) + ) + } + + onResponse { + val lang = currentConversationLanguage() + furhat.gesture(Gestures.Smile) - val client = users.current.perceptionClient val profile = UserState.currentProfile + val userId = profile?.id ?: UserState.getOrCreateTempId() + val stats = loadTriviaStats(userId) + stats?.let { + furhat.say(statsFinalLine(it, lang)) + } - val langCode = when (detectedLanguage) { - AppLanguage.EN -> "en" - AppLanguage.NO -> "no" + furhat.say( + localized( + en = "Thank you for playing quiz with me. Goodbye!", + no = "Takk for at du spilte quiz med meg. 
Ha det bra!", + lang = lang + ) + ) + if (profile?.name.isNullOrBlank()) { + awaitingNameCapture = true + furhat.ask(namePrompt(lang)) + } else { + goto(Idle) } + } - client?.sendTurn( - userId = profile?.id, - language = langCode, - userText = userQuestion, - robotText = cleanAnswer + onResponse { + if (!awaitingNameCapture) { + raise(it) + return@onResponse + } + + val lang = currentConversationLanguage() + val providedName = it.intent.name?.toText()?.trim() + if (providedName.isNullOrBlank()) { + furhat.ask( + localized( + en = "I didn't quite catch that name. What should I call you?", + no = "Jeg fikk ikke helt med meg navnet. Hva skal jeg kalle deg?", + lang = lang + ) + ) + return@onResponse + } + + val profile = UserState.currentProfile + profile?.name = providedName + val userId = profile?.id ?: UserState.getOrCreateTempId() + users.current.perceptionClient?.sendNameUpdate( + userId = userId, + name = providedName ) - // Localized follow-ups - val followUpPrompt = when { - conversationCount == 1 -> when (userMood) { - "positive" -> localized( - en = "What would you like to know more about?", - no = "Hva vil du vite mer om?", - lang = detectedLanguage + awaitingNameCapture = false + furhat.say( + localized( + en = "Great, I'll remember you as $providedName and keep your quiz stats under that name.", + no = "Flott, jeg skal huske deg som $providedName og lagre quizen din under det navnet.", + lang = lang + ) + ) + goto(Idle) + } + + onResponse { + val userAnswer = it.text.trim() + var lang = currentConversationLanguage() + + val explicitRequest = explicitLanguageRequest(userAnswer) + if (explicitRequest != null) { + if (lang != explicitRequest) { + setAppLanguage(explicitRequest) + lang = explicitRequest + LanguageManager.pinned = explicitRequest + } + val ack = languageSwitchAcknowledgement(lang) + val followUp = when { + awaitingStartConfirmation -> localized( + en = "Would you like to try a trivia question?", + no = "Har du lyst til å prøve et 
quizspørsmål?", + lang = lang ) - "negative" -> localized( - en = "Would you like me to explain that differently?", - no = "Vil du at jeg skal forklare det på en annen måte?", - lang = detectedLanguage + expectingAnotherQuestionChoice -> localized( + en = "Would you like another question, or should we stop here?", + no = "Vil du ha et nytt spørsmål, eller skal vi stoppe her?", + lang = lang + ) + currentQuestion.isNotBlank() -> localized( + en = "Here is the current question again: $currentQuestion", + no = "Her er spørsmålet igjen: $currentQuestion", + lang = lang ) else -> localized( - en = "What interests you most about that?", - no = "Hva synes du er mest interessant med det?", - lang = detectedLanguage + en = "How would you like to continue?", + no = "Hvordan ønsker du å fortsette?", + lang = lang ) } + furhat.ask("$ack $followUp") + return@onResponse + } - conversationCount == 2 -> when (userMood) { - "positive" -> localized( - en = "Want to explore that further?", - no = "Vil du utforske det videre?", - lang = detectedLanguage + // Handle "do you remember me?" and "what's my score?" at any time. 
+ val profile = UserState.currentProfile + val userId = profile?.id ?: UserState.getOrCreateTempId() + if (isRememberMeQuery(userAnswer)) { + val knownName = profile?.name?.takeIf { it.isNotBlank() } + if (knownName != null) { + val namePart = localized( + en = "Yes, $knownName, I remember you.", + no = "Ja, $knownName, jeg husker deg.", + lang = lang ) - "negative" -> localized( - en = "Would you like me to clarify anything?", - no = "Vil du at jeg skal forklare noe nærmere?", - lang = detectedLanguage + furhat.say(namePart) + val stats = loadTriviaStats(userId) + if (stats != null && stats.totalQuestions > 0) { + furhat.say(statsProgressLine(stats, lang)) + } + val follow = if (awaitingStartConfirmation) { + localized( + en = "Would you like to try a trivia question now?", + no = "Har du lyst til å prøve et quizspørsmål nå?", + lang = lang + ) + } else if (currentQuestion.isNotBlank()) { + localized( + en = "Here is the current question again: $currentQuestion", + no = "Her er spørsmålet igjen: $currentQuestion", + lang = lang + ) + } else { + localized( + en = "How would you like to continue?", + no = "Hvordan vil du fortsette?", + lang = lang + ) + } + furhat.ask(follow) + } else { + furhat.say( + localized( + en = "Yes, I recognize you, but I don’t have your name yet.", + no = "Ja, jeg kjenner deg igjen, men jeg har ikke navnet ditt ennå.", + lang = lang + ) ) - else -> localized( - en = "What would you like to know more about?", - no = "Hva vil du vite mer om?", - lang = detectedLanguage + awaitingNameCapture = true + furhat.ask(namePrompt(lang)) + } + return@onResponse + } + + if (isScoreQuery(userAnswer)) { + val stats = loadTriviaStats(userId) + if (stats != null && stats.totalQuestions > 0) { + furhat.say(statsProgressLine(stats, lang)) + } else { + furhat.say( + localized( + en = "I haven't recorded any quiz answers for you yet.", + no = "Jeg har ikke registrert noen quizsvar for deg ennå.", + lang = lang + ) + ) + } + val follow = if 
(awaitingStartConfirmation) { + localized( + en = "Want to start with a trivia question?", + no = "Vil du starte med et quizspørsmål?", + lang = lang + ) + } else if (currentQuestion.isNotBlank()) { + localized( + en = "Here's the current question again: $currentQuestion", + no = "Her er spørsmålet igjen: $currentQuestion", + lang = lang + ) + } else { + localized( + en = "Should we continue with another question?", + no = "Skal vi fortsette med et nytt spørsmål?", + lang = lang ) } + furhat.ask(follow) + return@onResponse + } - else -> { - try { - val engagePrompt = callEngageUser(documentName, cleanAnswer) - if (engagePrompt.isNotEmpty()) { - engagePrompt - } else { - when (userMood) { - "positive" -> localized( - en = "What would you like to explore next?", - no = "Hva vil du utforske videre?", - lang = detectedLanguage - ) - "negative" -> localized( - en = "Would you like me to explain something else?", - no = "Vil du at jeg skal forklare noe annet?", - lang = detectedLanguage - ) - else -> localized( - en = "What interests you most?", - no = "Hva synes du er mest interessant?", - lang = detectedLanguage - ) - } - } - } catch (e: Exception) { - when (userMood) { - "positive" -> localized( - en = "What would you like to explore next?", - no = "Hva vil du utforske videre?", - lang = detectedLanguage + if (awaitingNameCapture) { + val cleaned = extractNameFromUtterance(userAnswer) + + if (!cleaned.isNullOrBlank()) { + val profile = UserState.currentProfile + profile?.name = cleaned + users.current.perceptionClient?.sendNameUpdate( + userId = profile?.id, + name = cleaned + ) + + awaitingNameCapture = false + furhat.say( + localized( + en = "Perfect, I'll remember you as $cleaned and keep your quiz stats linked to that name.", + no = "Supert, jeg skal huske deg som $cleaned og knytte quizstatistikken til det navnet.", + lang = lang + ) + ) + goto(Idle) + } else { + furhat.ask(namePrompt(lang)) + } + return@onResponse + } + + val detectedLanguage = 
LangDetect.detect(userAnswer) + // Honor pinned language if user explicitly set one; otherwise allow detect to switch on strong signals only. + val targetLang = LanguageManager.pinned ?: detectedLanguage + if (targetLang != lang) { + setAppLanguage(detectedLanguage) + lang = targetLang + } + + val preferredLanguage = when (lang) { + AppLanguage.EN -> "English" + AppLanguage.NO -> "Norwegian" + } + + if (awaitingStartConfirmation) { + when { + isNegative(userAnswer) -> { + furhat.say( + localized( + en = "No problem, we can do the quiz another time.", + no = "Ikke noe problem, vi kan ta quizen en annen gang.", + lang = lang ) - "negative" -> localized( - en = "Would you like me to explain something else?", - no = "Vil du at jeg skal forklare noe annet?", - lang = detectedLanguage + ) + goto(Idle) + return@onResponse + } + + isAffirmative(userAnswer) -> { + awaitingStartConfirmation = false + val (quiz, utterance) = prepareTriviaQuestionUtterance(lang, preferredLanguage) + if (quiz == null) { + furhat.say(utterance) + goto(Idle) + return@onResponse + } + + currentQuestion = quiz.question + currentAnswer = quiz.answer + expectingAnotherQuestionChoice = false + furhat.ask(utterance) + return@onResponse + } + + else -> { + furhat.ask( + localized( + en = "Just to be sure – would you like to try a trivia question?", + no = "Bare så jeg er sikker – vil du prøve et quizspørsmål?", + lang = lang ) - else -> localized( - en = "What interests you most?", - no = "Hva synes du er mest interessant?", - lang = detectedLanguage + ) + return@onResponse + } + } + } + + if (expectingAnotherQuestionChoice) { + when { + isNegative(userAnswer) || + userAnswer.contains("thanks", ignoreCase = true) || + userAnswer.contains("thank you", ignoreCase = true) || + userAnswer.contains("done", ignoreCase = true) || + userAnswer.contains("stop", ignoreCase = true) -> { + furhat.say( + localized( + en = "Okay, we'll stop the quiz here. Thanks for playing!", + no = "Greit, vi avslutter quizen her. 
Takk for at du spilte!", + lang = lang ) + ) + val stats = loadTriviaStats(userId) + stats?.let { furhat.say(statsFinalLine(it, lang)) } + if (profile?.name.isNullOrBlank()) { + awaitingNameCapture = true + furhat.ask(namePrompt(lang)) + } else { + goto(Idle) } + return@onResponse } + + isAffirmative(userAnswer) -> { + val (quiz, utterance) = prepareTriviaQuestionUtterance(lang, preferredLanguage) + if (quiz == null) { + furhat.say(utterance) + goto(Idle) + return@onResponse + } + + currentQuestion = quiz.question + currentAnswer = quiz.answer + expectingAnotherQuestionChoice = false + furhat.ask(utterance) + return@onResponse + } + + else -> { + furhat.ask( + localized( + en = "Just say yes if you want another question, or no if you'd like to stop.", + no = "Si bare ja hvis du vil ha et nytt spørsmål, eller nei hvis du vil stoppe.", + lang = lang + ) + ) + return@onResponse + } + } + } + + if (currentQuestion.isBlank()) { + awaitingStartConfirmation = true + expectingAnotherQuestionChoice = false + furhat.ask( + localized( + en = "Let me ask the question first. Shall I start the quiz now?", + no = "La meg stille spørsmålet først. Skal jeg starte quizen nå?", + lang = lang + ) + ) + return@onResponse + } + + if (userAnswer.contains("thanks", ignoreCase = true) || + userAnswer.contains("thank you", ignoreCase = true) || + userAnswer.contains("done", ignoreCase = true) || + userAnswer.contains("stop", ignoreCase = true) + ) { + furhat.say( + localized( + en = "No worries, we can stop the quiz here. Thanks for playing!", + no = "Ikke noe problem, vi stopper quizen her. 
Takk for at du spilte!", + lang = lang + ) + ) + val stats = loadTriviaStats(userId) + stats?.let { furhat.say(statsFinalLine(it, lang)) } + if (profile?.name.isNullOrBlank()) { + awaitingNameCapture = true + furhat.ask(namePrompt(lang)) + } else { + goto(Idle) } + return@onResponse } - when (userMood) { - "positive" -> furhat.gesture(Gestures.Smile, priority = 2) - "negative" -> furhat.gesture(Gestures.ExpressSad, priority = 2) - else -> furhat.gesture(Gestures.Nod, priority = 2) + // Basic correctness check for gesture only: case-insensitive full or partial match. + fun norm(s: String): String = s.lowercase() + .replace(Regex("[^\\p{L}\\p{N}\\s]"), " ") + .replace(Regex("\\s+"), " ") + .trim() + val normUser = norm(userAnswer) + val normCorrect = norm(currentAnswer) + val userTokens = normUser.split(" ").filter { it.isNotBlank() } + val correctTokens = normCorrect.split(" ").filter { it.isNotBlank() } + val tokenOverlap = if (userTokens.isNotEmpty() && correctTokens.isNotEmpty()) { + userTokens.intersect(correctTokens.toSet()).size.toDouble() / correctTokens.size.toDouble() + } else 0.0 + val correct = + normUser == normCorrect || + normCorrect.contains(normUser) || + normUser.contains(normCorrect) || + tokenOverlap >= 0.4 + + if (correct) { + furhat.gesture(Gestures.Smile, priority = 2) + } else { + furhat.gesture(Gestures.ExpressSad, priority = 2) + } + + recordTriviaResult(userId, correct) + + val llmUtterance = callTriviaTurn( + phase = "feedback", + question = currentQuestion, + answer = currentAnswer, + userAnswer = userAnswer, + preferredLanguage = preferredLanguage + ) + + expectingAnotherQuestionChoice = true + + val toAsk = if (llmUtterance.isNotBlank()) { + llmUtterance + } else { + localized( + en = "The correct answer is: $currentAnswer. Would you like another question?", + no = "Det riktige svaret er: $currentAnswer. 
Vil du ha et nytt spørsmål?", + lang = lang + ) } - furhat.ask(followUpPrompt) + // Use ask() so that the user can immediately say yes/no to another question. + furhat.ask(toAsk) } onNoResponse { val lang = currentConversationLanguage() - furhat.gesture(Gestures.ExpressSad) + furhat.gesture(Gestures.Nod) furhat.ask( localized( - en = "I didn't catch that. Could you please repeat your question?", - no = "Jeg oppfattet ikke det. Kan du gjenta spørsmålet?", + en = "I didn't quite catch your answer. Could you repeat it?", + no = "Jeg oppfattet ikke helt svaret ditt. Kan du gjenta det?", lang = lang ) ) @@ -337,7 +1119,7 @@ fun documentInfoQnA(documentName: String): State = state(parent = Parent) { // Helper function to call the /ask endpoint (now with preferred_language). private fun callDocumentAgent(question: String, preferredLanguage: String): String { - val baseUrl = AWS_BACKEND_URL + val baseUrl = BACKEND_URL val client = OkHttpClient.Builder() .connectTimeout(60, TimeUnit.SECONDS) .readTimeout(60, TimeUnit.SECONDS) @@ -407,7 +1189,7 @@ private fun callDocumentAgent(question: String, preferredLanguage: String): Stri // Helper function to call the /engage endpoint (unchanged logic, language handled by backend). 
private fun callEngageUser(documentName: String, answer: String): String { - val baseUrl = AWS_BACKEND_URL // or switch based on config if needed + val baseUrl = BACKEND_URL // automatically resolves to local if available val client = OkHttpClient.Builder() .connectTimeout(30, TimeUnit.SECONDS) .readTimeout(30, TimeUnit.SECONDS) diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt index baa61a7..98b71b3 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/documentWaitingToStart.kt @@ -2,7 +2,11 @@ package furhatos.app.templateadvancedskill.flow.main import furhatos.flow.kotlin.* import furhatos.app.templateadvancedskill.flow.Parent -import furhatos.app.templateadvancedskill.params.AWS_BACKEND_URL +import furhatos.app.templateadvancedskill.params.BACKEND_URL +import furhatos.app.templateadvancedskill.perception.UserMemory +import furhatos.app.templateadvancedskill.perception.UserState +import furhatos.app.templateadvancedskill.flow.main.restartPerceptionClient +import furhatos.flow.kotlin.users import okhttp3.MediaType.Companion.toMediaType import okhttp3.OkHttpClient import okhttp3.Request @@ -13,39 +17,25 @@ import java.net.ConnectException import java.net.SocketTimeoutException import java.util.concurrent.TimeUnit +/** + * Initial handoff state after Greeting. + * Marks returning users and jumps straight into the trivia flow. + */ val DocumentWaitingToStart: State = state(parent = Parent) { onEntry { - furhat.ask( - "I'm ready to assist with your document questions. " + - "Could you please tell me what subject you're interested in, " + - "or simply the name of the document?" 
- ) - } - - // When any response is detected, transition to document-specific Q&A. - onResponse { - val userInput = it.text.trim() + val profile = UserState.currentProfile - // Call the API endpoint /get_docs to perform document retrieval/classification. - val bestDocName = callGetDocs(userInput) - - if (bestDocName.isNullOrBlank()) { - furhat.say( - "I'm having trouble finding a matching document right now. " + - "Could you try rephrasing the title or subject?" - ) - reentry() - } else { - goto(documentInfoQnA(bestDocName)) + // Mark the user as seen so future sessions in this process + // can be treated as returning. + if (profile != null) { + UserMemory.markSeen(profile.id) } - } - onNoResponse { - furhat.ask( - "I didn't catch that. Please tell me the subject or the name of the document you're interested in." - ) - reentry() + // Ensure perception WS is running for every conversation entry. + restartPerceptionClient(furhat, users.current) + + goto(QuizFromQaPairs) } } @@ -54,7 +44,7 @@ val DocumentWaitingToStart: State = state(parent = Parent) { * returns the best matching document name (as provided by the backend). */ fun callGetDocs(userInput: String): String? { - val url = "$AWS_BACKEND_URL/get_docs" + val url = "$BACKEND_URL/get_docs" val client = OkHttpClient.Builder() .connectTimeout(30, TimeUnit.SECONDS) @@ -82,12 +72,14 @@ fun callGetDocs(userInput: String): String? { json.getString("response") } } catch (e: ConnectException) { - // Fallback value – you may want to handle this more gracefully in your flow - "I'm sorry, I cannot connect to the server right now. Please try again later." + println("callGetDocs connection error: ${e.message}") + null } catch (e: SocketTimeoutException) { - "I'm sorry, the server is taking too long to respond. Please try again later." + println("callGetDocs timeout: ${e.message}") + null } catch (e: Exception) { - "I apologize, but I encountered an error processing your request." 
+ println("callGetDocs general error: ${e.message}") + null } } diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt index 9285442..53cce85 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/flow/main/greeting.kt @@ -1,8 +1,8 @@ package furhatos.app.templateadvancedskill.flow.main import furhatos.app.templateadvancedskill.language.AppLanguage -import furhatos.app.templateadvancedskill.language.I18n import furhatos.app.templateadvancedskill.language.setAppLanguage +import furhatos.app.templateadvancedskill.params.BACKEND_URL import furhatos.app.templateadvancedskill.perception.PerceptionClient import furhatos.flow.kotlin.* import furhatos.flow.kotlin.Furhat @@ -10,6 +10,7 @@ import furhatos.flow.kotlin.furhat.audiofeed.AudioFeedListener import furhatos.flow.kotlin.furhat.camerafeed.CameraFeedListener import furhatos.flow.kotlin.furhat.camerafeed.FaceData import furhatos.flow.kotlin.users +import furhatos.records.User import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.SupervisorJob @@ -41,6 +42,7 @@ private object PerceptionCameraListener : CameraFeedListener { override fun cameraImage(image: BufferedImage, imageData: ByteArray, faces: List) { val client = PerceptionStreaming.activeClient ?: return val payload = byteArrayOf(0x01) + imageData + println("PerceptionCameraListener sending frame bytes=${payload.size}") client.sendBinary(payload) } } @@ -49,40 +51,46 @@ private object PerceptionAudioListener : AudioFeedListener { override fun audioData(data: ByteArray) { val client = PerceptionStreaming.activeClient ?: return val payload = byteArrayOf(0x02) + data + println("PerceptionAudioListener sending audio 
bytes=${payload.size}") client.sendBinary(payload) } } -private const val PERCEPTION_WS_URL = "ws://localhost:8000/ws/perception" +private val PERCEPTION_WS_URL: String = + BACKEND_URL.replaceFirst("http", "ws") + "/ws/perception" private val perceptionScope = CoroutineScope(SupervisorJob() + Dispatchers.IO) + /** + * Force-start (or restart) perception WS streaming for the current user/session. + * Always sets a fresh sessionId and ensures listeners are attached. + * Uses the user-scoped perceptionClient property to keep association stable. + */ + fun restartPerceptionClient(furhat: Furhat, currentUser: User) { + // Close any existing client + currentUser.perceptionClient?.close() + PerceptionStreaming.stop() + + val sessionId = "furhat-session-" + System.currentTimeMillis() + val client = PerceptionClient( + backendWsUrl = PERCEPTION_WS_URL, + sessionId = sessionId, + robotId = "furhat-ntnu-01" + ) + + currentUser.perceptionClient = client + client.connect(perceptionScope) + PerceptionStreaming.start(client) + PerceptionStreaming.ensureListeners(furhat) + println("Perception WS connect initiated to $PERCEPTION_WS_URL (session=$sessionId)") + } + val Greeting: State = state { onEntry { - val sessionId = "furhat-session-" + System.currentTimeMillis() - - val client = PerceptionClient( - backendWsUrl = PERCEPTION_WS_URL, - sessionId = sessionId, - robotId = "furhat-ntnu-01" - ) - - users.current.perceptionClient = client - client.connect(perceptionScope) - PerceptionStreaming.start(client) - PerceptionStreaming.ensureListeners(furhat) + restartPerceptionClient(furhat, users.current) setAppLanguage(AppLanguage.EN) - furhat.say(I18n.t("intro_bilingual")) - - val docName = "this topic" - furhat.say(I18n.t("greet", docName)) goto(DocumentWaitingToStart) } - - onExit { - users.current.perceptionClient?.close() - PerceptionStreaming.stop() - } } \ No newline at end of file diff --git 
a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt index c37ac36..1d9e8f7 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LangDetect.kt @@ -1,22 +1,95 @@ package furhatos.app.templateadvancedskill.language +/** + * Lightweight heuristic EN/NO detector tuned for short ASR snippets. + * Avoids extra dependencies; relies on diacritics, stopwords, suffixes, + * and simple n-gram hints to bias toward Norwegian when signals are clear. + */ object LangDetect { + private var lastStable: AppLanguage = AppLanguage.EN + + /** + * Heuristic detector for English vs Norwegian. + * + * Design choices (no external packages to keep footprint small): + * - Immediate NO if Norwegian diacritics (æ/ø/å) are present. + * - Token scoring against compact stopword lists for EN/NO. + * - Norwegian boosts via suffix patterns, common bigrams, and “kj/skj” chars. + * - Margin-based decision to avoid flip-flopping; otherwise reuse lastStable. + * + * Tuned for short ASR hypotheses where statistical detectors often overfit to English. 
+ */ fun detect(text: String): AppLanguage { - val t = text.lowercase() + val normalized = text.lowercase() + .replace(Regex("[^\\p{L}\\p{M}\\s]"), " ") + .replace(Regex("\\s+"), " ") + .trim() + + if (normalized.isBlank()) return lastStable - // Check for Norwegian special characters - val norwegianChars = listOf('æ', 'ø', 'å') - if (norwegianChars.any { it in t }) { + // Fast path: Norwegian diacritics + if (normalized.any { it == 'æ' || it == 'ø' || it == 'å' }) { + lastStable = AppLanguage.NO return AppLanguage.NO } - // Common Norwegian words - val norwegianWords = listOf("ikke", "hvordan", "hva", "hvorfor", "forklar", "beskriv", "omtrent") - if (norwegianWords.any { t.contains(it) }) { + val tokens = normalized.split(" ").filter { it.isNotBlank() } + + // Minimal stopword sets chosen for low false positives on short utterances + val noStops = setOf( + "og", "i", "på", "ikke", "hva", "hvorfor", "hvordan", "hvor", "hvem", "hvilken", "hvilket", + "den", "det", "dette", "der", "til", "skal", "kan", "vil", "bare", "litt", "omtrent", + "snakk", "norsk", "engelsk", "quiz", "spørsmål", "svaret", "riktig", "feil", + "nei", "ja", "gjerne", "takk", "vær", "vær så snill", "vennligst", "flott", "supert" + ) + val enStops = setOf( + "and", "in", "on", "the", "a", "of", "why", "how", "what", "where", "who", + "this", "that", "there", "to", "will", "can", "just", "little", "about", + "speak", "english", "norwegian", "quiz", "question", "answer", "right", "wrong" + ) + + // Explicit language keywords override weak signals + if (tokens.any { it == "norsk" || it == "norwegian" || it == "på" && tokens.contains("norsk") }) { + lastStable = AppLanguage.NO return AppLanguage.NO } + if (tokens.any { it == "english" || it == "engelsk" }) { + lastStable = AppLanguage.EN + return AppLanguage.EN + } + + var noScore = 0 + var enScore = 0 + + for (t in tokens) { + if (t in noStops) noScore++ + if (t in enStops) enScore++ + } - // Default: English - return AppLanguage.EN + // Boost 
Norwegian if any token ends with common Norwegian suffixes + val noSuffixes = listOf("en", "et", "ene", "ende", "het", "heten", "lig", "lige") + if (tokens.any { t -> noSuffixes.any { suf -> t.length > suf.length && t.endsWith(suf) } }) { + noScore += 2 + } + + // Bigram hints (common Norwegian question openers) + val textNoPunct = normalized + if (textNoPunct.startsWith("hva er") || textNoPunct.startsWith("hvordan") || textNoPunct.startsWith("hvorfor") || textNoPunct.startsWith("hvor er")) { + noScore += 2 + } + + // Character n-gram hints (simple heuristic) + if (normalized.contains("kj") || normalized.contains("skj")) { + noScore += 1 + } + + // Decide by margin; if close, use last stable to prevent flip-flop. + val decision = when { + noScore >= enScore + 2 -> AppLanguage.NO + enScore >= noScore + 2 -> AppLanguage.EN + else -> lastStable + } + lastStable = decision + return decision } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt index 9a81a5f..11b3c3d 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/language/LanguageManager.kt @@ -6,30 +6,38 @@ import furhatos.util.Language /** * Tracks the current language of the robot (English or Norwegian). + * `pinned` is set when the user explicitly asks for a language and prevents + * auto-detection overrides until cleared. */ object LanguageManager { var current: AppLanguage = AppLanguage.EN + var pinned: AppLanguage? = null } /** - * Extension function that can be called from any Furhat state (because "this" is a FlowControlRunner). + * Extension to set Furhat ASR + TTS for the given language.
* - * Sets Furhat's: - * - ASR language - * - TTS voice + * Uses built-in Polly voices: + * - EN: Kendra-Neural (fallback to default EN if not installed) + * - NO: Ida-Neural (fallback to default NO if not installed) * - * Usage inside a state: - * setAppLanguage(AppLanguage.NO) + * Called from any state (FlowControlRunner receiver). */ fun FlowControlRunner.setAppLanguage(lang: AppLanguage) { if (LanguageManager.current == lang) return // No change needed LanguageManager.current = lang when (lang) { - AppLanguage.EN -> - furhat.setVoice(Language.ENGLISH_US, "Matthew", true) + AppLanguage.EN -> { + furhat.setInputLanguage(Language.ENGLISH_US) + runCatching { furhat.setVoice(language = Language.ENGLISH_US, name = "Kendra-Neural", setInputLanguage = true) } + .recoverCatching { furhat.setVoice(language = Language.ENGLISH_US, setInputLanguage = true) } + } - AppLanguage.NO -> - furhat.setVoice(Language.NORWEGIAN, "Hans", true) + AppLanguage.NO -> { + furhat.setInputLanguage(Language.NORWEGIAN) + runCatching { furhat.setVoice(language = Language.NORWEGIAN, name = "Ida-Neural", setInputLanguage = true) } + .recoverCatching { furhat.setVoice(language = Language.NORWEGIAN, setInputLanguage = true) } + } } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt index 734deee..038e636 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/nlu/intents.kt @@ -12,85 +12,142 @@ import furhatos.app.templateadvancedskill.language.AppLanguage class NiceToMeetYouIntent : Intent() { override fun getExamples(lang: Language): List { - return listOf( - "glad to meet you", - "a pleasure to meet you", - "nice to see you", - "great to meet you", - "happy to see you", - "very nice to 
finally meet you", - "fun to meet up with you" - ) + return when (lang) { + Language.NORWEGIAN -> listOf( + "hyggelig å møte deg", + "godt å se deg", + "hyggelig å treffe deg", + "så kjekt å møte deg", + "hyggelig å se deg", + "fint å endelig møte deg", + "kjekt å treffes" + ) + else -> listOf( + "glad to meet you", + "a pleasure to meet you", + "nice to see you", + "great to meet you", + "happy to see you", + "very nice to finally meet you", + "fun to meet up with you" + ) + } } } class HowAreYouIntent : Intent() { override fun getExamples(lang: Language): List { - return listOf( - "how are you", - "how are you doing today", - "what's up", - "how are things with you", - "how's it going?", - "how are you feeling", - "how's life", - "what's going on with you" - ) + return when (lang) { + Language.NORWEGIAN -> listOf( + "hvordan går det", + "hvordan har du det", + "hvordan går det i dag", + "går det bra", + "hvordan står det til", + "hva skjer", + "hvordan føler du deg" + ) + else -> listOf( + "how are you", + "how are you doing today", + "what's up", + "how are things with you", + "how's it going?", + "how are you feeling", + "how's life", + "what's going on with you" + ) + } } } class HelpIntent : Intent() { override fun getExamples(lang: Language): List { - return listOf( - "I need help", - "help me please", - "can someone help me", - "I need assistance" - ) + return when (lang) { + Language.NORWEGIAN -> listOf( + "jeg trenger hjelp", + "kan du hjelpe meg", + "hjelp meg", + "kan noen hjelpe", + "jeg trenger assistanse" + ) + else -> listOf( + "I need help", + "help me please", + "can someone help me", + "I need assistance" + ) + } } } class WhatIsThisIntent : Intent() { override fun getExamples(lang: Language): List { - return listOf( - "What is this", - "what am I supposed to say", - "what should I say", - "I don't know what to do", - "what am I supposed to do now", - "should I say something", - "what's going on", - "what is happening here", - "can someone tell 
me what is going on" - ) + return when (lang) { + Language.NORWEGIAN -> listOf( + "hva er dette", + "hva skal jeg si", + "hva burde jeg si", + "jeg vet ikke hva jeg skal gjøre", + "hva gjør jeg nå", + "burde jeg si noe", + "hva skjer", + "hva er det som skjer her", + "kan noen si meg hva som foregår" + ) + else -> listOf( + "What is this", + "what am I supposed to say", + "what should I say", + "I don't know what to do", + "what am I supposed to do now", + "should I say something", + "what's going on", + "what is happening here", + "can someone tell me what is going on" + ) + } } } class UncertainResponseIntent : Intent() { override fun getExamples(lang: Language): List { - return listOf( - "I don't know", - "I'm not sure", - "what do you think", - "what's your opinion", - "I see it being", - "I think it could be", - "maybe", - "possibly", - "perhaps", - "I'm not certain", - "I'm uncertain", - "I'm not sure about that", - "I'm not sure what to think", - "I'm not sure what to say", - "I'm not sure what to do", - "I'm not sure what to make of that", - "I'm not sure what to make of this", - "I'm not sure what to make of it", - "I'm not sure what to make of that", - "I'm not sure what to make of this", - "I'm not sure what to make of it" - ) + return when (lang) { + Language.NORWEGIAN -> listOf( + "jeg vet ikke", + "jeg er ikke sikker", + "hva tenker du", + "hva er din mening", + "kanskje", + "muligens", + "det kan være", + "jeg er usikker", + "jeg vet ikke helt", + "ikke helt sikker på det", + "jeg er ikke sikker på hva jeg skal si", + "jeg er ikke sikker på hva jeg skal gjøre" + ) + else -> listOf( + "I don't know", + "I'm not sure", + "what do you think", + "what's your opinion", + "I see it being", + "I think it could be", + "maybe", + "possibly", + "perhaps", + "I'm not certain", + "I'm uncertain", + "I'm not sure about that", + "I'm not sure what to think", + "I'm not sure what to say", + "I'm not sure what to do", + "I'm not sure what to make of that", + "I'm 
not sure what to make of this", + "I'm not sure what to make of it" + ) + } } } diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/parms.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/parms.kt index 99fe881..bbcbe21 100755 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/parms.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/parms.kt @@ -1,8 +1,64 @@ package furhatos.app.templateadvancedskill + +import java.net.InetSocketAddress +import java.net.URI +import java.net.Socket +/** + * Central place for backend/robot URLs. + * + * BACKEND_URL defaults to the laptop IP (override via env BACKEND_URL). + * ROBOT_IP_ADDRESS tracks the current robot address for reachability/debug. + */ object params { - val LOCAL_BACKEND_URL = "http://localhost:8000" - val AWS_BACKEND_URL = "http://51.20.7.41:8000" - val ROBOT_IP_ADDRESS = "10.53.50.123" -// val BACKEND_IP_ADDRESS = System.getenv("BACKEND_IP_ADDRESS") ?: "13.49.238.142" //!!!!!NOTE: change this ip address each time you rerun + private const val LAPTOP_BACKEND_URL = "" + private const val CLOUD_BACKEND_URL = "http://51.20.7.41:8000" + private const val LOCAL_BACKEND_URL = "http://localhost:8000" + + private val envOverride = System.getenv("BACKEND_URL")?.takeIf { it.isNotBlank() } + + /** + * Default to the developer laptop IP unless explicitly overridden. + * This ensures the robot never falls back to localhost, which it cannot reach. + */ + val BACKEND_URL: String by lazy { + val resolved = envOverride ?: LAPTOP_BACKEND_URL + println("[params] BACKEND_URL resolved to $resolved") + resolved + } + + // Updated to the latest robot IP provided by the user. + val ROBOT_IP_ADDRESS = "" + + /** + * Kept around in case we want to re-enable automatic detection later. 
+ */ + @Suppress("unused") + private fun determineBackendUrl(): String { + return when { + isHostReachable(LAPTOP_BACKEND_URL) -> LAPTOP_BACKEND_URL + isHostReachable(CLOUD_BACKEND_URL) -> CLOUD_BACKEND_URL + else -> LOCAL_BACKEND_URL + } + } + + private fun isHostReachable(targetUrl: String): Boolean { + return try { + val uri = URI(targetUrl) + val port = if (uri.port != -1) { + uri.port + } else { + when (uri.scheme?.lowercase()) { + "https" -> 443 + else -> 80 + } + } + Socket().use { socket -> + socket.connect(InetSocketAddress(uri.host, port), 1500) + true + } + } catch (_: Exception) { + false + } + } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt index dcc7647..451ae71 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/PerceptionClient.kt @@ -4,12 +4,20 @@ import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper import com.fasterxml.jackson.module.kotlin.readValue import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.delay import kotlinx.coroutines.launch +import kotlinx.coroutines.Job import okhttp3.* import okio.ByteString.Companion.toByteString /** * Client for the /ws/perception WebSocket endpoint. + * + * Responsibility: + * - Open and maintain the WS to the backend. + * - Stream binary camera/audio frames with a 1-byte prefix (0x01 video, 0x02 audio). + * - Send text payloads for turns and name updates. + * - Keep the socket alive with periodic pings and automatic reconnects. */ class PerceptionClient( private val backendWsUrl: String, // e.g. 
"ws://localhost:8000/ws/perception" @@ -22,12 +30,28 @@ class PerceptionClient( @Volatile private var webSocket: WebSocket? = null + @Volatile + private var reconnectAttempts: Int = 0 + private val maxReconnectAttempts = 5 + @Volatile + private var scopeRef: CoroutineScope? = null + @Volatile + private var keepaliveJob: Job? = null /** * Connect to the backend WebSocket in the given coroutine scope. * This should be called once per Furhat interaction session. */ fun connect(scope: CoroutineScope) { + scopeRef = scope + reconnectAttempts = 0 + startWebSocket(scope) + } + + /** + * Open the websocket and attach the listener for this client. + */ + private fun startWebSocket(scope: CoroutineScope) { val request = Request.Builder() .url(backendWsUrl) .build() @@ -36,7 +60,10 @@ class PerceptionClient( override fun onOpen(ws: WebSocket, response: Response) { webSocket = ws + println("Perception WS opened to $backendWsUrl (session=$sessionId)") sendHello() + reconnectAttempts = 0 + startKeepalive(scope) } override fun onMessage(ws: WebSocket, text: String) { @@ -46,10 +73,16 @@ class PerceptionClient( override fun onFailure(ws: WebSocket, t: Throwable, response: Response?) { // Log as needed; for now just print println("Perception WS failure: ${t.message}") + webSocket = null + stopKeepalive() + scheduleReconnect() } override fun onClosed(ws: WebSocket, code: Int, reason: String) { println("Perception WS closed: $code / $reason") + webSocket = null + stopKeepalive() + scheduleReconnect() } } @@ -58,6 +91,45 @@ class PerceptionClient( } } + /** + * Schedule a delayed reconnect with capped attempts. 
+ */ + private fun scheduleReconnect() { + val scope = scopeRef ?: return + if (reconnectAttempts >= maxReconnectAttempts) { + println("Perception WS reconnect aborted after $reconnectAttempts attempts") + return + } + reconnectAttempts++ + println("Perception WS scheduling reconnect attempt $reconnectAttempts/$maxReconnectAttempts") + scope.launch(Dispatchers.IO) { + delay(2000) + startWebSocket(this) + } + } + + /** + * Begin periodic ping messages to keep the WS alive. + */ + private fun startKeepalive(scope: CoroutineScope) { + stopKeepalive() + keepaliveJob = scope.launch(Dispatchers.IO) { + while (webSocket != null) { + try { + webSocket?.send("""{"type":"ping","payload":{"session_id":"$sessionId","ts":${System.currentTimeMillis()}}}""") + } catch (e: Exception) { + println("Perception WS keepalive send error: ${e.message}") + } + delay(5000) + } + } + } + + private fun stopKeepalive() { + keepaliveJob?.cancel() + keepaliveJob = null + } + /** * Send "hello" message once connection opens. 
*/ @@ -138,7 +210,14 @@ class PerceptionClient( fun sendBinary(payload: ByteArray) { try { - webSocket?.send(payload.toByteString()) + val socket = webSocket + if (socket == null) { + println("Perception WS binary send skipped: socket is null") + return + } + val type = if (payload.isNotEmpty()) payload[0].toInt() else -1 + println("Perception WS sending binary type=$type bytes=${payload.size}") + socket.send(payload.toByteString()) } catch (e: Exception) { println("Perception WS binary send error: ${e.message}") } diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserMemory.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserMemory.kt new file mode 100644 index 0000000..0ce1cb5 --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserMemory.kt @@ -0,0 +1,27 @@ +package furhatos.app.templateadvancedskill.perception + +/** + * Simple in-memory helper for greeting users differently when + * they return to the robot. + * + * This does not replace the backend database; it just keeps track of + * which user IDs we have already greeted in this skill process. + */ +object UserMemory { + + private val seenUserIds = mutableSetOf() + + /** Returns true if this user ID has been seen before in this process. */ + fun hasSeenBefore(userId: String?): Boolean { + if (userId.isNullOrBlank()) return false + return seenUserIds.contains(userId) + } + + /** Mark this user ID as seen so future greetings can treat them as returning. */ + fun markSeen(userId: String?) 
{ + if (userId.isNullOrBlank()) return + seenUserIds.add(userId) + } +} + + diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt index 1cdd73f..a1a44f2 100644 --- a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/perception/UserState.kt @@ -1,10 +1,18 @@ package furhatos.app.templateadvancedskill.perception +/** + * Holds the currently recognized user profile for the running skill instance. + * Uses a stable temp ID when no backend identity has been resolved yet so we + * can still persist trivia stats and name updates once known. + */ object UserState { @Volatile var currentProfile: UserProfile? = null + @Volatile + private var tempUserId: String? = null + /** * Merge an IdentityUpdatePayload into the local UserProfile. */ @@ -30,4 +38,17 @@ object UserState { existing.lastSeen = update.lastSeen } } + + /** + * Returns the current user id if known; otherwise returns a stable + * session-local temporary id to allow persistence (scores, stats) + * even when perception has not yet resolved a biometric identity. 
+ */ + fun getOrCreateTempId(): String { + val existing = tempUserId + if (existing != null) return existing + val generated = "temp-" + java.util.UUID.randomUUID().toString() + tempUserId = generated + return generated + } } \ No newline at end of file diff --git a/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/trivia/TriviaStats.kt b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/trivia/TriviaStats.kt new file mode 100644 index 0000000..ebc08d4 --- /dev/null +++ b/furhat_skills/Conversation/src/main/kotlin/furhatos/app/templateadvancedskill/trivia/TriviaStats.kt @@ -0,0 +1,144 @@ +package furhatos.app.templateadvancedskill.trivia + +import furhatos.app.templateadvancedskill.params.BACKEND_URL +import okhttp3.MediaType.Companion.toMediaType +import okhttp3.OkHttpClient +import okhttp3.Request +import okhttp3.RequestBody.Companion.toRequestBody +import org.json.JSONObject +import java.time.Instant +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.TimeUnit + +/** + * Local view of a user's trivia statistics. + * Cached in-memory and synchronized with the backend via /memory/trivia. + */ +data class TriviaStats( + var totalQuestions: Int = 0, + var correctAnswers: Int = 0, + var lastUpdated: Instant = Instant.now(), +) + +/** + * In-memory cache keyed by userId to reduce backend calls. + */ +private object TriviaStatsCache { + private val statsByUser = ConcurrentHashMap() + + fun get(userId: String?): TriviaStats? = + userId?.let { statsByUser[it] } + + fun replace(userId: String, stats: TriviaStats) { + statsByUser[userId] = stats + } + + fun record(userId: String, correct: Boolean): TriviaStats { + val stats = statsByUser.getOrPut(userId) { TriviaStats() } + stats.totalQuestions += 1 + if (correct) { + stats.correctAnswers += 1 + } + stats.lastUpdated = Instant.now() + return stats + } +} + +/** + * Thin HTTP client for trivia stats persistence. 
+ */ +private object TriviaStatsApi { + private val client: OkHttpClient by lazy { + OkHttpClient.Builder() + .connectTimeout(10, TimeUnit.SECONDS) + .readTimeout(10, TimeUnit.SECONDS) + .writeTimeout(10, TimeUnit.SECONDS) + .build() + } + + fun fetch(userId: String): TriviaStats? { + return try { + val request = Request.Builder() + .url("${BACKEND_URL}/memory/trivia/$userId") + .get() + .build() + + client.newCall(request).execute().use { response -> + if (!response.isSuccessful) return null + val payload = response.body?.string()?.takeIf { it.isNotBlank() } ?: return null + parseStats(payload) + } + } catch (_: Exception) { + null + } + } + + fun record(userId: String, correct: Boolean): TriviaStats? { + return try { + val json = JSONObject() + .put("user_id", userId) + .put("correct", correct) + + val request = Request.Builder() + .url("${BACKEND_URL}/memory/trivia") + .post( + json.toString() + .toRequestBody("application/json; charset=utf-8".toMediaType()) + ) + .build() + + client.newCall(request).execute().use { response -> + if (!response.isSuccessful) return null + val payload = response.body?.string()?.takeIf { it.isNotBlank() } ?: return null + parseStats(payload) + } + } catch (_: Exception) { + null + } + } + + private fun parseStats(jsonPayload: String): TriviaStats? { + return try { + val json = JSONObject(jsonPayload) + TriviaStats( + totalQuestions = json.optInt("total_questions", 0), + correctAnswers = json.optInt("correct_answers", 0), + lastUpdated = Instant.now(), + ) + } catch (_: Exception) { + null + } + } +} + +/** + * Load trivia stats for the given user, using cache first then backend. + */ +fun loadTriviaStats(userId: String?): TriviaStats? 
{ + if (userId.isNullOrBlank()) return null + val cached = TriviaStatsCache.get(userId) + if (cached != null) { + return cached + } + val fetched = TriviaStatsApi.fetch(userId) + if (fetched != null) { + TriviaStatsCache.replace(userId, fetched) + } + return fetched +} + +/** + * Record a trivia result for the given user. + * Always updates local cache; attempts to sync to backend and returns the freshest stats. + */ +fun recordTriviaResult(userId: String?, correct: Boolean): TriviaStats? { + if (userId.isNullOrBlank()) return null + val local = TriviaStatsCache.record(userId, correct) + val synced = TriviaStatsApi.record(userId, correct) + if (synced != null) { + TriviaStatsCache.replace(userId, synced) + return synced + } + return local +} + diff --git a/middleware/main.py b/middleware/main.py index 0588eee..bfd6f43 100755 --- a/middleware/main.py +++ b/middleware/main.py @@ -31,21 +31,25 @@ retrieving relevant document context, and generating responses using an LLM. """ -from fastapi import FastAPI, HTTPException, WebSocket +from fastapi import FastAPI, HTTPException, WebSocket, Depends from pydantic import BaseModel import uvicorn import asyncio import logging from datetime import datetime +from sqlalchemy.orm import Session # Import the DocumentAgent and utility functions from the backend. from my_furhat_backend.agents.document_agent import DocumentAgent from my_furhat_backend.utils.util import ( - get_list_docs, - classify_text + get_list_docs, + classify_text, ) +from my_furhat_backend.utils.qa_pairs import get_random_qa_pair +from my_furhat_backend.utils.ollama_bootstrap import ensure_ollama_ready -from my_furhat_backend.db.session import init_db +from my_furhat_backend.db import crud +from my_furhat_backend.db.session import init_db, get_db from my_furhat_backend.perception.websocket_handler import perception_ws_handler # Initialize the FastAPI application. 
@@ -58,6 +62,8 @@ class Transcription(BaseModel): content: str + # Optional preferred language hint from the client, e.g. "English" or "Norwegian" + preferred_language: str | None = None class EngageRequest(BaseModel): @@ -65,6 +71,35 @@ class EngageRequest(BaseModel): answer: str +class QuizQuestionResponse(BaseModel): + index: int + question: str + answer: str + + +class TriviaTurnRequest(BaseModel): + phase: str # "ask" or "feedback" + question: str + answer: str + user_answer: str | None = None + preferred_language: str | None = None + + +class TriviaTurnResponse(BaseModel): + utterance: str + + +class TriviaMemoryRequest(BaseModel): + user_id: str + correct: bool + + +class TriviaStatsResponse(BaseModel): + user_id: str + total_questions: int + correct_answers: int + + # For demonstration purposes, using a simple in-memory store for the latest response. latest_response = None @@ -75,6 +110,7 @@ class EngageRequest(BaseModel): @app.on_event("startup") def on_startup(): init_db() + ensure_ollama_ready() logger.info("Database initialized.") @@ -82,7 +118,11 @@ def on_startup(): async def ask_question(transcription: Transcription): global latest_response try: - latest_response = await asyncio.to_thread(agent.run, transcription.content) + latest_response = await asyncio.to_thread( + agent.run, + transcription.content, + transcription.preferred_language, + ) response = { "status": "success", "response": latest_response, @@ -102,7 +142,11 @@ async def ask_question(transcription: Transcription): async def transcribe(transcription: Transcription): global latest_response try: - latest_response = await asyncio.to_thread(agent.run, transcription.content) + latest_response = await asyncio.to_thread( + agent.run, + transcription.content, + transcription.preferred_language, + ) except Exception as e: raise HTTPException(status_code=500, detail=str(e)) return {"status": "transcription received"} @@ -149,6 +193,87 @@ async def engage(engage_request: EngageRequest): raise 
HTTPException(status_code=500, detail=str(e)) +@app.get("/quiz/question", response_model=QuizQuestionResponse) +async def quiz_question() -> QuizQuestionResponse: + """ + Return a single random question–answer pair from qa_pairs.json. + + The answer is included so the frontend can judge correctness and + reveal the correct answer to the user. + """ + pair = get_random_qa_pair() + if pair is None: + raise HTTPException( + status_code=500, + detail="No QA pairs available. Ensure qa_pairs.json is present.", + ) + return QuizQuestionResponse(index=pair.index, question=pair.question, answer=pair.answer) + + +@app.post("/trivia/turn", response_model=TriviaTurnResponse) +async def trivia_turn(req: TriviaTurnRequest) -> TriviaTurnResponse: + """ + LLM-powered helper for the trivia game. + + - phase == "ask": localise and phrase the trivia question in the user's language. + - phase == "feedback": phrase a short correctness/feedback line in the user's language. + """ + try: + utterance = await asyncio.to_thread( + agent.trivia_turn, + req.phase, + req.question, + req.answer, + req.user_answer, + req.preferred_language, + ) + return TriviaTurnResponse(utterance=utterance) + except Exception as e: + logger.error(f"Error in trivia_turn endpoint: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@app.get("/memory/trivia/{user_id}", response_model=TriviaStatsResponse) +async def get_trivia_stats( + user_id: str, + db: Session = Depends(get_db), +) -> TriviaStatsResponse: + if not user_id: + raise HTTPException(status_code=400, detail="user_id is required") + + stats = await asyncio.to_thread(crud.get_trivia_stat, db, user_id) + if stats is None: + return TriviaStatsResponse(user_id=user_id, total_questions=0, correct_answers=0) + + return TriviaStatsResponse( + user_id=user_id, + total_questions=stats.total_questions, + correct_answers=stats.correct_answers, + ) + + +@app.post("/memory/trivia", response_model=TriviaStatsResponse) +async def update_trivia_stats( + 
payload: TriviaMemoryRequest, + db: Session = Depends(get_db), +) -> TriviaStatsResponse: + if not payload.user_id: + raise HTTPException(status_code=400, detail="user_id is required") + + stats = await asyncio.to_thread( + crud.increment_trivia_stat, + db, + payload.user_id, + payload.correct, + ) + + return TriviaStatsResponse( + user_id=stats.user_id, + total_questions=stats.total_questions, + correct_answers=stats.correct_answers, + ) + + @app.websocket("/ws/perception") async def ws_perception(websocket: WebSocket): await perception_ws_handler(websocket) diff --git a/my_furhat_backend/RAG/rag_flow.py b/my_furhat_backend/RAG/rag_flow.py old mode 100755 new mode 100644 index 930d1d1..66a8c55 --- a/my_furhat_backend/RAG/rag_flow.py +++ b/my_furhat_backend/RAG/rag_flow.py @@ -1,333 +1,230 @@ """ -RAG (Retrieval-Augmented Generation) Module - -This module implements a sophisticated document retrieval system using Chroma vector store. -It provides functionality for loading, chunking, and retrieving documents with semantic search -and reranking capabilities. - -Key Features: - - Document loading and chunking - - Vector store management with Chroma - - Semantic search with reranking - - GPU-accelerated embeddings - - Persistent storage +Simple RAG (Retrieval-Augmented Generation) module. + +Design choices: +- Lightweight lexical BM25 (rank_bm25) instead of heavier vector embeddings to + avoid extra dependencies and keep startup fast on low-resource machines. +- Input corpus from DOCUMENTS_PATH; prefers QA JSONs when available for better + trivia/document alignment, otherwise falls back to PDFs/txt. +- In-memory only: no external DB or persistent index to simplify deployment. 
""" -import os +from __future__ import annotations + +import glob +import json import logging -from langchain_huggingface import HuggingFaceEmbeddings -from langchain_community.document_loaders import PyPDFLoader +import os +from typing import List + from langchain_core.documents import Document +from langchain_community.document_loaders import PyPDFLoader from langchain_text_splitters import RecursiveCharacterTextSplitter -from langchain_chroma import Chroma -from langchain.retrievers import ContextualCompressionRetriever -from langchain.retrievers.document_compressors import CrossEncoderReranker -from langchain_community.cross_encoders import HuggingFaceCrossEncoder -from langchain_community.embeddings import LlamaCppEmbeddings +from rank_bm25 import BM25Okapi + from my_furhat_backend.config.settings import config -from my_furhat_backend.utils.gpu_utils import print_gpu_status, clear_gpu_cache -from typing import List -# Set up logging configuration for the module -logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) + class RAG: """ - Manages Retrieval-Augmented Generation (RAG) tasks using a Chroma vector store. - - This class provides a comprehensive interface for: - - Loading and chunking documents (PDFs) - - Managing a persistent Chroma vector store - - Performing semantic search with reranking - - GPU-accelerated document processing - - Attributes: - hf (bool): Whether to use HuggingFace embeddings - persist_directory (str): Path to store the vector database - path_to_document (str): Path to the source document - vector_store (Chroma): The vector store instance - documents (List[Document]): Loaded document chunks - embeddings: The embedding model instance + Minimal RAG helper with BM25 over chunked documents/QA pairs. """ - def __init__(self, hf: bool = True, persist_directory: str = None, path_to_document: str = None): - """ - Initialize the RAG system. 
- - Args: - hf (bool): Whether to use HuggingFace embeddings - persist_directory (str): Path to store the vector database - path_to_document (str): Path to the source document - """ - print_gpu_status() - - self.hf = hf - self.persist_directory = persist_directory or config["VECTOR_STORE_PATH"] - self.path_to_document = path_to_document - - logger.info(f"Initializing RAG with:") - logger.info(f"- HF embeddings: {hf}") - logger.info(f"- Persist directory: {self.persist_directory}") - logger.info(f"- Document path: {self.path_to_document}") - - # Initialize embeddings first - logger.info("Initializing embeddings...") - self.embeddings = self._initialize_embeddings() - - # Then initialize vector store with the embeddings - logger.info("Initializing vector store...") - self.vector_store = self._initialize_vector_store() - - # Load documents and populate vector store if needed - logger.info("Loading documents...") - self.documents = self.__load_docs() - if self.documents: - logger.info(f"Found {len(self.documents)} documents, populating vector store...") - self.__populate_chroma(self.vector_store) - else: - logger.warning("No documents loaded, vector store will be empty") - - print_gpu_status() - - def _initialize_embeddings(self): - """ - Initialize the embedding model. - - Returns: - The initialized embedding model + def __init__( + self, + persist_directory: str | None = None, + pdf_path: str | None = None, # kept for backward-compatibility, no longer required + default_k: int = 20, + ) -> None: + # In the updated version, we treat DOCUMENTS_PATH as a directory + # containing many small source documents (PDFs and/or .txt files). + # The old pdf_path argument is kept only so existing callers do not break, + # but is no longer used directly. 
+ self.documents_path = config["DOCUMENTS_PATH"] + self.default_k = default_k + + # In-memory corpus and BM25 index + self.chunks: List[Document] = [] + self.tokenized_corpus: List[List[str]] = [] + self.bm25: BM25Okapi | None = None + + self._build_index() + + def _load_documents(self) -> List[Document]: """ - if self.hf: - # Auto-detect device: prefer MPS (macOS), then CUDA, then CPU - import torch - if torch.backends.mps.is_available(): - device = 'mps' - elif torch.cuda.is_available(): - device = 'cuda' - else: - device = 'cpu' - logger.info(f"Using device: {device} for embeddings") - - return HuggingFaceEmbeddings( - model_name="sentence-transformers/all-MiniLM-L6-v2", - model_kwargs={'device': device}, - encode_kwargs={'normalize_embeddings': True} - ) - else: - return LlamaCppEmbeddings( - model_path=os.path.join(config["GGUF_MODELS_PATH"], "all-MiniLM-L6-v2-Q4_K_M.gguf"), - n_ctx=2048, - n_batch=512, - n_gpu_layers=32 + Load source documents from DOCUMENTS_PATH. + + Priority rationale: + 1) Prefer QA JSON (qa_pairs.json) to align with trivia use-cases and + avoid noisy PDF extraction when structured Q&A exists. + 2) Otherwise, load PDFs and plain text for a generic corpus. 
+ """ + base_dir = self.documents_path + if not base_dir or not os.path.isdir(base_dir): + logger.warning("RAG: documents directory not found at %s", base_dir) + return [] + + # --- 1) Prefer JSON QA-pair documents, if present --- + json_pattern = os.path.join(base_dir, "*.json") + json_paths = sorted(glob.glob(json_pattern)) + qa_docs: List[Document] = [] + + for path in json_paths: + try: + with open(path, "r", encoding="utf-8") as f: + data = json.load(f) + except Exception as exc: # noqa: BLE001 + logger.warning("RAG: failed to load JSON %s: %s", path, exc) + continue + + # Expect structure like {"qa_pairs": [{"question": "...", "answer": "..."}]} + pairs = data.get("qa_pairs") + if not isinstance(pairs, list): + continue + + for idx, pair in enumerate(pairs): + if not isinstance(pair, dict): + continue + question = str(pair.get("question") or "").strip() + answer = str(pair.get("answer") or "").strip() + if not question or not answer: + continue + + page_content = f"Question: {question}\nAnswer: {answer}" + qa_docs.append( + Document( + page_content=page_content, + metadata={ + "source": os.path.basename(path), + "index": idx, + "type": "qa_pair", + }, + ) + ) + + if qa_docs: + logger.info( + "RAG: loaded %d QA-pair documents from %d JSON file(s) in %s", + len(qa_docs), + len(json_paths), + base_dir, ) - - def _initialize_vector_store(self) -> Chroma: - """ - Initialize the vector store with appropriate settings. - - Returns: - Chroma: Initialized vector store - """ - if os.path.exists(self.persist_directory): - logger.info("Loading existing vector store...") - return Chroma( - persist_directory=self.persist_directory, - embedding_function=self.embeddings + # Treat QA pairs as the only corpus when present. 
+ return qa_docs + + # --- 2) Fallback: PDFs and plain-text snippets --- + docs: List[Document] = [] + # 1) Load all PDFs + pdf_pattern = os.path.join(base_dir, "*.pdf") + pdf_paths = sorted(glob.glob(pdf_pattern)) + for path in pdf_paths: + try: + loader = PyPDFLoader(path) + pdf_docs = loader.load() + docs.extend(pdf_docs) + except Exception as exc: # noqa: BLE001 + logger.warning("RAG: failed to load PDF %s: %s", path, exc) + + # 2) Load all plain-text snippets (optional, for mixed corpora) + txt_pattern = os.path.join(base_dir, "*.txt") + txt_paths = sorted(glob.glob(txt_pattern)) + for path in txt_paths: + try: + with open(path, "r", encoding="utf-8") as f: + text = f.read().strip() + except OSError as exc: + logger.warning("RAG: failed to read %s: %s", path, exc) + continue + + if not text: + continue + + docs.append( + Document( + page_content=text, + metadata={ + "source": os.path.basename(path), + "path": path, + }, + ) ) - else: - logger.info("Creating new vector store...") - return Chroma( - persist_directory=self.persist_directory, - embedding_function=self.embeddings + + if not docs: + logger.warning( + "RAG: no documents found in %s (no .pdf or .txt files)", base_dir ) - - def __load_docs(self) -> List[Document]: - """ - Load documents from a PDF file. 
- - Returns: - List[Document]: List of Document objects loaded from the file - """ - try: - logger.info(f"Attempting to load document from: {self.path_to_document}") - logger.info(f"File exists: {os.path.exists(self.path_to_document)}") - if os.path.exists(self.path_to_document): - logger.info(f"File size: {os.path.getsize(self.path_to_document)} bytes") - logger.info(f"File permissions: {oct(os.stat(self.path_to_document).st_mode)[-3:]}") - - if not os.path.exists(self.path_to_document): - logger.error(f"Document file does not exist at: {self.path_to_document}") - return [] - - loader = PyPDFLoader(self.path_to_document) - docs = loader.load() - logger.info(f"Successfully loaded {len(docs)} document(s) from {self.path_to_document}") - for i, doc in enumerate(docs): - logger.info(f"Document {i+1} length: {len(doc.page_content)} characters") - logger.info(f"Document {i+1} metadata: {doc.metadata}") - return docs - except Exception as e: - logger.error(f"Error loading documents from {self.path_to_document}: {e}") - logger.error(f"Full error details: {str(e)}") return [] - - def __load_and_chunk_docs(self) -> List[Document]: + + logger.info( + "RAG: loaded %d documents from %s (pdfs: %d, txts: %d)", + len(docs), + base_dir, + len(pdf_paths), + len(txt_paths), + ) + return docs + + def _chunk_documents(self, docs: List[Document]) -> List[Document]: """ - Load documents and split them into chunks. - - Returns: - List[Document]: List of document chunks + Split documents into overlapping chunks for retrieval. + + Chosen settings: chunk_size 800, overlap 150 to preserve context while + keeping chunks compact for BM25 scoring. 
""" - docs = self.__load_docs() if not docs: - logger.error("No documents loaded; cannot perform chunking.") return [] - - logger.info("Starting document chunking...") - text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) - chunks = text_splitter.split_documents(docs) - logger.info(f"Successfully split documents into {len(chunks)} chunk(s)") - for i, chunk in enumerate(chunks): - logger.info(f"Chunk {i+1} length: {len(chunk.page_content)} characters") - return chunks - - def __populate_chroma(self, vector_store: Chroma) -> None: - """ - Populate the Chroma vector store with document chunks. - - Args: - vector_store (Chroma): The vector store to populate - """ - chunks = self.__load_and_chunk_docs() - if chunks: - logger.info(f"Adding {len(chunks)} chunks to vector store...") - try: - _ = vector_store.add_documents(documents=chunks) - logger.info("Successfully populated vector store with document chunks") - except Exception as e: - logger.error(f"Error adding documents to vector store: {e}") - else: - logger.warning("No document chunks available to populate the vector store.") - - def __create_and_populate_chroma(self) -> Chroma: - """ - Create or load a Chroma vector store and populate it with documents. - - Returns: - Chroma: The initialized vector store - """ - if os.path.exists(self.persist_directory): - logger.info("Persist directory exists. Loading existing vector store.") - return self.__load_db() - else: - logger.info("Creating new Chroma vector store.") - vector_store = Chroma.from_documents( - self.__load_and_chunk_docs(), - self.embeddings, - persist_directory=self.persist_directory - ) - return vector_store - - def __load_db(self) -> Chroma: - """ - Load an existing Chroma vector store. 
- - Returns: - Chroma: The loaded vector store - """ - logger.info(f"Loading vector store from {self.persist_directory}.") - return Chroma(persist_directory=self.persist_directory, embedding_function=self.embeddings) - - def add_docs_to_db(self, path_to_docs: str) -> None: - """ - Add new documents to the existing vector store. - - Args: - path_to_docs (str): Path to the new document(s) - """ - self.path_to_document = path_to_docs - self.__populate_chroma(self.vector_store) - - def retrieve_similar(self, query_text: str, top_n: int = 5, search_kwargs: int = 20, rerank: bool = True) -> List[Document]: - """ - Retrieve document chunks similar to the query text. - - Args: - query_text (str): The query string - top_n (int): Number of top documents to return after reranking - search_kwargs (int): Number of documents to retrieve initially - rerank (bool): Whether to rerank results using cross-encoder - - Returns: - List[Document]: List of relevant document chunks - """ - if rerank: - logger.info("Reranking documents...") - return self.__rerank(query_text, top_n=top_n, search_kwargs=search_kwargs) - return self.vector_store.similarity_search(query_text) - - def get_document_context(self, document: str) -> str: - """ - Retrieve the context of a specific document. - - Args: - document (str): Name or identifier of the document - - Returns: - str: The document's context and main themes - """ - prompt = ( - "Extract the overarching context and main themes from the document titled " - f"'{document}'. Focus on summarizing the key topics, narrative, and any major findings " - "without including extraneous details." 
+ + splitter = RecursiveCharacterTextSplitter( + chunk_size=800, + chunk_overlap=150, ) - return self.retrieve_similar(prompt) - - def __rerank(self, prompt: str, top_n: int = 5, search_kwargs: int = 20) -> List[Document]: + chunks = splitter.split_documents(docs) + logger.info("RAG: created %d chunks", len(chunks)) + return chunks + + def _build_index(self) -> None: """ - Rerank retrieved documents using a cross-encoder. - - Args: - prompt (str): The query text - top_n (int): Number of top documents to return - search_kwargs (int): Number of documents to retrieve initially - - Returns: - List[Document]: Reranked document chunks + Tokenize chunks and build the BM25 index in memory. + + BM25 was chosen over embeddings to minimize dependencies and runtime + cost; sufficient for small corpora and trivia Q&A retrieval. """ - model = HuggingFaceCrossEncoder(model_name="mixedbread-ai/mxbai-rerank-base-v1") - compressor = CrossEncoderReranker(model=model, top_n=top_n) - compression_retriever = ContextualCompressionRetriever( - base_compressor=compressor, - base_retriever=self.vector_store.as_retriever(search_kwargs={"k": search_kwargs}) - ) - return compression_retriever.invoke(prompt) + docs = self._load_documents() + self.chunks = self._chunk_documents(docs) - def __del__(self): - """Cleanup method to clear GPU cache when the RAG instance is destroyed.""" - clear_gpu_cache() + if not self.chunks: + logger.warning("RAG: no chunks available; BM25 index will be empty.") + self.tokenized_corpus = [] + self.bm25 = None + return - def get_list_docs(self) -> List[str]: + def tokenize(text: str) -> List[str]: + return (text or "").lower().split() + + self.tokenized_corpus = [ + tokenize(chunk.page_content) for chunk in self.chunks + ] + self.bm25 = BM25Okapi(self.tokenized_corpus) + logger.info("RAG: BM25 index built over %d chunks", len(self.chunks)) + + def query(self, query_text: str, k: int | None = None) -> List[Document]: """ - Get a list of all available documents in the 
vector store. - - Returns: - List[str]: List of document names/identifiers + Run a BM25 lexical search against the indexed chunks. + + Lightweight, in-memory retrieval; returns top-k chunks (default k=self.default_k). """ - try: - # Get all documents from the vector store - if not self.vector_store: - self.vector_store = self.__load_db() - - # Get unique document names from metadata - doc_names = set() - for doc in self.vector_store.get()["metadatas"]: - if "source" in doc: - doc_name = os.path.basename(doc["source"]) - doc_names.add(doc_name) - - return list(doc_names) - - except Exception as e: - logger.error(f"Error getting document list: {e}") + if not query_text or self.bm25 is None or not self.chunks: return [] + + k = k or self.default_k + + query_tokens = query_text.lower().split() + # BM25Okapi.get_top_n returns the top-n documents directly + top_docs: List[Document] = self.bm25.get_top_n( + query_tokens, self.chunks, n=k + ) + return top_docs + + diff --git a/my_furhat_backend/agents/document_agent.py b/my_furhat_backend/agents/document_agent.py old mode 100755 new mode 100644 index 143cb62..8e08bf6 --- a/my_furhat_backend/agents/document_agent.py +++ b/my_furhat_backend/agents/document_agent.py @@ -1,1420 +1,256 @@ """ -Document Agent Module +Simplified DocumentAgent for the NorwAI backend. -This module implements a sophisticated document interaction system that combines RAG (Retrieval-Augmented Generation) -with a state-based conversation flow. The DocumentAgent orchestrates a multi-step workflow for processing and -responding to document-related queries. +Design choices: +- Keep RAG retrieval lightweight (BM25) via rag_flow; no vector DB or caching layers. +- Default to Ollama-backed LLM to avoid heavy GPU deps; can swap via create_llm config. +- Minimal prompt building with optional language steering for bilingual trivia flows. 
-Key Components: - - DocumentAgent: Main class managing the conversation workflow - - QuestionCache: Caching system for questions and answers with similarity matching - - State Graph: Manages conversation flow and state transitions - -The workflow includes: - 1. Input processing - 2. Document retrieval - 3. Content summarization - 4. Uncertainty checking - 5. Response generation - 6. Follow-up handling +Pipeline: question -> RAG.retrieve(context) -> build prompt -> LLM -> answer """ -import os +from __future__ import annotations + import logging -import uuid -import shutil -import sys -import langgraph.checkpoint.base as _checkpoint_base +from typing import List -# Temporary compatibility shim for langgraph <-> langgraph-checkpoint mismatch. -# Older versions of langgraph-checkpoint (<2.0.13) do not expose -# `EXCLUDED_METADATA_KEYS`, but langgraph>=0.3.25 expects it during import. -# When missing, provide a conservative default so the runtime can proceed. -if not hasattr(_checkpoint_base, "EXCLUDED_METADATA_KEYS"): - _checkpoint_base.EXCLUDED_METADATA_KEYS = set() # type: ignore[attr-defined] +from langchain_core.documents import Document -from langgraph.graph import StateGraph, START, END -from langchain_core.messages import HumanMessage, AIMessage, ToolMessage, BaseMessage, SystemMessage -from typing_extensions import TypedDict, Annotated, List from my_furhat_backend.config.settings import config -from langgraph.checkpoint.memory import MemorySaver -import json -from pathlib import Path -from sentence_transformers import SentenceTransformer -import numpy as np -from typing import Dict, Tuple, Optional -from datetime import datetime -from transformers import pipeline -import re -import torch -import random - -from my_furhat_backend.models.chatbot_factory import create_chatbot -from my_furhat_backend.utils.util import clean_output +from my_furhat_backend.models.llm_factory import create_llm from my_furhat_backend.RAG.rag_flow import RAG -from 
my_furhat_backend.utils.gpu_utils import print_gpu_status, clear_gpu_cache -from my_furhat_backend.models.llm_factory import HuggingFaceLLM -# Set up logging logger = logging.getLogger(__name__) -# Set up cache directories -CACHE_DIR = config["HF_HOME"] -os.makedirs(CACHE_DIR, exist_ok=True) - -class State(TypedDict): - """ - Conversation state type definition. - - Attributes: - messages (List[BaseMessage]): List of conversation messages - input (str): Current user input - """ - messages: Annotated[List[BaseMessage], "add_messages"] - input: str - -class QuestionCache: - """ - A cache system for storing and retrieving question-answer pairs with similarity matching. - - This class provides functionality to: - - Store and retrieve question-answer pairs - - Find similar questions using semantic similarity - - Clean and normalize questions and answers - - Persist the cache to disk - - The cache uses sentence embeddings to compute semantic similarity between questions, - allowing for fuzzy matching of similar questions even if they're not exact matches. - - Attributes: - cache_file (str): Path to the JSON file where the cache is persisted - cache (Dict): In-memory cache of question-answer pairs - model: Sentence transformer model for computing embeddings - """ - - def __init__(self, cache_file: str = os.path.join(config["HF_HOME"], "question_cache.json")): - """ - Initialize the QuestionCache. - - Args: - cache_file (str): Path to the cache file - """ - self.cache_file = cache_file - self._ensure_cache_file() - self.cache: Dict[str, Dict] = self._load_cache() - self.model = SentenceTransformer('all-MiniLM-L6-v2', cache_folder=config["HF_HOME"]) - - def _normalize_question(self, question: str) -> str: - """ - Normalize the question by removing common words and standardizing format. 
- - Args: - question (str): The input question - - Returns: - str: The normalized question - """ - question = question.lower() - common_words = {'what', 'is', 'the', 'about', 'can', 'you', 'tell', 'me', 'please', 'thank', 'thanks'} - words = [w for w in question.split() if w not in common_words] - return ' '.join(words) - - def _clean_answer(self, answer: str) -> str: - """ - Clean the answer by removing unnecessary conversational elements. - - Args: - answer (str): The input answer - - Returns: - str: The cleaned answer - """ - conversational_phrases = [ - "Hey there!", "I've got", "let me tell you", - "feel free to ask", "I'm here to help", - "So what do you think?", "I'm here and ready to assist" - ] - for phrase in conversational_phrases: - answer = answer.replace(phrase, "") - - answer = ' '.join(answer.split()) - return answer.strip() - - def _ensure_cache_file(self) -> None: - """Ensure the cache file exists, create it if it doesn't.""" - cache_path = Path(self.cache_file) - if not cache_path.exists(): - with open(cache_path, 'w') as f: - json.dump({}, f, indent=2) - print(f"Created new cache file at {self.cache_file}") - - def _load_cache(self) -> Dict: - """ - Load the cache from file if it exists, otherwise return empty dict. - - Returns: - Dict: The loaded cache or empty dict if loading fails - """ - try: - with open(self.cache_file, 'r') as f: - return json.load(f) - except (FileNotFoundError, json.JSONDecodeError) as e: - print(f"Error loading cache: {e}") - return {} - - def _save_cache(self) -> None: - """Save the cache to file.""" - try: - with open(self.cache_file, 'w') as f: - json.dump(self.cache, f, indent=2) - print(f"Saved cache to {self.cache_file}") - except Exception as e: - print(f"Error saving cache: {e}") - - def _compute_similarity(self, question1: str, question2: str) -> float: - """ - Compute cosine similarity between two questions. 
- - Args: - question1 (str): First question - question2 (str): Second question - - Returns: - float: Cosine similarity score between 0 and 1 - """ - embeddings = self.model.encode([question1, question2]) - return float(np.dot(embeddings[0], embeddings[1]) / - (np.linalg.norm(embeddings[0]) * np.linalg.norm(embeddings[1]))) - - def find_similar_question(self, question: str, threshold: float = 0.8) -> Optional[Tuple[str, str, float]]: - """ - Find the most similar question in the cache. - - Args: - question (str): The question to find similar matches for - threshold (float): Minimum similarity score (0-1) to consider a match - - Returns: - Optional[Tuple[str, str, float]]: Tuple of (question, answer, similarity) if found, - None if no match above threshold - """ - normalized_question = self._normalize_question(question) - best_similarity = 0 - best_match = None - - for cached_q, data in self.cache.items(): - normalized_cached_q = self._normalize_question(cached_q) - similarity = self._compute_similarity(normalized_question, normalized_cached_q) - if similarity > best_similarity: - best_similarity = similarity - best_match = (cached_q, data['answer'], similarity) - - if best_match and best_match[2] >= threshold: - return best_match - return None - - def add_question(self, question: str, answer: str) -> None: - """ - Add a new question and answer to the cache. - - Args: - question (str): The question to cache - answer (str): The answer to cache - """ - cleaned_answer = self._clean_answer(answer) - - self.cache[question] = { - 'answer': cleaned_answer, - 'timestamp': datetime.now().isoformat(), - 'normalized_question': self._normalize_question(question) - } - self._save_cache() class DocumentAgent: """ - Orchestrates a multi-step conversational workflow for document interaction. 
- - The agent manages: - - Document retrieval and context gathering - - Content summarization and analysis - - Uncertainty checking and clarification - - Response generation and follow-up handling - - Conversation state management - - The workflow is implemented as a state graph with checkpointed memory for resumption. - """ - - def __init__( - self, - model: str = "llama3.1:instruct", # Ollama model tag - base_url: str = "http://localhost:11434", - **kwargs - ): - # Default chatbot backend = Ollama - self.chatbot = create_chatbot( - "ollama", - model=model, - base_url=base_url, - num_ctx=8192, # typical context for Ollama - temperature=0.7, - top_p=0.9, - **kwargs - ) - self.llm = self.chatbot.llm - - # Initialize summarizer - self.summarizer = HuggingFaceLLM( - model_id="sshleifer/distilbart-cnn-12-6", - task="summarization", - max_length=1024, - max_new_tokens=100, - temperature=0.7, - top_p=0.9, - do_sample=True, - min_length=30, - no_repeat_ngram_size=3 - ) - - print_gpu_status() - - self.graph = StateGraph(State) - - # Initialize memory checkpointer for state persistence - self.memory = MemorySaver() - - # Initialize RAG instance for document retrieval and context gathering - # RAG is needed for: - # 1. Retrieving document context in the engage() method - # 2. Getting document context in retrieve_context() method - # 3. 
Listing available documents in check_uncertainty() method - # Note: RAG can work without documents (empty vector store), but requires langchain-huggingface - self.rag_instance = None - try: - # Check if langchain-huggingface is available (required for RAG) - from langchain_huggingface import HuggingFaceEmbeddings - self.rag_instance = RAG( - hf=True, # Use HuggingFace embeddings - persist_directory=config.get("VECTOR_STORE_PATH"), # Use configured vector store path - path_to_document=None # Can be set later or via config if needed (works fine without documents) - ) - logger.info("RAG instance initialized successfully (vector store may be empty if no documents loaded)") - except ImportError as e: - logger.warning(f"langchain-huggingface not available, RAG features disabled: {e}") - except Exception as e: - logger.warning(f"Failed to initialize RAG instance: {e}. Some features may not work.") - - # Initialize caches with larger sizes - self.question_cache = QuestionCache() - self.context_cache = {} - self.summary_cache = {} # New cache for document summaries - - self._build_graph() - - self.compiled_graph = self.graph.compile(checkpointer=self.memory) - - # Initialize conversation memory with a larger size - self.conversation_memory = [] - self.max_memory_size = 10 # Increased from 5 - - # Initialize sentiment analyzer with specific model and caching - try: - self.sentiment_analyzer = pipeline( - "sentiment-analysis", - model="distilbert-base-uncased-finetuned-sst-2-english", - device="cuda", # Specify device directly - model_kwargs={"cache_dir": config["HF_HOME"]} # Enable model caching - ) - except Exception as e: - print(f"Error creating pipeline: {e}") - self.sentiment_analyzer = None - - # Initialize personality traits - self.personality_traits = { - "curiosity": 0.8, - "empathy": 0.7, - "enthusiasm": 0.6 - } - - def __del__(self): - """Cleanup method to clear GPU cache when the agent is destroyed.""" - clear_gpu_cache() - - def _build_graph(self) -> None: - """ 
- Build the state graph defining the conversation workflow. - - The graph consists of the following nodes: - 1. input_node: Process user input - 2. retrieval_node: Retrieve relevant document context - 3. summarization_node: Summarize retrieved content - 4. content_analysis_node: Analyze content for uncertainty - 5. uncertainty_response_node: Handle uncertainty cases - 6. generation_node: Generate final response - 7. format_response_node: Format and clean response - 8. answer_followup_node: Handle follow-up questions - """ - # Add nodes to the graph with corresponding callback functions - self.graph.add_node("capture_input", self.input_node) - self.graph.add_node("retrieval", self.retrieval_node) - self.graph.add_node("content_analysis", self.content_analysis_node) - self.graph.add_node("summarization", self.summarization_node) - self.graph.add_node("uncertainty_response", self.uncertainty_response_node) - self.graph.add_node("generation", self.generation_node) - self.graph.add_node("format_response", self.format_response_node) - self.graph.add_node("answer_followup", self.answer_followup_node) + Minimal RAG+LLM agent. 
- # Define linear flow from the start to input - self.graph.add_edge(START, "capture_input") - - # Add conditional edge from input to either retrieval or answer_followup - self.graph.add_conditional_edges( - "capture_input", - lambda state: self._determine_next_node(state), - { - "retrieval": "retrieval", - "answer_followup": "answer_followup" - } - ) - - # Define linear flow from retrieval to analysis - self.graph.add_edge("retrieval", "content_analysis") - - # Define conditional branching from the content analysis node - self.graph.add_conditional_edges( - "content_analysis", - lambda state: state.get("next"), - { - "summarization": "summarization", - "uncertainty_response": "uncertainty_response", - "generation": "generation" - } - ) - - # If summarization is executed, then proceed to generation - self.graph.add_edge("summarization", "generation") - - # After generation, proceed directly to response formatting - self.graph.add_edge("generation", "format_response") - - # Both uncertainty_response and format_response lead to the END node - self.graph.add_edge("uncertainty_response", END) - self.graph.add_edge("format_response", END) - self.graph.add_edge("answer_followup", END) - - def input_node(self, state: State) -> dict: - """ - Process the initial user input. - - Args: - state (State): Current conversation state - - Returns: - dict: Updated state with processed input - """ - # Ensure that the 'messages' list exists in the state - state.setdefault("messages", []) - # Create a HumanMessage using the user's input - human_msg = HumanMessage(content=state.get("input", "")) - # Append the human message to the conversation history - state["messages"].append(human_msg) - return {"messages": state["messages"]} - - def retrieval_node(self, state: State) -> dict: - """ - Retrieve relevant document context using RAG. 
- - Args: - state (State): Current conversation state - - Returns: - dict: Updated state with retrieved context - """ - messages = state["messages"] - input_text = state["input"] - - # Check cache for similar questions - cached_result = self.question_cache.find_similar_question(input_text) - if cached_result: - cached_question, cached_answer, similarity = cached_result - messages.append(AIMessage(content=cached_answer)) - return {"messages": messages} - - # Retrieve context from RAG - if self.rag_instance is None: - logger.warning("RAG instance not initialized, returning empty context") - context = [] - else: - context = self.rag_instance.get_document_context(input_text) - self.context_cache[input_text] = context - - # Create a ToolMessage with the retrieval results - tool_msg = ToolMessage( - content=f"Retrieved context:\n{context}", - name="document_retriever", - tool_call_id=str(uuid.uuid4()) - ) - messages.append(tool_msg) - - return {"messages": messages} - - def summarization_node(self, state: State) -> dict: - """ - Summarize the retrieved document context. - - This method: - 1. Extracts document context from the retrieval message - 2. Checks for cached summaries to avoid redundant processing - 3. Generates a new summary if no cache exists - 4. Maintains a size-limited summary cache - 5. Creates a new tool message with the summarized content - - The summarization process uses a HuggingFace model optimized for - extractive summarization with controlled length and coherence. - - Args: - state (State): Current conversation state containing messages and context + Public API: + - run(question: str) -> str + - engage(document_name: str, answer: str) -> str + - clear_all_caches() -> None (no-op for compatibility) + """ - Returns: - dict: Updated state with the summarized context added as a new message - - Note: - If no retrieval message is found, a default message indicating - no context is available is added to the state. 
- """ - messages = state["messages"] - - # Locate the ToolMessage that holds the retrieved document context - retrieval_msg = next( - (msg for msg in messages - if isinstance(msg, ToolMessage) and msg.name == "document_retriever"), - None - ) - - if retrieval_msg: - # Remove any header text from the retrieval message - text_to_summarize = retrieval_msg.content.replace("Retrieved context:\n", "") - - # Check if we have a cached summary for this content - content_hash = hash(text_to_summarize) - if content_hash in self.summary_cache: - summarized_text = self.summary_cache[content_hash] - else: - # Generate a summary of the retrieved content using the summarizer - summarized_text = self.summarizer.query(text_to_summarize) - # Cache the summary - self.summary_cache[content_hash] = summarized_text - - # Limit cache size - if len(self.summary_cache) > 1000: - self.summary_cache.pop(next(iter(self.summary_cache))) - else: - summarized_text = "No document context available to summarize." - - # Create a new ToolMessage for the summarized context - summary_msg = ToolMessage( - content=f"Summarized context:\n{summarized_text}", - name="summarizer", - tool_call_id=str(uuid.uuid4()) - ) - messages.append(summary_msg) - - return {"messages": messages} - - def content_analysis_node(self, state: State) -> dict: - """ - Analyze content for uncertainty and quality. - - This method performs several analyses on the retrieved content: - 1. Checks content length and determines if summarization is needed - 2. Analyzes content quality and relevance - 3. Identifies potential uncertainties or gaps in the information - 4. Determines if additional context or clarification is needed - - Args: - state (State): Current conversation state containing messages and context + def __init__(self) -> None: + # Initialize a simple RAG helper for semantic search over the NorwAI PDF. 
+ self.rag = RAG() - Returns: - dict: Updated state with analysis results and next action determination - - Note: - If no retrieval message is found, the state is updated to proceed to - uncertainty_response node. - """ - messages = state["messages"] - - # Get the retrieval message - retrieval_msg = next( - (msg for msg in messages - if isinstance(msg, ToolMessage) and msg.name == "document_retriever"), - None + # Initialize the Ollama-backed LLM using the configured model and system prompt. + self.llm = create_llm( + "ollama", + model=config.get("OLLAMA_MODEL", "llama3.2:latest"), + base_url=config.get("OLLAMA_BASE_URL", "http://localhost:11434"), + system_prompt=config.get("OLLAMA_SYSTEM_PROMPT"), ) - - if not retrieval_msg: - state["next"] = "uncertainty_response" - return state - - # Extract the actual content (remove the header) - content = retrieval_msg.content.replace("Retrieved context:\n", "").strip() - - # Check content length (rough estimate of tokens) - content_length = len(content.split()) - needs_summary = content_length > 500 # If content is longer than 500 words, summarize - - # Create a comprehensive prompt for the LLM to analyze the content - prompt = f"""Analyze the following retrieved content and determine the best way to process it. -Consider all aspects and provide a structured response: - -Content to analyze: -{content} - -Analyze the following aspects: - -1. Content Length and Complexity - - Is the content longer than 300 words? - - Does it contain multiple paragraphs or sections? - - Is there redundant or repetitive information? - - Would it benefit from being more concise? - -2. Information Quality - - Is the information complete and specific? - - Are there any ambiguities or uncertainties? - - Is the information relevant to the query? - - Is there unnecessary detail that could be condensed? - -3. Summarization Benefits - - Would summarizing help focus on key points? - - Is there extraneous information that could be removed? 
- - Would a shorter version be more effective? - - Could the information be more impactful if condensed? - -Respond in the following format: -UNCERTAINTY_PRESENT: [yes/no] -NEXT_STEP: [summarization/uncertainty_response/generation] -REASONING: [brief explanation of the decision, including specific reasons for or against summarization]""" - - # Get the LLM's analysis - response = self.llm.query(prompt) - - # Parse the response - analysis = {} - for line in response.content.split('\n'): - if ':' in line: - key, value = line.split(':', 1) - analysis[key.strip()] = value.strip().lower() - - # Determine next step based on content length and analysis - next_step = analysis.get("next_step", "generation") - if needs_summary and next_step != "uncertainty_response": - next_step = "summarization" - elif next_step == "summarization" and not needs_summary: - # If LLM suggests summarization but content is short, check reasoning - reasoning = analysis.get("reasoning", "").lower() - if any(keyword in reasoning for keyword in ["redundant", "repetitive", "condense", "concise", "focus"]): - next_step = "summarization" - else: - next_step = "generation" - - # Update state with analysis results - state.update({ - "needs_summary": needs_summary, - "uncertainty": analysis.get("uncertainty_present", "no") == "yes", - "next": next_step - }) - - return state - - def uncertainty_response_node(self, state: State) -> dict: - """ - Handle cases where content uncertainty is detected. - - Args: - state (State): Current conversation state - Returns: - dict: Updated state with clarification if needed - """ - messages = state["messages"] - uncertainty_score = state.get("uncertainty_score", 0) - - if uncertainty_score > 0.7: - clarification = self._generate_clarification(state) - messages.append(AIMessage(content=clarification)) - - return {"messages": messages} - - def generation_node(self, state: State) -> dict: - """ - Generate the final response using the chatbot. 
- - Args: - state (State): Current conversation state - - Returns: - dict: Updated state with generated response - """ - messages = state.get("messages", []) - summary = state.get("summary", "") - - # Add the summary as context to the conversation - if summary: - messages.insert(0, SystemMessage(content=f"Context: {summary}")) - - # Add a concise response instruction to the system prompt - concise_instruction = SystemMessage(content=""" - Please provide a concise response that: - 1. Uses 2-3 sentences maximum - 2. Focuses on the most important information - 3. Avoids unnecessary details - 4. Gets straight to the point - 5. Uses clear and direct language - 6. Avoids conversational fillers - 7. Does not use phrases like "Well," "So," "Actually," etc. - 8. Does not add unnecessary context - 9. Does not ask follow-up questions unless absolutely necessary - 10. Stays focused on answering the user's question - """) - messages.insert(0, concise_instruction) - - # Use the chatbot to generate the response - updated_state = self.chatbot.chatbot({"messages": messages}) - return updated_state - - def format_response_node(self, state: State) -> dict: - """ - Format and clean the generated response. 
- - Args: - state (State): Current conversation state - - Returns: - dict: Updated state with formatted response - """ - messages = state["messages"] - last_message = messages[-1].content - - # Clean and format the response - cleaned_response = clean_output(last_message) - - # Split into sentences and analyze content - sentences = cleaned_response.split('.') - - # Determine response type and adjust accordingly - response_type = self._analyze_response_type(cleaned_response) - - # Ensure minimum response length - if len(cleaned_response.split()) < 10: - # If response is too short, add more context - context = self._get_conversation_context() - if context: - cleaned_response = f"{cleaned_response} {context}" + @staticmethod + def _build_context(docs: List[Document], max_chars: int = 4000) -> str: + """ + Turn retrieved documents into a compact textual context for the LLM. + """ + if not docs: + return "No relevant document context could be retrieved for this question." + + segments: List[str] = [] + for doc in docs: + content = (doc.page_content or "").strip() + if not content: + continue + metadata = doc.metadata or {} + page = metadata.get("page") + # Keep a simple textual prefix instead of bracketed page labels to avoid + # awkward symbols in spoken output. + if isinstance(page, int): + prefix = f"Page {page + 1}: " else: - # If no context available, add a follow-up question - cleaned_response = f"{cleaned_response} Would you like me to elaborate on any specific aspect?" - - # Always limit to 2-3 sentences maximum, regardless of content type - if len(sentences) > 3: - cleaned_response = '. '.join(sentences[:3]) + '.' 
- - # Remove common repetitive phrases and conversational fillers - cleaned_response = re.sub(r'Let me think about that\.?\s*', '', cleaned_response) - cleaned_response = re.sub(r'I notice you\'re interested in\.?\s*', '', cleaned_response) - cleaned_response = re.sub(r'Based on the document\.?\s*', '', cleaned_response) - cleaned_response = re.sub(r'According to the document\.?\s*', '', cleaned_response) - cleaned_response = re.sub(r'In the document\.?\s*', '', cleaned_response) - cleaned_response = re.sub(r'Well,?\s*', '', cleaned_response) - cleaned_response = re.sub(r'So,?\s*', '', cleaned_response) - cleaned_response = re.sub(r'Actually,?\s*', '', cleaned_response) - cleaned_response = re.sub(r'You know,?\s*', '', cleaned_response) - cleaned_response = re.sub(r'I mean,?\s*', '', cleaned_response) - - # Remove any remaining conversational elements - cleaned_response = re.sub(r'That\'s fascinating!?\s*', '', cleaned_response) - cleaned_response = re.sub(r'That\'s interesting!?\s*', '', cleaned_response) - cleaned_response = re.sub(r'That\'s surprising!?\s*', '', cleaned_response) - - # Ensure the response starts with a capital letter - cleaned_response = cleaned_response.strip() - if cleaned_response: - cleaned_response = cleaned_response[0].upper() + cleaned_response[1:] - - messages[-1].content = cleaned_response - - # Update conversation memory - self._update_conversation_memory(state["input"], cleaned_response) - - return {"messages": messages} - - def _analyze_response_type(self, text: str) -> str: - """ - Analyze the type of response to determine appropriate formatting. 
- - Args: - text (str): The response text to analyze - - Returns: - str: The type of response ('technical', 'casual', or 'general') - """ - # Technical indicators - technical_words = {'implementation', 'algorithm', 'process', 'system', 'method', 'function', 'data', 'analysis'} - # Casual indicators - casual_words = {'cool', 'awesome', 'interesting', 'fun', 'great', 'nice', 'good', 'bad', 'wow'} - - words = set(text.lower().split()) - technical_count = len(words.intersection(technical_words)) - casual_count = len(words.intersection(casual_words)) - - if technical_count > casual_count and technical_count > 2: - return "technical" - elif casual_count > technical_count and casual_count > 2: - return "casual" - else: - return "general" - - def _analyze_sentiment(self, text: str) -> float: - """ - Analyze the sentiment of the given text. - - Args: - text (str): Text to analyze - - Returns: - float: Sentiment score between -1 and 1 - """ - result = self.sentiment_analyzer(text)[0] - return float(result["score"]) if result["label"] == "POSITIVE" else -float(result["score"]) - - def _adjust_tone(self, text: str, sentiment: float) -> str: - """ - Adjust the tone of the text based on sentiment. - - Args: - text (str): Text to adjust - sentiment (float): Sentiment score - - Returns: - str: Adjusted text - """ - if sentiment < -0.5: - return f"I understand your concern. {text}" - elif sentiment > 0.5: - return f"I'm glad you're interested! {text}" - return text - - def _generate_engaging_prompt(self, document_name: str, answer: str) -> str: - """ - Generate an engaging follow-up prompt. 
- - Args: - document_name (str): Name of the document - answer (str): Previous answer - - Returns: - str: Engaging follow-up prompt - """ - # Use personality traits to influence prompt generation - curiosity_level = self.personality_traits["curiosity"] - empathy_level = self.personality_traits["empathy"] - - # Generate different types of prompts based on personality - if curiosity_level > 0.7: - return f"I'm really curious about this! What would you like to explore next about {document_name}?" - elif empathy_level > 0.7: - return f"I find this topic fascinating. What aspects of {document_name} would you like to discuss further?" - else: - return f"Would you like to know more about {document_name}?" - - def _update_conversation_memory(self, question: str, answer: str) -> None: - """ - Update the conversation memory with new Q&A pair. - - Args: - question (str): User question - answer (str): System answer - """ - # Add new exchange - self.conversation_memory.append({ - "question": question, - "answer": answer, - "timestamp": datetime.now().isoformat() - }) - - # Keep only last N exchanges for context - if len(self.conversation_memory) > self.max_memory_size: - # Remove oldest entries - self.conversation_memory = self.conversation_memory[-self.max_memory_size:] - - # Clean up old follow-up questions - self.conversation_memory = [ - msg for msg in self.conversation_memory - if not (msg.get("follow_up", False) and - (datetime.now() - datetime.fromisoformat(msg["timestamp"])).days > 1) - ] - - def _get_conversation_context(self) -> str: - """ - Get a formatted summary of the conversation history. - - This method: - 1. Retrieves the conversation memory containing previous Q&A pairs - 2. Formats each exchange into a readable Q&A format - 3. 
Returns an empty string if no conversation history exists - - The formatted context is used to: - - Provide continuity in the conversation - - Help maintain context for follow-up questions - - Enable the model to reference previous exchanges - - Returns: - str: A formatted string containing the conversation history, - or an empty string if no history exists - - Example: - >>> agent._get_conversation_context() - "Previous conversation: - Q: What is the main topic? - A: The main topic is AI research. - Q: Can you elaborate? - A: It focuses on machine learning applications." - """ - if not self.conversation_memory: - return "" - - context = "Previous conversation:\n" - for exchange in self.conversation_memory: - context += f"Q: {exchange['question']}\nA: {exchange['answer']}\n" - return context - - def clear_all_caches(self) -> None: - """ - Clear all caches and memory to free up resources and reset the agent's state. - - This method performs a complete cleanup of: - 1. Question cache file and in-memory cache - 2. Context cache for document retrieval - 3. GPU memory cache (if CUDA is available) - 4. Conversation memory - 5. Summary cache - - This is typically called when: - - The context window is exceeded - - An error occurs during processing - - The agent needs to be reset - - Memory usage needs to be optimized - - Note: - This is a destructive operation that will remove all cached - information. The agent will need to rebuild its caches - for subsequent queries. 
- """ - # Clear question cache file - if os.path.exists(self.question_cache.cache_file): - os.remove(self.question_cache.cache_file) - self.question_cache.cache = {} - print("Question cache cleared") - - # Clear context cache - self.context_cache.clear() - print("Context cache cleared") - - # Clear GPU cache - if torch.cuda.is_available(): - torch.cuda.empty_cache() - print("GPU cache cleared") - - # Clear conversation memory - self.conversation_memory.clear() - print("Conversation memory cleared") - - # Clear summary cache - self.summary_cache.clear() - print("Summary cache cleared") - - def run(self, initial_input: str, system_prompt: str = None) -> str: - """ - Execute the document agent workflow with the given input. - - This method orchestrates the complete workflow: - 1. Processes the input through the state graph - 2. Handles document retrieval and context gathering - 3. Manages response generation and caching - 4. Handles error cases and context window limitations - - Args: - initial_input (str): The user's question or input text - system_prompt (str, optional): Custom system prompt to override default - - Returns: - str: The generated response or error message - - Raises: - Exception: If there's an error during processing, with appropriate error message - """ - try: - # Truncate input if too long - max_input_length = 400 # words - input_words = initial_input.split() - if len(input_words) > max_input_length: - initial_input = ' '.join(input_words[:max_input_length]) + '...' 
- - # Don't cache or process "I don't know" type responses - if any(phrase in initial_input.lower() for phrase in ["i don't know", "i do not know", "you tell me", "tell me"]): - # Get the last follow-up question from the conversation - last_follow_up = next( - (msg for msg in reversed(self.conversation_memory) - if "follow_up" in msg), - None + prefix = "" + segments.append(f"{prefix}{content}") + + context = "\n\n".join(segments) + if len(context) > max_chars: + context = context[:max_chars] + "..." + return context or "No relevant document context could be retrieved for this question." + + @staticmethod + def _build_prompt(question: str, context: str, preferred_language: str | None = None) -> str: + """ + Construct a single-string prompt for the underlying LLM. + """ + system = config.get("OLLAMA_SYSTEM_PROMPT", "") + + # Optional language steering. The trivia Q&A pairs are typically in Norwegian, + # but the user may speak a different language. You MUST respond in the user's + # language, translating any Norwegian text or facts as needed. + lang_hint = None + if preferred_language: + pl = preferred_language.lower() + if pl.startswith("norw") or pl in {"no", "nb", "nn"}: + lang_hint = ( + "The user is speaking Norwegian. You MUST respond only in Norwegian. " + "If the context or Q&A pairs are written in another language, translate " + "them into natural Norwegian before answering. Never respond in English." ) - if last_follow_up: - # Answer the follow-up question instead of treating it as a new query - return self._answer_follow_up(last_follow_up["question"]) - return "I apologize, but I don't have enough context to provide a meaningful answer." 
- - # Check cache for similar questions first - similar_question = self.question_cache.find_similar_question(initial_input) - if similar_question: - cached_question, cached_answer, similarity = similar_question - # Return the cached answer directly if similarity is high enough - if similarity > 0.8: # Using the same threshold as find_similar_question - return cached_answer - - # Use a default system prompt if none is provided - if system_prompt is None: - system_prompt = ( - "You are a friendly and knowledgeable assistant having a casual conversation. " - "Keep your responses concise and engaging - aim for 2-3 sentences maximum. " - "Use natural, conversational language and avoid formal or academic tone. " - "Focus on the most interesting or relevant aspects of the topic. " - "Don't repeat information unless necessary. " - "If the user seems disengaged, be more concise. " - "If they show interest, you can elaborate slightly. " - "Use contractions and casual expressions. " - "Avoid phrases like 'Let me think about that' or 'I notice you're interested in'. " - "Be direct and engaging, like chatting with a friend. " - "Adapt your tone based on the user's engagement level. " - "If they ask short questions, give short answers. " - "If they ask detailed questions, provide more context. " - "Use natural transitions between topics. " - "Avoid robotic or overly formal language. " - "Be conversational but professional. " - "Use appropriate humor when relevant. " - "Show enthusiasm for interesting topics. " - "Be empathetic when discussing complex or challenging topics." + elif pl.startswith("eng") or pl in {"en", "en-us", "en-gb"}: + lang_hint = ( + "The user is speaking English. You MUST respond only in English. " + "Most of the context and Q&A pairs may be written in Norwegian; " + "translate all relevant information into natural English before answering. " + "Never respond in Norwegian." 
) + prompt_parts = [ + system, + "", + "You are given context from documents or question–answer pairs.", + "Answer the user's question using only this context.", + lang_hint, + 'If the context does not contain the answer, say "I couldn’t find that in the documents you provided."', + "", + "Context:", + context, + "", + f"Question: {question}", + "", + "Answer:", + ] + return "\n".join(part for part in prompt_parts if part is not None) - # Initialize the conversation state with the system prompt as the first human message - state: State = { - "input": initial_input, - "messages": [HumanMessage(content=system_prompt)] - } - - # Configuration settings for state graph execution - config = {"configurable": {"thread_id": "1"}} - - # Process the conversation state through the compiled graph in streaming mode - for step in self.compiled_graph.stream(state, config, stream_mode="values"): - pass - - # Retrieve the final AI message - final_ai_msg = next((msg for msg in reversed(state["messages"]) if isinstance(msg, AIMessage)), None) - - if final_ai_msg: - # Truncate the response if too long - max_response_length = 1000 # words - response_words = final_ai_msg.content.split() - if len(response_words) > max_response_length: - final_ai_msg.content = ' '.join(response_words[:max_response_length]) + '...' - - # Cache the question and answer - self.question_cache.add_question(initial_input, final_ai_msg.content) - # Store in conversation memory with document name - self.conversation_memory.append({ - "question": initial_input, - "answer": final_ai_msg.content, - "document_name": "CMRPublished", # Default document name - "timestamp": datetime.now().isoformat() - }) - return clean_output(final_ai_msg.content) - - return "No response generated." - - except Exception as e: - if "exceed context window" in str(e): - # Clear caches and try again with a shorter input - self.clear_all_caches() - return "I apologize, but the question was too long. 
Could you please rephrase it to be more concise?" - else: - # For other errors, clear caches and return error message - self.clear_all_caches() - return f"I encountered an error: {str(e)}. The caches have been cleared. Please try again." - - def _answer_follow_up(self, follow_up_question: str) -> str: + def run(self, initial_input: str, preferred_language: str | None = None) -> str: """ - Answer a follow-up question directly without treating it as a new query. - - Args: - follow_up_question (str): The follow-up question to answer - - Returns: - str: The answer to the follow-up question + Main entry point used by the FastAPI backend (/ask, /transcribe). """ - # Get the previous answer from conversation memory - previous_exchange = next( - (msg for msg in reversed(self.conversation_memory) - if not msg.get("follow_up", False)), # Get the last non-follow-up exchange - None - ) - - if not previous_exchange: - return "I apologize, but I don't have enough context to provide a meaningful answer." - - previous_answer = previous_exchange.get("answer", "") - - # Truncate previous answer to prevent token overflow - max_answer_length = 200 # words - answer_words = previous_answer.split() - - if len(answer_words) > max_answer_length: - previous_answer = ' '.join(answer_words[:max_answer_length]) + '...' - - # Create a prompt to answer the follow-up question - prompt = f"""Answer the following follow-up question based on the previous answer. + question = (initial_input or "").strip() + if not question: + return "I didn't receive a question to answer." -Previous Answer: -{previous_answer} + try: + # 1) Retrieve relevant chunks from the vector store + docs = self.rag.query(question, k=20) -Follow-up Question: -{follow_up_question} + # 2) Build a compact textual context + context = self._build_context(docs) -Guidelines: -1. Keep the answer concise and focused -2. Aim for 2-3 sentences maximum -3. Provide a direct answer to the follow-up question -4. 
Use the previous answer to support your response -5. Keep the response concise and focused -6. Use natural, conversational language -7. If you can't answer based on the previous context, say so clearly -8. Make sure your answer builds on the previous discussion -9. Avoid repeating information from the previous answer unless relevant to the follow-up + # 3) Build a simple prompt and query the LLM + prompt = self._build_prompt(question, context, preferred_language) + raw_response = self.llm.query(prompt) -Generate a direct answer:""" + return str(raw_response).strip() - # Get the answer from the LLM - response = self.llm.query(prompt) - answer = response.content if isinstance(response, AIMessage) else str(response) - - return clean_output(answer) + except Exception as exc: # noqa: BLE001 + logger.exception("Error in DocumentAgent.run: %s", exc) + return f"I encountered an error while answering your question: {exc}" def engage(self, document_name: str, answer: str) -> str: """ - Generate an engaging follow-up question based on the document context and previous answer. - - Args: - document_name (str): Name of the document being discussed - answer (str): The previous answer to generate a follow-up for - - Returns: - str: A conversational follow-up question - """ - if self.rag_instance is None: - raise ValueError("RAG instance is not initialized. 
Cannot retrieve document context.") - - # Get document context from cache or retrieve it - if document_name not in self.context_cache: - self.context_cache[document_name] = self.rag_instance.get_document_context(document_name) - - # Extract text content from document list and limit context length - context_docs = self.context_cache[document_name] - context = "\n".join(doc.page_content for doc in context_docs) - - # Truncate context and answer to prevent token overflow - max_context_length = 1200 # Increased from 1000 - max_answer_length = 750 # Increased from 500 - - # Split into words and truncate - context_words = context.split() - answer_words = answer.split() - - if len(context_words) > max_context_length: - context = ' '.join(context_words[:max_context_length]) + '...' - if len(answer_words) > max_answer_length: - answer = ' '.join(answer_words[:max_answer_length]) + '...' - - # Create a more sophisticated prompt for generating engaging follow-ups - prompt = f"""Based on the previous answer and document context, generate a natural, engaging follow-up question. + Generate a single follow-up question based on the last answer. -Previous Answer: -{answer} + This is used by the /engage endpoint to keep the conversation going. + """ + try: + prompt = ( + "You are helping a user explore a NorwAI document.\n" + "Based on the previous answer, suggest ONE natural follow-up question " + "that stays on topic and encourages deeper exploration.\n\n" + f"Document: {document_name}\n" + f"Previous answer: {answer}\n\n" + "Follow-up question:" + ) + raw_response = self.llm.query(prompt) + return str(raw_response).strip() + except Exception as exc: # noqa: BLE001 + logger.exception("Error in DocumentAgent.engage: %s", exc) + return "I couldn't generate a follow-up question right now." 
-Document Context: -{context} + def trivia_turn( + self, + phase: str, + question: str, + answer: str, + user_answer: str | None = None, + preferred_language: str | None = None, + ) -> str: + """ + Generate localized trivia utterances for the robot. + + phase: + - "ask": Turn the raw trivia question into a natural question in the + user's language (do NOT reveal the answer). + - "feedback": Explain briefly if the user's answer was correct or not, + state the correct answer, and optionally invite another round. + """ + phase = (phase or "").strip().lower() + q = (question or "").strip() + a = (answer or "").strip() + ua = (user_answer or "").strip() + + if not q or not a: + return "I don't have a valid trivia question and answer to use." + + # Reuse the language-hinting logic from _build_prompt + lang_hint = None + if preferred_language: + pl = preferred_language.lower() + if pl.startswith("norw") or pl in {"no", "nb", "nn"}: + lang_hint = ( + "The user is speaking Norwegian. You MUST respond only in Norwegian. " + "If the trivia question and answer are written in another language, " + "translate them into natural Norwegian before speaking. Never respond in English." + ) + elif pl.startswith("eng") or pl in {"en", "en-us", "en-gb"}: + lang_hint = ( + "The user is speaking English. You MUST respond only in English. " + "Most trivia questions and answers may be written in Norwegian; " + "translate all relevant information into natural English before speaking. " + "Never respond in Norwegian." + ) -Guidelines for generating an engaging follow-up: -1. Focus on the most interesting or surprising aspect of the previous answer -2. Ask about implications, consequences, or future developments -3. Use natural, conversational language -4. Make the question specific and focused -5. Consider the document context to ensure relevance -6. Keep the question concise and direct -7. Make it feel like a natural continuation of the conversation -8. 
Focus on the "why" or "how" rather than just the "what" -9. Make the question thought-provoking but not too complex -10. Use a friendly, curious tone + if phase == "ask": + prompt = ( + f"{lang_hint or ''}\n\n" + "You are running a Norwegian trivia game as a social robot.\n" + "You are given ONE trivia question (originally Norwegian):\n\n" + f"Question: {q}\n" + f"Correct answer: {a}\n\n" + "Task:\n" + " - Produce exactly ONE short, natural-sounding question in the user's language.\n" + " - Do NOT reveal or hint at the correct answer.\n" + " - Do NOT add explanations, commentary, or extra instructions.\n" + "Return only the question sentence you will speak aloud." + ) + elif phase == "feedback": + prompt = ( + f"{lang_hint or ''}\n\n" + "You are running a Norwegian trivia game as a social robot.\n" + "You are given one trivia question, its correct answer, and what the user answered:\n\n" + f"Question: {q}\n" + f"Correct answer: {a}\n" + f"User answer: {ua}\n\n" + "Task:\n" + " - In the user's language, say briefly whether they were correct or not (speak directly to 'you/your', never say 'the user').\n" + " - Clearly state the correct answer.\n" + " - Be kind and lenient; if their answer is close, acknowledge that politely.\n" + " - Optionally add ONE short follow-up sentence inviting them to try another question.\n" + "Keep the total output to at most two short sentences." + ) + else: + return "I didn't recognise this trivia phase." 
-Generate a single, engaging follow-up question:""" - try: - # Get the follow-up question from the LLM - response = self.llm.query(prompt) - follow_up = response.content if isinstance(response, AIMessage) else str(response) - - # Clean up the response to make it more conversational - follow_up = re.sub(r'\d+\)\s*', '', follow_up) # Remove numbered questions - follow_up = re.sub(r'feel free to ask me follow up questions like:', '', follow_up) - follow_up = re.sub(r'questions like:|questions such as:|like:|such as:|for example:|including:', '', follow_up) - follow_up = re.sub(r'etc\.|etc|\.\.\.|\.\.', '', follow_up) - follow_up = re.sub(r'\s+', ' ', follow_up).strip() - - # Add a conversational prefix based on content and personality - curiosity_level = self.personality_traits["curiosity"] - empathy_level = self.personality_traits["empathy"] - - # More natural and varied prefixes based on content - if any(word in follow_up.lower() for word in ['interesting', 'fascinating', 'surprising']): - prefixes = [ - "That's fascinating!", - "I find that really interesting!", - "That caught my attention!", - "That's quite intriguing!" - ] - follow_up = f"{random.choice(prefixes)} {follow_up}?" - elif any(word in follow_up.lower() for word in ['implication', 'consequence', 'impact']): - prefixes = [ - "I'm curious about the implications -", - "That raises an interesting question -", - "This makes me wonder -", - "That leads me to think -" - ] - follow_up = f"{random.choice(prefixes)} {follow_up}?" - elif any(word in follow_up.lower() for word in ['future', 'develop', 'next']): - prefixes = [ - "Looking ahead,", - "Moving forward,", - "In the future,", - "Going forward," - ] - follow_up = f"{random.choice(prefixes)} {follow_up}?" - else: - prefixes = [ - "I'm curious,", - "I'd love to know,", - "That makes me wonder,", - "I'm interested in," - ] - follow_up = f"{random.choice(prefixes)} {follow_up}?" 
- - # Store the follow-up question in conversation memory - self.conversation_memory.append({ - "question": follow_up, - "follow_up": True, - "timestamp": datetime.now().isoformat() - }) - - return follow_up - - except Exception as e: - if "exceed context window" in str(e): - # Clear caches and return a simpler follow-up - self.clear_all_caches() - return "What would you like to know more about?" - else: - # For other errors, clear caches and return a fallback - self.clear_all_caches() - return "Would you like to explore another aspect of this topic?" + raw_response = self.llm.query(prompt) + return str(raw_response).strip() + except Exception as exc: # noqa: BLE001 + logger.exception("Error in DocumentAgent.trivia_turn: %s", exc) + return "I couldn't generate the trivia line right now." - def answer_followup_node(self, state: State) -> dict: - """ - Handle follow-up questions by answering based on previous conversation context. - - Args: - state (State): Current conversation state - - Returns: - dict: Updated state with the follow-up answer - """ - messages = state["messages"] - input_text = state["input"] - - response = self._answer_follow_up(input_text) - messages.append(AIMessage(content=response)) - - return {"messages": messages} - - def _determine_next_node(self, state: State) -> str: - """ - Determine whether to route to retrieval or answer_followup based on the input. - Uses LLM to make a more nuanced decision about whether the input is a follow-up question. 
- - Args: - state (State): Current conversation state - - Returns: - str: Either "retrieval" or "answer_followup" based on the analysis + def clear_all_caches(self) -> None: """ - # Get the user's input - user_input = state.get("input", "").lower() - - # Check for document name mentions - if self.rag_instance is not None and any(doc.lower() in user_input for doc in self.rag_instance.get_list_docs()): - # Clear conversation memory when switching documents - self.conversation_memory = [] - return "retrieval" - - # If there's no conversation history, it's not a follow-up - if not self.conversation_memory: - return "retrieval" - - # Get the last non-follow-up exchange for context - last_exchange = next( - (msg for msg in reversed(self.conversation_memory) - if not msg.get("follow_up", False)), - None - ) - - if not last_exchange: - return "retrieval" - - # Create a prompt for the LLM to analyze if this is a follow-up question - prompt = f"""Analyze if the following user input is a follow-up question to the previous conversation. -Consider the context and determine if the user is asking for clarification or additional information about the previous answer. - -Previous Answer: -{last_exchange.get('answer', '')} - -Current User Input: -{user_input} - -Guidelines for determining if it's a follow-up: -1. Is the user asking for clarification about something mentioned in the previous answer? -2. Is the user asking for more details about a specific point from the previous answer? -3. Is the user using phrases like "I don't know", "you tell me", or "tell me"? -4. Is the user asking about a specific aspect mentioned in the previous answer? -5. Is the question directly related to the previous discussion? -6. Is the user asking about a different document or topic? -7. Is the user asking for a summary or overview of the document? -8. Is the user asking for key points or main topics? 
- -Respond with only one word: "followup" if it's a follow-up question, or "retrieval" if it's a new question.""" + Compatibility stub. - # Get the LLM's decision - response = self.llm.query(prompt) - decision = response.content.strip().lower() - - # If it's a request for key points or summary, clear memory and do retrieval - if any(phrase in user_input.lower() for phrase in ["key points", "main points", "summary", "overview", "talking points"]): - self.conversation_memory = [] - return "retrieval" - - return "answer_followup" if decision == "followup" else "retrieval" - - def _generate_clarification(self, state: State) -> str: - """ - Generate a clarification message when uncertainty is detected in the content. - - Args: - state (State): Current conversation state containing messages and context - - Returns: - str: A clarification message to help resolve uncertainty + The simplified agent does not maintain long-lived semantic caches, + but this method is kept so existing tooling or docs calling it + will not break. """ - # Get the retrieval message to analyze the content - retrieval_msg = next( - (msg for msg in state["messages"] - if isinstance(msg, ToolMessage) and msg.name == "document_retriever"), - None - ) - - if not retrieval_msg: - return "I apologize, but I'm having trouble understanding the context. Could you please rephrase your question?" - - # Extract the content and user's question - content = retrieval_msg.content.replace("Retrieved context:\n", "").strip() - user_question = state.get("input", "") - - # Create a prompt for the LLM to identify the specific areas of uncertainty - prompt = f"""Analyze the following content and question to identify areas of uncertainty and generate a helpful clarification request. - -Content: -{content} - -User Question: -{user_question} + logger.info("DocumentAgent.clear_all_caches called; no caches to clear in simplified agent.") -Guidelines for generating clarification: -1. 
Identify specific parts of the content that are unclear or ambiguous -2. Point out any missing or incomplete information -3. Ask for clarification in a friendly, conversational tone -4. Focus on the most important aspects that need clarification -5. Keep the clarification request concise and specific -6. Use natural language that a human would use -7. Avoid technical jargon unless necessary -8. Make sure the clarification request is directly related to the user's question -9. If multiple aspects need clarification, prioritize the most important ones -10. End with an open-ended question to encourage user engagement -Generate a natural clarification request:""" - - # Get the clarification from the LLM - response = self.llm.query(prompt) - clarification = response.content if isinstance(response, AIMessage) else str(response) - - # Clean up the response - clarification = clean_output(clarification) - - # Add a friendly prefix if not already present - if not any(clarification.lower().startswith(phrase) for phrase in [ - "i'm not sure", "i'm unclear", "could you clarify", - "i need more information", "i'm having trouble understanding" - ]): - clarification = f"I'm not entirely sure about this, but {clarification}" - - return clarification - -if __name__ == "__main__": - # Clear cache if requested - if "--clear-cache" in sys.argv: - print("Clearing cache...") - if os.path.exists(config["HF_HOME"]): - shutil.rmtree(config["HF_HOME"]) - os.makedirs(config["HF_HOME"]) - print("Cache cleared.") - - # Instantiate the DocumentAgent. - agent = DocumentAgent() - print("Chat with the DocumentAgent. Type 'exit' or 'quit' to stop.") - print("GPU Status:") - print_gpu_status() - - # Run a loop to continuously accept user input. - while True: - # Read user input. - user_input = input("You: ") - # Allow the user to exit the loop. - if user_input.lower() in {"exit", "quit"}: - print("Exiting chat.") - break - - try: - # Run the agent with the provided input. 
- print("\nProcessing your query...") - response = agent.run(user_input) - print("\nAgent:", response) - - # Test the engage functionality - print("\nGenerating follow-up...") - follow_up = agent.engage("CMRPublished", response) - print("Agent Follow-up:", follow_up) - - print("\n" + "="*50 + "\n") - except Exception as e: - print(f"\nError occurred: {str(e)}") - print("Please try again or type 'exit' to quit.\n") diff --git a/my_furhat_backend/agents/test_2_conversational_agent.py b/my_furhat_backend/agents/test_2_conversational_agent.py index b4a50a0..a6b1e03 100755 --- a/my_furhat_backend/agents/test_2_conversational_agent.py +++ b/my_furhat_backend/agents/test_2_conversational_agent.py @@ -1,3 +1,10 @@ +""" +Secondary LangGraph conversational agent scaffold (not used in production). + +Design: reference/example showing router + RAG + grading flows. Uses legacy +paths and prompts; kept for experimentation only. +""" + import os import time import logging diff --git a/my_furhat_backend/agents/test_conversational_agent.py b/my_furhat_backend/agents/test_conversational_agent.py index 3c06671..ea1c2b6 100755 --- a/my_furhat_backend/agents/test_conversational_agent.py +++ b/my_furhat_backend/agents/test_conversational_agent.py @@ -1,3 +1,11 @@ +""" +Example conversational agent using LangGraph (not used in production). + +Design: kept as a reference scaffold to show a LangGraph flow with RAG and +tool-style context injections. Uses a legacy RAG signature and static model +path; update or remove for production. +""" + import os import logging import uuid diff --git a/my_furhat_backend/config/settings.py b/my_furhat_backend/config/settings.py index 4478417..e76282e 100755 --- a/my_furhat_backend/config/settings.py +++ b/my_furhat_backend/config/settings.py @@ -1,3 +1,13 @@ +""" +Configuration loader for the backend. + +Design choices: +- Merge .env (dotenv_values) with OS env vars, preferring OS env overrides. 
+- Default all caches/models/docs into a writable .cache under the project root + (macOS /mnt is read-only). +- Push computed defaults into os.environ so downstream code can rely on env vars. +""" + import os from dotenv import load_dotenv, dotenv_values from pathlib import Path @@ -20,6 +30,55 @@ "DOCUMENTS_PATH": os.getenv("DOCUMENTS_PATH", str(CACHE_DIR / "documents")), "MODEL_PATH": os.getenv("MODEL_PATH", str(CACHE_DIR / "models")), "GGUF_MODELS_PATH": os.getenv("GGUF_MODELS_PATH", str(CACHE_DIR / "models/gguf")), + "OLLAMA_BASE_URL": os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"), + "OLLAMA_MODEL": os.getenv("OLLAMA_MODEL", "llama3.2:latest"), + "OLLAMA_SYSTEM_PROMPT": os.getenv( + "OLLAMA_SYSTEM_PROMPT", + """ +You are Kaia, a warm, friendly, and highly personable social robot. +Your job is to run gentle, engaging Q&A conversations using the provided +document context and RAG results. + +Your personality and behaviour rules: + +1. BE PERSONABLE & NATURAL + - Speak like a polite human tutor, not a formal encyclopedia. + - Use light empathy, encouragement, and conversational warmth. + - Keep responses concise unless the user asks for detail. + +2. BE LENIENT & POSITIVE + - Treat partially correct answers as "on the right track." + - If the user is incorrect, correct them gently and kindly. + - Never shame the user; always encourage continued conversation. + +3. BE INTERACTIVE + - Ask follow-up questions naturally. + - Offer hints instead of hard corrections when appropriate. + - Keep the conversation fun, supportive, and curiosity-driven. + +4. MILD HUMOUR ALLOWED + - You may be lightly playful or humorous, but never sarcastic or rude. + +5. STRICT LANGUAGE HANDLING + - ALWAYS answer in the language the user is using. + - If the context is in a different language, translate it naturally. + - Never switch languages unless explicitly asked. + +6. USE DOCUMENT CONTEXT INTELLIGENTLY + - Combine the retrieved context with your general reasoning. 
+ - If context is missing, answer with your best safe guess and say so. + - Never invent “facts from the document” that do not exist. + +7. ANSWER FORMAT + - Start answers warmly (“Good question!”, “Nice thought!”, etc.). + - Keep tone friendly, supportive, and slightly conversational. + - Encourage the user to continue the dialogue. + +Your overall goal: +Create a friendly, smart, polite, and flexible Q&A tutor experience +where users feel comfortable exploring ideas freely. +""".strip(), + ), "CUDA_VISIBLE_DEVICES": os.getenv("CUDA_VISIBLE_DEVICES", "0"), "PYTORCH_CUDA_ALLOC_CONF": os.getenv("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512"), # NEW: database URL for user-recognition + memory diff --git a/my_furhat_backend/llm_tools/tools.py b/my_furhat_backend/llm_tools/tools.py index 045db50..9e1adaf 100755 --- a/my_furhat_backend/llm_tools/tools.py +++ b/my_furhat_backend/llm_tools/tools.py @@ -1,3 +1,13 @@ +""" +Tool wrappers exposed to LLMs via LangChain's @tool decorator. + +Design choices: +- Keep schemas minimal with Pydantic for validation and tool-call clarity. +- Use thin wrappers around existing API clients (Foursquare, OSM/Nominatim, Overpass) + to avoid duplicating request logic here. +- Return plain dict/list payloads suitable for direct LLM consumption. +""" + from langchain_core.tools import tool from pydantic import BaseModel, Field from my_furhat_backend.api_clients.foursquare_client import FoursquareClient @@ -16,8 +26,8 @@ def foursquare_tool(lat: float, lon: float, query: str, tool_call_id: str = None """ Search for venues using the Foursquare API. - This tool uses the FoursquareClient to search for places near the specified coordinates - that match the given search query. + Thin wrapper over FoursquareClient.search_places; keeps the tool signature + LLM-friendly and returns only the results list. Parameters: lat (float): Latitude coordinate. 
@@ -49,14 +59,8 @@ def osm_tool(lat: float, lon: float, query: str, tool_call_id: str = None) -> li """ Search for Points of Interest (POIs) using OpenStreetMap's Nominatim service. - This tool provides geospatial search functionality by: - 1. Converting coordinates to a location string - 2. Querying the Nominatim service for nearby POIs - 3. Filtering results based on the search query - 4. Returning formatted POI information - - The tool uses rate-limited API calls to comply with Nominatim's - usage policies and includes error handling for API failures. + Wrapper over OSMClient.search_pois (Nominatim). Rate limits and error handling + are inside the client; this function stays declarative for LLM tools. Parameters: lat (float): Latitude coordinate in decimal degrees (-90 to 90) @@ -92,7 +96,7 @@ def overpass_tool(lat: float, lon: float, query: str, tool_call_id: str = None) """ Search for Points of Interest (POIs) using the Overpass API. - This tool uses the OverpassClient to query for POIs around the specified location that match the given query. + Wrapper over OverpassClient.search_pois for Overpass-based POI search. Parameters: lat (float): Latitude coordinate. diff --git a/my_furhat_backend/memory/summarizer.py b/my_furhat_backend/memory/summarizer.py index 3cc2872..2d6afb8 100644 --- a/my_furhat_backend/memory/summarizer.py +++ b/my_furhat_backend/memory/summarizer.py @@ -6,7 +6,10 @@ def _naive_summary(turns): """ Very simple summary: first + last user utterances trimmed. - Replace with a proper LLM call later. + + Design choice: keep it cheap and dependency-free. This is a placeholder + until an LLM-based summarizer is plugged in; avoids extra calls when not + needed but keeps conversation summaries non-empty. """ if not turns: return "" @@ -20,6 +23,12 @@ def _naive_summary(turns): def update_summary_for_conversation(db: Session, conversation: Conversation): + """ + Compute and persist a conversation summary using the naive summarizer. 
+ + Rationale: touch only the last N turns (limit=50) to keep it inexpensive + on each call; swap _naive_summary with an LLM call when desired. + """ turns = crud.get_recent_turns(db, conversation, limit=50) summary = _naive_summary(turns) crud.update_conversation_summary(db, conversation, summary) \ No newline at end of file diff --git a/my_furhat_backend/models/chatbot_factory.py b/my_furhat_backend/models/chatbot_factory.py index f227bdb..58a2d0e 100755 --- a/my_furhat_backend/models/chatbot_factory.py +++ b/my_furhat_backend/models/chatbot_factory.py @@ -1,15 +1,10 @@ """ Chatbot Factory Module -This module provides a factory pattern implementation for creating different types of chatbots. -It supports both HuggingFace and LlamaCpp-based chatbots, with a common interface for -conversation handling and response generation. - -Key Components: - - BaseChatbot: Abstract base class defining the chatbot interface - - Chatbot_HuggingFace: Implementation using HuggingFace models - - Chatbot_LlamaCpp: Implementation using LlamaCpp models - - create_chatbot: Factory function for instantiating chatbots +Design choices: +- Common chatbot interface over multiple backends (HF, LlamaCpp, Ollama). +- Keep formatting/cleaning helpers centralized (util.py) to reduce duplication. +- Prefer Ollama when heavy deps (torch/transformers) are missing. """ from abc import ABC, abstractmethod @@ -34,15 +29,8 @@ class BaseChatbot(ABC): def chatbot(self, state: dict) -> dict: """ Process the conversation state and return an updated state. - - Args: - state (dict): Current conversation state containing messages - - Returns: - dict: Updated conversation state with new AI response - - Raises: - ValueError: If no messages are found in the state + + Contract: implementations mutate/append to state["messages"] and return state. 
""" pass @@ -70,16 +58,7 @@ def __init__(self, model_instance=None, model_id: str = "HuggingFaceTB/SmolLM2-1 def chatbot(self, state: dict) -> dict: """ - Process conversation state using HuggingFace model. - - Args: - state (dict): Current conversation state - - Returns: - dict: Updated state with new AI response - - Raises: - ValueError: If no messages are found in the state + Process conversation state using a HF model; format as ChatML for simplicity. """ messages = state.get("messages", []) if not messages: @@ -126,16 +105,7 @@ def __init__(self, model_instance=None, model_id: str = "my_furhat_backend/ggufs def chatbot(self, state: dict) -> dict: """ - Process conversation state using LlamaCpp model. - - Args: - state (dict): Current conversation state - - Returns: - dict: Updated state with new AI response - - Raises: - ValueError: If no messages are found in the state + Process conversation state using LlamaCpp; uses structured prompt/response parsing. """ messages = state.get("messages", []) if not messages: @@ -166,7 +136,7 @@ class Chatbot_Ollama(BaseChatbot): def __init__( self, model_instance=None, - model: str = "llama3.1:instruct", + model: str = "llama3.2:latest", base_url: str = "http://localhost:11434", **kwargs ): @@ -181,8 +151,8 @@ def set_language(self, lang: str | None): def _infer_language(self, text: str) -> str | None: """ - Super-light heuristic: detect Norwegian vs English vs fallback. - Swap to a real detector if you like. + Super-light heuristic: detect Norwegian vs English vs fallback; kept + minimal to avoid extra deps. Override or enhance as needed. 
""" if not text: return None diff --git a/my_furhat_backend/models/classifier.py b/my_furhat_backend/models/classifier.py index bbe3452..98bc0e1 100755 --- a/my_furhat_backend/models/classifier.py +++ b/my_furhat_backend/models/classifier.py @@ -1,45 +1,65 @@ -from transformers import pipeline +try: + from transformers import pipeline # type: ignore[import] +except Exception as e: # noqa: BLE001 + print( + f"[classifier] Transformers pipeline unavailable, falling back to " + f"naive string-similarity classifier: {e}" + ) + pipeline = None # type: ignore[assignment] + class TextClassifier: """ - A simple text classifier using a zero-shot classification pipeline. + Text classifier for ranking documents against a query. - This class leverages Hugging Face's transformers library to perform zero-shot - classification, which allows classification without task-specific training data. + Design: + - Preferred: Hugging Face zero-shot classification (if transformers+torch available). + - Fallback: simple lexical overlap scoring when HF stack is not available, to avoid hard + dependency on GPU/libtorch for small doc sets. """ - def __init__(self, model_id="facebook/bart-large-mnli"): + def __init__(self, model_id: str = "facebook/bart-large-mnli"): + """ + Initialize the TextClassifier. + + If transformers.pipeline is available, create a zero-shot classification + pipeline. Otherwise, use a lightweight lexical heuristic to keep running + without the HF stack. """ - Initialize the TextClassifier with a specified model. + if pipeline is None: + self.classifier = None + else: + self.classifier = pipeline("zero-shot-classification", model=model_id) - Parameters: - model_id (str): The Hugging Face model identifier to use for zero-shot classification. - Default is "facebook/bart-large-mnli". + def _lexical_score(self, text: str, label: str) -> float: """ - # Create a zero-shot classification pipeline using the specified model. 
- self.classifier = pipeline("zero-shot-classification", model=model_id) - - def classify(self, text: str, labels: list) -> dict: + Very simple similarity: proportion of label tokens that appear in the text. """ - Classify the given text into one or more labels using zero-shot classification. + text_l = text.lower() + label_tokens = [tok for tok in label.lower().split() if tok] + if not label_tokens: + return 0.0 + matches = sum(1 for tok in label_tokens if tok in text_l) + return matches / len(label_tokens) - Parameters: - text (str): The text to classify. - labels (list): A list of candidate labels for classification. + def classify(self, text: str, labels: list[str]) -> dict: + """ + Rank labels for a given text. Returns: - dict: A dictionary mapping each label to its corresponding score, sorted in descending order. + dict: {label: score}, sorted by score descending. """ - # Perform zero-shot classification on the input text with the given labels, - # allowing multiple labels to be assigned (multi_label=True). - scores = self.classifier(text, labels, multi_label=True) - - # Create a dictionary mapping labels to their scores. - scores_with_labels = dict(zip(scores["labels"], scores["scores"])) - + if not labels: + return {} + + # HF-based zero-shot classification if available + if self.classifier is not None: + scores = self.classifier(text, labels, multi_label=True) + scores_with_labels = dict(zip(scores["labels"], scores["scores"])) + else: + # Fallback: lexical overlap + scores_with_labels = {label: self._lexical_score(text, label) for label in labels} + # Sort the dictionary by scores in descending order. 
- scores_with_labels_descending = dict( - sorted(scores_with_labels.items(), key=lambda item: item[1], reverse=True) - ) - - return scores_with_labels_descending + sorted_items = sorted(scores_with_labels.items(), key=lambda item: item[1], reverse=True) + return dict(sorted_items) diff --git a/my_furhat_backend/models/llm_factory.py b/my_furhat_backend/models/llm_factory.py index 97af264..81bce86 100755 --- a/my_furhat_backend/models/llm_factory.py +++ b/my_furhat_backend/models/llm_factory.py @@ -1,28 +1,45 @@ """ Language Model Factory Module -This module provides a factory pattern implementation for creating and managing different types of language models. -It supports both HuggingFace and LlamaCpp models with GPU optimization and monitoring capabilities. - -Classes: - BaseLLM: Abstract base class defining the interface for all LLM implementations. - HuggingFaceLLM: Implementation using HuggingFace's API and models. - LlamaCcpLLM: Implementation using LlamaCpp for local model inference. - -Functions: - create_llm: Factory function to create instances of different LLM types. +Design choices: +- Provide a unified interface (BaseLLM) for multiple backends. +- Prefer minimal deps where possible; Ollama can run without torch/transformers. +- HuggingFace/LlamaCpp paths are guarded behind availability checks to avoid hard + failures when GPU/libtorch is missing. """ from abc import ABC, abstractmethod import multiprocessing +import os +import requests +import shutil +import subprocess +import time + +try: + import torch # type: ignore[import] +except Exception as e: # noqa: BLE001 + # Torch is only required for local HuggingFace / Llama models. + # Ollama-based flows can run without it. 
+ print(f"[llm_factory] Torch unavailable, GPU-backed HF/llama models disabled: {e}") + torch = None # type: ignore[assignment] + from langchain_community.chat_models import ChatLlamaCpp from langchain_community.chat_models import ChatOllama + +try: + from transformers import pipeline # type: ignore[import] +except Exception as e: # noqa: BLE001 + print(f"[llm_factory] Transformers pipeline unavailable, HF models disabled: {e}") + pipeline = None # type: ignore[assignment] + from my_furhat_backend.config.settings import config -from my_furhat_backend.utils.gpu_utils import setup_gpu, move_model_to_device, print_gpu_status, clear_gpu_cache -from transformers import pipeline -import torch -import os -import requests +from my_furhat_backend.utils.gpu_utils import ( + setup_gpu, + move_model_to_device, + print_gpu_status, + clear_gpu_cache, +) class BaseLLM(ABC): """Abstract base class for all LLM implementations.""" @@ -31,24 +48,17 @@ class BaseLLM(ABC): def query(self, text: str, tool: bool = False) -> str: """ Process a query with the language model. - - Args: - text (str): The input text or prompt to be processed. - tool (bool): If True, invoke the model with pre-bound tools. - - Returns: - str: The generated response from the language model. + + tool flag is for implementations that support tool-calling; default + implementations may ignore it. Kept simple to avoid overfitting to any + specific provider API. """ pass @abstractmethod def bind_tools(self, tools: list, tool_schema: dict | str = None) -> None: """ - Bind external tools to the language model for extended functionality. - - Args: - tools (list): A list of tools to be bound to the language model. - tool_schema (dict | str, optional): The schema or configuration for the tools. + Optional tool-binding hook; no-op for backends that don't support it. 
""" pass @@ -63,28 +73,49 @@ class HuggingFaceLLM(BaseLLM): def __init__(self, model_id: str, task: str = "text-generation", **kwargs): """ Initialize the HuggingFace LLM with optimized settings. - - Args: - model_id (str): The model identifier from HuggingFace - task (str): The task type (default: "text-generation") - **kwargs: Additional arguments for model configuration + + Design: prefer running local HF pipelines only when torch+transformers + are present; otherwise caller should select Ollama. """ self.model_id = model_id self.task = task + # We require both torch and transformers.pipeline to be available + if torch is None or pipeline is None: + raise RuntimeError( + "Torch/Transformers pipeline is not available; HuggingFaceLLM cannot be " + "initialized. Use an Ollama-backed model instead or install a compatible " + "PyTorch/Transformers build." + ) + self.device_info = setup_gpu() - - # Optimize model loading - self.model_kwargs = { - "device_map": "auto", # Automatically handle device placement - "torch_dtype": torch.float16, # Use half precision - "low_cpu_mem_usage": True, # Optimize CPU memory usage - "load_in_8bit": True, # Use 8-bit quantization - "max_memory": {0: "16GB"} if self.device_info["cuda_available"] else None - } - - # Update with any additional kwargs - self.model_kwargs.update(kwargs) - + + # Generation defaults (can be overridden by kwargs) + self.max_length = kwargs.pop("max_length", 1024) + self.max_new_tokens = kwargs.pop("max_new_tokens", 256) + self.temperature = kwargs.pop("temperature", 0.7) + self.top_p = kwargs.pop("top_p", 0.9) + self.do_sample = kwargs.pop("do_sample", True) + self.extra_generation_kwargs = {} + for key in ("min_length", "no_repeat_ngram_size", "repetition_penalty"): + if key in kwargs: + self.extra_generation_kwargs[key] = kwargs.pop(key) + + # torch-related model kwargs + dtype = torch.float16 if self.device_info["cuda_available"] else torch.float32 + self.model_kwargs = {"torch_dtype": dtype} + + # Choose 
device for the pipeline: CUDA -> 0, CPU -> -1, fallback to MPS string if available + if self.device_info["cuda_available"]: + self.pipeline_device = 0 + elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): + self.pipeline_device = "mps" + else: + self.pipeline_device = -1 + + # Allow explicit overrides via model_kwargs argument + extra_model_kwargs = kwargs.pop("model_kwargs", {}) + self.model_kwargs.update(extra_model_kwargs) + # Create the pipeline with optimized settings self.__create_pipeline() @@ -93,7 +124,7 @@ def __del__(self): clear_gpu_cache() def __create_pipeline(self): - """Create the HuggingFace pipeline with optimized settings.""" + """Create the HuggingFace pipeline with optimized settings; fallback to CPU on failure.""" try: # Clear GPU cache before loading clear_gpu_cache() @@ -102,7 +133,8 @@ def __create_pipeline(self): self.pipeline = pipeline( task=self.task, model=self.model_id, - **self.model_kwargs + device=self.pipeline_device, + model_kwargs=self.model_kwargs ) # Move model to GPU if available @@ -114,11 +146,11 @@ def __create_pipeline(self): except Exception as e: print(f"Error creating pipeline: {e}") # Fallback to CPU if GPU fails - self.model_kwargs["device_map"] = "cpu" self.pipeline = pipeline( task=self.task, model=self.model_id, - **self.model_kwargs + device=-1, + model_kwargs={"torch_dtype": torch.float32} ) def __truncate_input(self, prompt: str) -> str: @@ -136,7 +168,12 @@ def __truncate_input(self, prompt: str) -> str: if not tokenizer: return prompt - tokens = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=self.model_kwargs["max_length"]) + tokens = tokenizer( + prompt, + return_tensors="pt", + truncation=True, + max_length=self.max_length, + ) return tokenizer.decode(tokens["input_ids"][0]) except Exception as e: print(f"Error truncating input: {e}") @@ -145,12 +182,8 @@ def __truncate_input(self, prompt: str) -> str: def query(self, prompt: str) -> str: """ Process a query with 
optimized inference parameters. - - Args: - prompt (str): The input text or prompt to be processed. - - Returns: - str: The generated response from the language model. + + Keeps generation config minimal and truncates inputs to avoid OOM. """ try: # Truncate input to prevent OOM errors @@ -159,26 +192,33 @@ def query(self, prompt: str) -> str: if self.pipeline: # Optimize generation parameters generation_config = { - "max_new_tokens": 256, # Reduced from 512 - "temperature": 0.7, - "top_p": 0.9, - "do_sample": True, - "num_beams": 1, # Use greedy decoding for speed - "pad_token_id": self.pipeline.tokenizer.eos_token_id, - "use_cache": True, # Enable KV cache - "return_dict_in_generate": True + "max_new_tokens": self.max_new_tokens, + "temperature": self.temperature, + "top_p": self.top_p, + "do_sample": self.do_sample, + "num_beams": 1, + "pad_token_id": getattr( + self.pipeline.tokenizer, "eos_token_id", None + ), + "use_cache": True, + "return_dict_in_generate": True, } + generation_config.update(self.extra_generation_kwargs) # Generate response with optimized parameters - result = self.pipeline( - truncated_prompt, - **generation_config - ) + result = self.pipeline(truncated_prompt, **generation_config) if isinstance(result, list) and len(result) > 0: - if isinstance(result[0], dict): - return result[0]["generated_text"] - return result[0] + entry = result[0] + if isinstance(entry, dict): + if "generated_text" in entry: + return entry["generated_text"] + if "summary_text" in entry: + return entry["summary_text"] + # Some pipelines wrap text differently + if "text" in entry: + return entry["text"] + return entry return str(result) else: # Fallback to API if pipeline fails @@ -188,12 +228,13 @@ def query(self, prompt: str) -> str: json={ "inputs": truncated_prompt, "parameters": { - "max_new_tokens": 256, - "temperature": 0.7, - "top_p": 0.9, - "do_sample": True - } - } + "max_new_tokens": self.max_new_tokens, + "temperature": self.temperature, + "top_p": 
self.top_p, + "do_sample": self.do_sample, + **self.extra_generation_kwargs, + }, + }, ) response.raise_for_status() return response.json()[0]["generated_text"] @@ -337,18 +378,20 @@ class OllamaLLM(BaseLLM): def __init__( self, - model: str = "llama3.1:instruct", + model: str = "llama3.2:latest", base_url: str = "http://localhost:11434", + system_prompt: str | None = None, **kwargs ): """ Args: - model: Ollama model name/tag (e.g., 'llama3.1:instruct', 'mixtral:8x7b-instruct', 'qwen2.5:14b-instruct'). + model: Ollama model name/tag (e.g., 'llama3.2:latest', 'mixtral:8x7b-instruct', 'qwen2.5:14b-instruct'). base_url: Ollama server URL. **kwargs: Generation/runtime options (temperature, top_p, num_ctx, num_gpu, repeat_penalty, etc.). """ self.model = model self.base_url = base_url + self.system_prompt = system_prompt self.gen_kwargs = { # Reasonable multilingual/chat defaults; override via **kwargs "temperature": 0.8, @@ -358,21 +401,105 @@ def __init__( } self.gen_kwargs.update(kwargs) - # Eager check that Ollama server is reachable (optional but helpful) + self.chat_llm = None + self.fallback_llm = None + + if not self._initialize_chat_llm(): + print( + "[OllamaLLM] Ollama server unavailable. Falling back to HuggingFace model." 
+ ) + fallback_model = os.getenv( + "OLLAMA_FALLBACK_MODEL", "HuggingFaceTB/SmolLM2-1.7B-Instruct" + ) + try: + self.fallback_llm = HuggingFaceLLM(model_id=fallback_model) + except Exception as e: + print(f"[OllamaLLM] Failed to initialize fallback HuggingFace model: {e}") + + _boot_attempted = False + _model_pull_attempted = False + + def _initialize_chat_llm(self) -> bool: + if not self._ensure_ollama_server(): + return False + self._pull_model() + try: + self.chat_llm = ChatOllama( + model=self.model, + base_url=self.base_url, + system=self.system_prompt, + options=self.gen_kwargs, + ) + return True + except Exception as e: + print(f"[OllamaLLM] Failed to initialize ChatOllama: {e}") + self.chat_llm = None + return False + + def _ping_server(self, timeout: float = 2.0) -> bool: try: - r = requests.get(f"{self.base_url}/api/tags", timeout=2) + r = requests.get(f"{self.base_url}/api/tags", timeout=timeout) r.raise_for_status() + return True + except Exception: + return False + + def _start_ollama_process(self) -> bool: + ollama_binary = shutil.which("ollama") + if not ollama_binary: + return False + + try: + subprocess.Popen( + [ollama_binary, "serve"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + start_new_session=True, + ) + # give the server a moment to start + for _ in range(5): + time.sleep(1) + if self._ping_server(timeout=1.0): + return True except Exception as e: - print(f"[OllamaLLM] Warning: Could not reach Ollama at {self.base_url}: {e}") - - # LangChain wrapper; keeps your interface consistent with ChatLlamaCpp - # ChatOllama accepts model/base_url and a dict of 'options' for runtime - self.chat_llm = ChatOllama( - model=self.model, - base_url=self.base_url, - # map gen kwargs into options LangChain forwards to Ollama - options=self.gen_kwargs - ) + print(f"[OllamaLLM] Failed to start Ollama process: {e}") + return False + + def _ensure_ollama_server(self) -> bool: + if self._ping_server(): + return True + + if not 
OllamaLLM._boot_attempted: + OllamaLLM._boot_attempted = True + if self._start_ollama_process(): + return True + + return self._ping_server(timeout=3.0) + + def _pull_model(self) -> bool: + if OllamaLLM._model_pull_attempted: + return False + + ollama_binary = shutil.which("ollama") + if not ollama_binary: + print("[OllamaLLM] Cannot pull model because the 'ollama' binary is not in PATH.") + OllamaLLM._model_pull_attempted = True + return False + + print(f"[OllamaLLM] Attempting to pull model '{self.model}' via 'ollama pull'.") + try: + subprocess.run( + [ollama_binary, "pull", self.model], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + OllamaLLM._model_pull_attempted = True + return True + except Exception as e: + print(f"[OllamaLLM] Failed to pull model '{self.model}': {e}") + OllamaLLM._model_pull_attempted = True + return False def __del__(self): try: @@ -394,18 +521,51 @@ def query(self, text: str, tool: bool = False) -> str: """ Send a prompt to the Ollama model. If you've bound tools, LangChain will handle tool calling. """ + if self.chat_llm is not None: + try: + print_gpu_status() + out = self.chat_llm.invoke(text) + print_gpu_status() + return getattr(out, "content", str(out)) + except Exception as e: + print(f"[OllamaLLM] query error: {e}") + # Mark Ollama connection unusable so we can transparently + # fall back to HuggingFace on subsequent calls. 
+ if "status code 404" in str(e) and self._pull_model(): + if self._initialize_chat_llm(): + try: + out = self.chat_llm.invoke(text) + return getattr(out, "content", str(out)) + except Exception as second_e: + print(f"[OllamaLLM] query error after pulling model: {second_e}") + self.chat_llm = None + if self.fallback_llm is None: + fallback_model = os.getenv( + "OLLAMA_FALLBACK_MODEL", "HuggingFaceTB/SmolLM2-1.7B-Instruct" + ) + try: + self.fallback_llm = HuggingFaceLLM(model_id=fallback_model) + except Exception as err: + print( + f"[OllamaLLM] Failed to initialize fallback HuggingFace model: {err}" + ) + + fallback_response = self._invoke_fallback(text, tool) + if fallback_response is not None: + return fallback_response + + return ( + "Ollama backend is unavailable and no fallback model could be initialized." + ) + + def _invoke_fallback(self, text: str, tool: bool) -> str | None: + if self.fallback_llm is None: + return None + try: - print_gpu_status() - # For Chat models, we can use .invoke with a simple human message - # If you prefer plain text, LangChain accepts string directly. - out = self.chat_llm.invoke(text) - print_gpu_status() - # ChatOllama returns a BaseMessage or string depending on version; - # extract text robustly: - return getattr(out, "content", str(out)) - except Exception as e: - print(f"[OllamaLLM] query error: {e}") - return "" + return self.fallback_llm.query(text, tool=tool) + except TypeError: + return self.fallback_llm.query(text) def create_llm(llm_type: str, **kwargs) -> BaseLLM: """ diff --git a/my_furhat_backend/perception/face.py b/my_furhat_backend/perception/face.py index b8ceb87..307ae5f 100644 --- a/my_furhat_backend/perception/face.py +++ b/my_furhat_backend/perception/face.py @@ -1,4 +1,13 @@ # my_furhat_backend/perception/face.py +""" +Face recognition helpers using InsightFace. 
+ +Design choices: +- Load InsightFace once at import; if unavailable, fail soft (returns None) so + the rest of the backend still works without GPU/onnxruntime. +- Use simple cosine similarity over stored embeddings; no external vector DB to + keep dependencies light. +""" from __future__ import annotations @@ -28,7 +37,7 @@ def _bytes_to_bgr(image_bytes: bytes) -> Optional[np.ndarray]: - """Decode image bytes into an OpenCV BGR array.""" + """Decode image bytes into an OpenCV BGR array; fail soft on errors.""" try: image = Image.open(io.BytesIO(image_bytes)).convert("RGB") return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) @@ -43,6 +52,9 @@ def extract_face_embedding(frame_bytes: bytes) -> Optional[np.ndarray]: Returns: np.ndarray of shape (512,) or similar, or None if failed/no face. + + Rationale: keep it minimal—pick the largest face, use InsightFace normed + embeddings; no batching, no multi-face disambiguation for now. """ if _face_app is None: return None @@ -89,6 +101,9 @@ def match_face_embedding( Returns: (User, similarity) if similarity >= threshold, else None. + + Chosen approach: in-DB scan with cosine similarity—sufficient for small user + sets; avoids adding a vector DB dependency. """ if embedding is None or embedding.size == 0: return None diff --git a/my_furhat_backend/perception/language.py b/my_furhat_backend/perception/language.py index 128ab07..43c6244 100644 --- a/my_furhat_backend/perception/language.py +++ b/my_furhat_backend/perception/language.py @@ -1,10 +1,13 @@ from typing import Dict - +# Placeholder language ID to keep pipeline simple; can be swapped for a real +# langid model later (e.g., fasttext/langdetect) once dependencies are allowed. def detect_language(text: str) -> str: """ - VERY simple placeholder – always returns 'en' for now. - Later: plug in a real language-id model or reuse ASR metadata. + Placeholder: always returns 'en'. 
+ + Chosen to keep dependencies minimal on the backend; language is primarily + inferred client-side or via session distributions. """ return "en" @@ -12,8 +15,7 @@ def detect_language(text: str) -> str: def update_language_distribution(existing: Dict[str, float], lang_code: str, weight: float = 1.0) -> Dict[str, float]: """ Update a language distribution dict with a new observation. - existing: e.g. {"en": 0.7, "no": 0.3} - lang_code: e.g. "en" + Keeps probabilities normalized; lightweight alternative to heavier models. """ dist = dict(existing) if existing else {} dist[lang_code] = dist.get(lang_code, 0.0) + weight diff --git a/my_furhat_backend/perception/session_state.py b/my_furhat_backend/perception/session_state.py index 6034277..7b8dfc4 100644 --- a/my_furhat_backend/perception/session_state.py +++ b/my_furhat_backend/perception/session_state.py @@ -12,28 +12,41 @@ class SessionState: # Simple in-memory store: session_id -> SessionState +# Design choice: keep lightweight per-process state; no external store required. _sessions: Dict[str, SessionState] = {} def get_or_create_session(session_id: str) -> SessionState: + """ + Return existing session state or create a new one. + """ if session_id not in _sessions: _sessions[session_id] = SessionState(session_id=session_id) return _sessions[session_id] def update_session_user(session_id: str, user_id: str) -> SessionState: + """ + Attach a user_id to the session (used by perception and trivia stats). + """ state = get_or_create_session(session_id) state.user_id = user_id return state def update_session_language_dist(session_id: str, language_dist: Dict[str, float]) -> SessionState: + """ + Store/refresh language distribution for this session. + """ state = get_or_create_session(session_id) state.language_dist = language_dist return state def increment_turn_index(session_id: str) -> int: + """ + Increment and return the dialogue turn index for this session. 
+ """ state = get_or_create_session(session_id) state.turn_index += 1 return state.turn_index \ No newline at end of file diff --git a/my_furhat_backend/perception/voice.py b/my_furhat_backend/perception/voice.py index d1947f7..d620f17 100644 --- a/my_furhat_backend/perception/voice.py +++ b/my_furhat_backend/perception/voice.py @@ -1,4 +1,13 @@ # my_furhat_backend/perception/voice.py +""" +Voice recognition helpers using Resemblyzer. + +Design choices: +- Load Resemblyzer encoder once; if missing deps (soundfile/torch), fail soft + and return None so the backend still runs without voice ID. +- Simple cosine similarity over stored embeddings; no vector DB to keep deps + light for small user sets. +""" from __future__ import annotations @@ -6,9 +15,14 @@ from typing import Optional, Tuple, List import numpy as np -import soundfile as sf from sqlalchemy.orm import Session +try: + import soundfile as sf # type: ignore[import] +except Exception as e: # noqa: BLE001 + print(f"[voice] Failed to import soundfile: {e}") + sf = None # type: ignore[assignment] + from my_furhat_backend.db.models import User # --------- Load Resemblyzer encoder once ---------- @@ -23,13 +37,16 @@ def _bytes_to_mono_float32(audio_bytes: bytes) -> Optional[np.ndarray]: - """Decode audio bytes into a mono float32 numpy array.""" + """Decode audio bytes into a mono float32 numpy array; fail soft if soundfile missing/decoding fails.""" + if sf is None: + # soundfile backend not available + return None try: audio, sr = sf.read(io.BytesIO(audio_bytes), dtype="float32") if audio.ndim > 1: # stereo → mono audio = np.mean(audio, axis=1) return preprocess_wav(audio, source_sr=sr) - except Exception as e: + except Exception as e: # noqa: BLE001 print(f"[voice] Failed to decode audio: {e}") return None @@ -40,6 +57,9 @@ def extract_voice_embedding(audio_bytes: bytes) -> Optional[np.ndarray]: Returns: np.ndarray or None. 
+ + Rationale: keep it minimal—single utterance embedding, reject very short + clips; no diarization or multi-speaker handling. """ if _voice_encoder is None: return None @@ -78,6 +98,9 @@ def match_voice_embedding( Returns: (User, similarity) if similarity >= threshold, else None. + + Chosen approach: scan stored embeddings in DB with cosine similarity. + Works for small user sets without adding a vector search dependency. """ if embedding is None or embedding.size == 0: return None diff --git a/my_furhat_backend/perception/websocket_handler.py b/my_furhat_backend/perception/websocket_handler.py index 193eb19..5adc9b1 100644 --- a/my_furhat_backend/perception/websocket_handler.py +++ b/my_furhat_backend/perception/websocket_handler.py @@ -1,7 +1,18 @@ from __future__ import annotations +""" +Perception websocket handler. + +Design choices: +- Keep it single-file and lightweight: FastAPI WebSocket + in-memory session state. +- Binary frames prefixed (0x01 video, 0x02 audio); text frames carry hello/turn/name. +- Face/voice recognition are optional; failures should not crash the WS. +- Create a user on the fly if perception can’t match, so stats can persist. +""" + import json import uuid +import logging from datetime import datetime from typing import Any, Dict, Optional @@ -20,6 +31,8 @@ from my_furhat_backend.perception import face as face_mod from my_furhat_backend.perception import voice as voice_mod +logger = logging.getLogger(__name__) + VIDEO_PREFIX = 0x01 AUDIO_PREFIX = 0x02 @@ -70,6 +83,9 @@ async def _send_identity_update( def _ensure_user_for_session(db: Session, session_id: str): """ Ensure there is a User associated with this session. + + Chosen to create a user on-demand when no match exists so that turn/name + updates and trivia stats have a stable user_id even without biometrics. 
""" state = get_or_create_session(session_id) if state.user_id: @@ -91,6 +107,10 @@ def _ensure_user_for_session(db: Session, session_id: str): def _update_identity_from_face(db: Session, session_id: str, frame_bytes: bytes): + """ + Try to match or create a user from a face embedding; returns User or None. + Soft-fails if face embedding cannot be extracted. + """ state = get_or_create_session(session_id) emb = face_mod.extract_face_embedding(frame_bytes) @@ -112,6 +132,10 @@ def _update_identity_from_face(db: Session, session_id: str, frame_bytes: bytes) def _update_identity_from_voice(db: Session, session_id: str, audio_bytes: bytes): + """ + Try to match or create a user from a voice embedding; returns User or None. + Soft-fails if voice embedding cannot be extracted. + """ state = get_or_create_session(session_id) emb = voice_mod.extract_voice_embedding(audio_bytes) @@ -141,6 +165,11 @@ async def _handle_text_message( """ Handle JSON text messages: hello, turn, name_update. Returns updated current_session_id. + + Rationale: + - "hello" establishes session_id and initializes language dist. + - "turn" updates language dist and turn index; keeps user_id stable/created. + - "name_update" writes name to DB and echoes identity_update back. """ try: msg = json.loads(text) @@ -260,6 +289,9 @@ async def _handle_binary_message( Handle binary streaming frames: - 0x01 + JPEG bytes => video frame - 0x02 + audio bytes => audio chunk + + Keeps silent if no session_id yet (requires hello first); soft-fails on + decoding/embedding errors to avoid dropping the WS. 
""" if not data: return @@ -276,8 +308,10 @@ async def _handle_binary_message( state = get_or_create_session(session_id) if stream_type == VIDEO_PREFIX: + logger.info(f"[perception] video frame received, bytes={len(payload_bytes)} session={session_id}") user = _update_identity_from_face(db, session_id, payload_bytes) elif stream_type == AUDIO_PREFIX: + logger.info(f"[perception] audio chunk received, bytes={len(payload_bytes)} session={session_id}") user = _update_identity_from_voice(db, session_id, payload_bytes) else: # Unknown binary subtype; ignore @@ -306,16 +340,26 @@ async def perception_ws_handler(websocket: WebSocket): - Binary streaming frames from the Furhat camera/mic. Designed to run continuously while the skill is active. + + Design: keep the handler minimal—no background tasks here; reconnect logic + is handled client-side. On disconnect, DB session is closed cleanly. """ await websocket.accept() db: Session = SessionLocal() current_session_id: Optional[str] = None + log = logging.getLogger("perception.ws") try: while True: - message = await websocket.receive() + try: + message = await websocket.receive() + except RuntimeError: + # Disconnect already received; exit cleanly + log.info("[perception] runtime disconnect") + break if "text" in message and message["text"] is not None: + log.info(f"[perception] text message received {len(message['text'])} bytes") current_session_id = await _handle_text_message( websocket, db, @@ -333,6 +377,6 @@ async def perception_ws_handler(websocket: WebSocket): # Otherwise ignore (e.g. 
pings) except WebSocketDisconnect: - pass + log.info("[perception] WebSocketDisconnect") finally: db.close() \ No newline at end of file diff --git a/my_furhat_backend/utils/gpu_utils.py b/my_furhat_backend/utils/gpu_utils.py index 1fb350e..b1a1942 100755 --- a/my_furhat_backend/utils/gpu_utils.py +++ b/my_furhat_backend/utils/gpu_utils.py @@ -1,93 +1,89 @@ """ GPU Utilities Module -This module provides utilities for managing GPU resources, monitoring memory usage, -and handling model device placement in PyTorch applications. +Design choice: keep this lightweight and resilient. +- Prefer PyTorch when available; otherwise fall back to CPU/psutil so the backend + can still run in Ollama-only mode without hard failures. +- Avoid extra dependencies beyond psutil (for RAM stats) to keep the footprint small. Functions: - setup_gpu: Configure and return GPU device information. - move_model_to_device: Move a PyTorch model to the specified device. - print_gpu_status: Display current GPU/CPU memory usage. - clear_gpu_cache: Clear GPU memory cache. + setup_gpu: Detect CUDA presence and report device/memory info. + move_model_to_device: Safely move a model if torch is available. + print_gpu_status: Debug helper for quick memory snapshots. + clear_gpu_cache: Torch empty_cache wrapper; no-op if CUDA/torch missing. """ -import torch -import psutil from typing import Optional, Dict, Any +import psutil + +try: + import torch # type: ignore[import] +except Exception as e: # noqa: BLE001 + print(f"[gpu_utils] Torch unavailable, using CPU-only utilities: {e}") + torch = None # type: ignore[assignment] + def setup_gpu() -> Dict[str, Any]: """ - Set up GPU configuration and return device information. 
- - Returns: - Dict[str, Any]: Dictionary containing: - - cuda_available (bool): Whether CUDA is available - - device (torch.device): The device to use (CUDA or CPU) - - device_name (str): Name of the device - - memory_info (Dict): Memory usage information in MB + Detect CUDA and gather basic device/memory info. + + Chosen over heavier GPU libs to minimize dependencies; psutil covers CPU stats, + torch (if present) covers CUDA stats. Returns a dict so callers can log/branch + without importing torch themselves. """ - device_info = { - "cuda_available": torch.cuda.is_available(), + device_info: Dict[str, Any] = { + "cuda_available": False, "device": None, "device_name": None, - "memory_info": None + "memory_info": None, } - - if device_info["cuda_available"]: + + if torch is not None and torch.cuda.is_available(): + device_info["cuda_available"] = True device_info["device"] = torch.device("cuda") device_info["device_name"] = torch.cuda.get_device_name() device_info["memory_info"] = { "allocated": torch.cuda.memory_allocated() / 1024**2, # MB - "cached": torch.cuda.memory_reserved() / 1024**2, # MB - "max_allocated": torch.cuda.max_memory_allocated() / 1024**2 # MB + "cached": torch.cuda.memory_reserved() / 1024**2, # MB + "max_allocated": torch.cuda.max_memory_allocated() / 1024**2, # MB } else: - device_info["device"] = torch.device("cpu") + device_info["device"] = None device_info["device_name"] = "CPU" device_info["memory_info"] = { "total": psutil.virtual_memory().total / 1024**2, # MB "available": psutil.virtual_memory().available / 1024**2, # MB - "used": psutil.virtual_memory().used / 1024**2 # MB + "used": psutil.virtual_memory().used / 1024**2, # MB } - + return device_info -def move_model_to_device(model: Any, device: Optional[torch.device] = None) -> Any: +def move_model_to_device(model: Any, device: Optional["torch.device"] = None) -> Any: # type: ignore[name-defined] """ - Move a model to the specified device (GPU/CPU). 
- - Args: - model (Any): The PyTorch model to move. - device (Optional[torch.device]): Device to move the model to. If None, - will use GPU if available, otherwise CPU. - - Returns: - Any: The model moved to the specified device. + Move a model to a target device if torch is present. + + Why this approach: keeps callers simple and safe—if torch is absent or the + model lacks `.to()`, we return the model unchanged instead of failing. """ + if torch is None: + # No-op if torch is unavailable + return model + if device is None: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - + if hasattr(model, "to"): return model.to(device) return model def print_gpu_status() -> None: """ - Print current GPU/CPU status and memory usage. - - If CUDA is available, prints GPU information including: - - CUDA device number - - Device name - - Memory allocated - - Memory cached - - Maximum memory allocated - - Otherwise, prints CPU memory information including: - - Total memory - - Available memory - - Used memory + Print a quick memory snapshot for debugging (GPU if available, else CPU). + + Chosen to avoid extra tooling; uses torch/psutil only, so it works on both + CUDA and CPU-only setups without additional installs. """ - if torch.cuda.is_available(): + if torch is not None and torch.cuda.is_available(): print("\n=== GPU Status ===") print(f"CUDA Device: {torch.cuda.current_device()}") print(f"Device Name: {torch.cuda.get_device_name()}") @@ -103,23 +99,11 @@ def print_gpu_status() -> None: def clear_gpu_cache() -> None: """ - Clear GPU memory cache to free up resources and prevent memory leaks. - - This function: - 1. Checks if CUDA is available on the system - 2. Calls torch.cuda.empty_cache() to release unused GPU memory - 3. 
Prints a confirmation message when the cache is cleared - - This is particularly useful when: - - Switching between large models - - After processing large batches of data - - When experiencing out-of-memory errors - - Before starting memory-intensive operations - - Note: - This only clears the cache of unused memory. It does not free - memory that is still in use by active tensors or models. + Clear unused GPU cache via torch.cuda.empty_cache (no-op if torch/CUDA missing). + + Kept minimal to avoid surprises on CPU-only deployments; chosen over custom + allocators because torch already manages its own caching. """ - if torch.cuda.is_available(): + if torch is not None and torch.cuda.is_available(): torch.cuda.empty_cache() - print("GPU cache cleared") \ No newline at end of file + print("GPU cache cleared") \ No newline at end of file diff --git a/my_furhat_backend/utils/ollama_bootstrap.py b/my_furhat_backend/utils/ollama_bootstrap.py new file mode 100644 index 0000000..da15725 --- /dev/null +++ b/my_furhat_backend/utils/ollama_bootstrap.py @@ -0,0 +1,117 @@ +""" +Utility helpers to bootstrap the local Ollama runtime on backend startup. + +Design choice: keep startup resilient and dependency-light. +- Prefer to reuse an already running Ollama instance; only try to start it if unreachable. +- Pull the configured model once per process to avoid repeated downloads. +- Avoid tight coupling: we shell out to `ollama` binary instead of embedding the server. 
+""" + +from __future__ import annotations + +import logging +import shutil +import subprocess +import time +from typing import Optional + +import requests + +from my_furhat_backend.config.settings import config + +logger = logging.getLogger(__name__) + +DEFAULT_BASE_URL = config.get("OLLAMA_BASE_URL", "http://localhost:11434") +DEFAULT_MODEL = config.get("OLLAMA_MODEL", "llama3.2:latest") + +_bootstrap_attempted = False + + +def ensure_ollama_ready( + model: Optional[str] = None, + base_url: Optional[str] = None, +) -> None: + """ + Ensure an Ollama server is reachable and the requested model is pulled. + + Why this approach: + - Skip work if already attempted (idempotent per process). + - Best-effort: continue without Ollama if binary/server is missing, so the + rest of the backend can still serve non-LLM endpoints. + """ + global _bootstrap_attempted + if _bootstrap_attempted: + return + _bootstrap_attempted = True + + model = model or DEFAULT_MODEL + base_url = base_url or DEFAULT_BASE_URL + + if not model: + logger.info("OLLAMA_MODEL not configured; skipping Ollama bootstrap.") + return + + if not _ping_server(base_url): + if not _start_server(): + logger.warning("Could not start Ollama server; continuing without it.") + return + + if not _ping_server(base_url, timeout=3): + logger.warning("Ollama server still unreachable at %s.", base_url) + return + + _pull_model(model) + + +def _ping_server(base_url: str, timeout: float = 2.0) -> bool: + try: + requests.get(f"{base_url}/api/tags", timeout=timeout).raise_for_status() + return True + except Exception: + return False + + +def _start_server() -> bool: + ollama_binary = shutil.which("ollama") + if not ollama_binary: + logger.warning("Cannot start Ollama server because 'ollama' binary is missing.") + return False + + logger.info("Starting Ollama server via '%s serve'.", ollama_binary) + try: + subprocess.Popen( + [ollama_binary, "serve"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + 
start_new_session=True, + ) + except Exception as exc: + logger.warning("Failed to launch Ollama server: %s", exc) + return False + + for _ in range(10): + time.sleep(1) + if _ping_server(DEFAULT_BASE_URL): + logger.info("Ollama server is now reachable.") + return True + + logger.warning("Timed out waiting for Ollama server to become reachable.") + return False + + +def _pull_model(model: str) -> None: + ollama_binary = shutil.which("ollama") + if not ollama_binary: + logger.warning("Cannot pull Ollama model because 'ollama' binary is missing.") + return + + logger.info("Ensuring Ollama model '%s' is pulled.", model) + try: + subprocess.run( + [ollama_binary, "pull", model], + check=True, + ) + logger.info("Model '%s' is available locally.", model) + except subprocess.CalledProcessError as exc: + logger.warning("Failed to pull Ollama model '%s': %s", model, exc) + diff --git a/my_furhat_backend/utils/qa_pairs.py b/my_furhat_backend/utils/qa_pairs.py new file mode 100644 index 0000000..b52e88c --- /dev/null +++ b/my_furhat_backend/utils/qa_pairs.py @@ -0,0 +1,111 @@ +""" +Utility helpers for loading question–answer pairs from a JSON file. + +Design choice: simple JSON + in-memory cache to keep startup fast and avoid +additional dependencies. QA pairs are static and small, so no DB/index needed. + +Expected format: +{ + "qa_pairs": [ + {"question": "...", "answer": "..."}, + ... + ] +} +""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass +from pathlib import Path +from typing import List, Optional + +from my_furhat_backend.config.settings import config + +logger = logging.getLogger(__name__) + + +@dataclass +class QAPair: + index: int + question: str + answer: str + + +_CACHE: List[QAPair] | None = None + + +def _qa_json_path() -> Path: + """ + Resolve the path to qa_pairs.json using DOCUMENTS_PATH from settings. + Keeping it relative to config allows swapping datasets without code changes. 
+ """ + base = Path(config["DOCUMENTS_PATH"]) + return base / "qa_pairs.json" + + +def load_qa_pairs(force_reload: bool = False) -> List[QAPair]: + """ + Load all QA pairs from qa_pairs.json with in-memory caching. + + Chosen approach: simple lazy cache in-process; avoids repeated disk I/O and + keeps runtime dependencies minimal (no DB/FS watchers). Use force_reload=True + if the file changes while the process is alive. + """ + global _CACHE + + if _CACHE is not None and not force_reload: + return _CACHE + + path = _qa_json_path() + if not path.is_file(): + logger.warning("qa_pairs.json not found at %s", path) + _CACHE = [] + return _CACHE + + try: + data = json.loads(path.read_text(encoding="utf-8")) + except Exception as exc: # noqa: BLE001 + logger.error("Failed to load qa_pairs.json from %s: %s", path, exc) + _CACHE = [] + return _CACHE + + pairs = data.get("qa_pairs") + if not isinstance(pairs, list): + logger.warning("qa_pairs.json at %s does not contain a 'qa_pairs' list", path) + _CACHE = [] + return _CACHE + + result: List[QAPair] = [] + for idx, item in enumerate(pairs): + if not isinstance(item, dict): + continue + question = str(item.get("question") or "").strip() + answer = str(item.get("answer") or "").strip() + if not question or not answer: + continue + result.append(QAPair(index=idx, question=question, answer=answer)) + + _CACHE = result + logger.info("Loaded %d QA pairs from %s", len(_CACHE), path) + return _CACHE + + +def get_qa_pair(index: int) -> Optional[QAPair]: + pairs = load_qa_pairs() + if 0 <= index < len(pairs): + return pairs[index] + return None + + +def get_random_qa_pair() -> Optional[QAPair]: + import random + + pairs = load_qa_pairs() + if not pairs: + return None + return random.choice(pairs) + + + diff --git a/my_furhat_backend/utils/util.py b/my_furhat_backend/utils/util.py index 3946762..5481860 100755 --- a/my_furhat_backend/utils/util.py +++ b/my_furhat_backend/utils/util.py @@ -1,6 +1,10 @@ """ -This module provides utility 
functions for cleaning, formatting, parsing, and summarizing text responses, -particularly for handling conversation transcripts and responses from language models. +Utility functions for cleaning, formatting, and parsing text and prompts. + +Design choices: +- Pure-Python (re/json/os) to avoid extra dependencies. +- Provide forgiving cleaners/parsers so upstream code remains simple and robust + when LLM outputs include stray chat markers or malformed JSON. Functions: clean_hc_response(response_text: str) -> str diff --git a/pyproject.toml b/pyproject.toml index 0114e26..8ab1273 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,8 @@ dependencies = [ "langchain (>=0.3.23,<0.4.0)", "langchain-community (>=0.3.21,<0.4.0)", "langchain-chroma (>=0.2.2,<0.3.0)", - "pypdf (>=5.4.0,<6.0.0)" + "pypdf (>=5.4.0,<6.0.0)", + "rank-bm25 (>=0.2.2,<0.3.0)" ] [project.scripts] diff --git a/tests/rag_llm_eval.py b/tests/rag_llm_eval.py new file mode 100644 index 0000000..3e4a296 --- /dev/null +++ b/tests/rag_llm_eval.py @@ -0,0 +1,588 @@ +import json +import os +import time +from dataclasses import dataclass, asdict +from typing import Any, Dict, List, Optional, Tuple + +import requests + + +BASE_URL = os.getenv("TEST_BACKEND_URL", os.getenv("BACKEND_URL", "http://localhost:8000")).rstrip("/") +ASK_ENDPOINT = f"{BASE_URL}/ask" +REQUEST_TIMEOUT = float(os.getenv("RAG_EVAL_TIMEOUT", "60")) + + +@dataclass +class EvalQuestion: + question: str + expected_keywords: List[str] + id: Optional[str] = None + category: Optional[str] = None + + +@dataclass +class EvalMetrics: + question: str + id: Optional[str] + category: Optional[str] + answer: Optional[str] + error: Optional[str] + true_positives: int + false_positives: int + false_negatives: int + precision: Optional[float] + recall: Optional[float] + f1: Optional[float] + exact_match: bool + latency_sec: Optional[float] + + +def _normalize(text: str) -> str: + return text.lower().strip() + + +def compute_keyword_metrics(answer: 
str, expected_keywords: List[str]) -> Tuple[int, int, int, Optional[float], Optional[float], Optional[float], bool]: + """ + Compute TP/FP/FN and precision/recall/F1 for a single answer against expected keywords. + Matching is done via simple, case-insensitive substring search. + """ + if not expected_keywords: + return 0, 0, 0, None, None, None, False + + answer_norm = _normalize(answer) + expected_norm = [_normalize(k) for k in expected_keywords] + + tps = 0 + fps = 0 + fns = 0 + + for kw in expected_norm: + if kw and kw in answer_norm: + tps += 1 + else: + fns += 1 + + # Very simple FP estimate: count predicted unique keyword-like tokens + # that don't correspond to any expected keyword. This is conservative + # and mainly useful to catch obvious hallucinations. + fps = 0 + + precision = None + recall = None + f1 = None + + if tps + fps > 0: + precision = tps / float(tps + fps) + if tps + fns > 0: + recall = tps / float(tps + fns) + if precision is not None and recall is not None and (precision + recall) > 0: + f1 = 2 * precision * recall / (precision + recall) + + exact_match = precision == 1.0 and recall == 1.0 and tps > 0 + return tps, fps, fns, precision, recall, f1, exact_match + + +def call_backend(question: str) -> Tuple[Optional[str], Optional[str], Optional[float]]: + """ + Call the /ask endpoint and return (answer, error_message, latency_sec). 
+ """ + payload: Dict[str, Any] = {"content": question} + start = time.time() + try: + resp = requests.post(ASK_ENDPOINT, json=payload, timeout=REQUEST_TIMEOUT) + latency = time.time() - start + resp.raise_for_status() + data = resp.json() + # FastAPI wrapper returns {"response": "..."}; fallback to raw data if needed + if isinstance(data, dict) and "response" in data: + answer = str(data["response"]) + else: + answer = str(data) + return answer, None, latency + except Exception as exc: # noqa: BLE001 + latency = time.time() - start + return None, str(exc), latency + + +def load_eval_questions() -> List[EvalQuestion]: + """ + Return the list of evaluation questions. + + This list intentionally contains 50+ questions, many of which reuse the + same ground-truth keyword sets (partners, board members, etc.) with + different phrasings. This provides broader coverage while keeping the + expected answers simple and verifiable. + """ + research_partners = [ + "NTNU", + "Norwegian Computing Center", + "SINTEF", + "University of Oslo", + "University of Stavanger", + ] + industrial_partners = [ + "ANEO", + "Cognite", + "Digital Norway", + "DNB", + "DNV", + "Kongsberg Digital", + "NRK", + "Schibsted", + "SpareBank 1 SMN", + "Statnett", + "Telenor", + ] + exec_members = [ + "Gøril Forbord", + "Odd Erik Gundersen", + "John Markus Lervik", + "Liv Dingsør", + "Karl Aksel Festø", + "Frank Børre Pedersen", + "Stein-Roar Skånhaug Bjørnstad", + "Anders Løland", + "Pål Nedregotten", + "Ingelin Steinsland", + "Odd Are Svensen", + "Trond Runar Hagen", + "Astrid Undheim", + "Arild Nebb Ervik", + "Nenad Keseric", + "Dagfinn Myhre", + "Stephan Oepen", + "Tom Ryen", + ] + + raw_questions: List[Dict[str, Any]] = [ + # Core partner questions + { + "id": "partners_research", + "category": "partners", + "question": "Who are the research partners listed in the NorwAI annual report?", + "expected_keywords": research_partners, + }, + { + "id": "partners_research_alt_1", + "category": "partners", + 
"question": "List all the research partners that collaborate with NorwAI.", + "expected_keywords": research_partners, + }, + { + "id": "partners_research_alt_2", + "category": "partners", + "question": "Which universities and research institutes are research partners in NorwAI?", + "expected_keywords": research_partners, + }, + { + "id": "partners_research_partial_1", + "category": "partners", + "question": "Name three of NorwAI's research partners.", + "expected_keywords": research_partners[:3], + }, + { + "id": "partners_research_partial_2", + "category": "partners", + "question": "Give two examples of universities that are research partners in NorwAI.", + "expected_keywords": [ + "NTNU", + "University of Oslo", + "University of Stavanger", + ], + }, + # Industrial partners questions + { + "id": "partners_industrial", + "category": "partners", + "question": "Which industrial partners are part of NorwAI?", + "expected_keywords": industrial_partners, + }, + { + "id": "partners_industrial_alt_1", + "category": "partners", + "question": "List all the industrial partners mentioned in the NorwAI annual report.", + "expected_keywords": industrial_partners, + }, + { + "id": "partners_industrial_alt_2", + "category": "partners", + "question": "Which companies make up the industrial partner group in NorwAI?", + "expected_keywords": industrial_partners, + }, + { + "id": "partners_industrial_partial_1", + "category": "partners", + "question": "Name three industrial partners that participate in NorwAI.", + "expected_keywords": industrial_partners[:3], + }, + { + "id": "partners_industrial_partial_2", + "category": "partners", + "question": "Give two examples of media or energy companies among NorwAI's industrial partners.", + "expected_keywords": [ + "NRK", + "Schibsted", + "ANEO", + ], + }, + # Mixed partner questions + { + "id": "partners_overview_1", + "category": "partners", + "question": "Which organizations are highlighted as key research partners and industrial 
partners in NorwAI?", + "expected_keywords": research_partners + industrial_partners, + }, + { + "id": "partners_energy", + "category": "partners", + "question": "Which partners from the energy sector are mentioned in the NorwAI consortium?", + "expected_keywords": [ + "ANEO", + "Statnett", + ], + }, + { + "id": "partners_media", + "category": "partners", + "question": "Which media organizations are listed as NorwAI partners?", + "expected_keywords": [ + "NRK", + "Schibsted", + ], + }, + { + "id": "partners_finance", + "category": "partners", + "question": "Which financial organizations are partners in NorwAI?", + "expected_keywords": [ + "DNB", + "SpareBank 1 SMN", + ], + }, + { + "id": "partners_digital", + "category": "partners", + "question": "Name two technology or digital-focused industrial partners in NorwAI.", + "expected_keywords": [ + "Cognite", + "Kongsberg Digital", + "Digital Norway", + ], + }, + # Executive board questions + { + "id": "exec_chair", + "category": "governance", + "question": "Who chaired the executive board in 2025?", + "expected_keywords": ["Sven Størmer Thaulow"], + }, + { + "id": "exec_chair_alt_1", + "category": "governance", + "question": "What is the name of the chair of the NorwAI executive board for 2025?", + "expected_keywords": ["Sven Størmer Thaulow"], + }, + { + "id": "exec_members", + "category": "governance", + "question": "Name two members of the executive board and their organizations.", + "expected_keywords": exec_members, + }, + { + "id": "exec_members_alt_1", + "category": "governance", + "question": "List several members of the NorwAI executive board mentioned in the annual report.", + "expected_keywords": exec_members, + }, + { + "id": "exec_members_aneo", + "category": "governance", + "question": "Which executive board members are associated with Aneo?", + "expected_keywords": [ + "Gøril Forbord", + "Odd Erik Gundersen", + ], + }, + { + "id": "exec_members_media", + "category": "governance", + "question": "Which 
executive board members come from media organizations such as NRK or Schibsted?", + "expected_keywords": [ + "Trond Runar Hagen", + "Pål Nedregotten", + ], + }, + { + "id": "exec_members_research", + "category": "governance", + "question": "Name two executive board members who represent research institutions.", + "expected_keywords": [ + "Stephan Oepen", + "Ingelin Steinsland", + "Odd Are Svensen", + "Anders Løland", + ], + }, + { + "id": "exec_members_energy", + "category": "governance", + "question": "Which executive board members represent the energy sector partners?", + "expected_keywords": [ + "Gøril Forbord", + "Odd Erik Gundersen", + "Astrid Undheim", + ], + }, + # Additional governance wording variants + { + "id": "exec_board_overview_1", + "category": "governance", + "question": "Who are some of the key members of the NorwAI executive board?", + "expected_keywords": exec_members, + }, + { + "id": "exec_board_overview_2", + "category": "governance", + "question": "Give examples of executive board members and their roles in NorwAI.", + "expected_keywords": exec_members, + }, + # Generic document / overview questions with broad keywords + { + "id": "overview_mission", + "category": "overview", + "question": "What is the main mission or goal of NorwAI as described in the annual report?", + "expected_keywords": ["NorwAI"], + }, + { + "id": "overview_research_focus", + "category": "overview", + "question": "What are the main research focus areas highlighted in the NorwAI annual report?", + "expected_keywords": ["research", "NorwAI"], + }, + { + "id": "overview_industry_collaboration", + "category": "overview", + "question": "How does NorwAI describe the collaboration between research partners and industrial partners?", + "expected_keywords": ["partners", "research", "industrial"], + }, + { + "id": "overview_ai_norway", + "category": "overview", + "question": "How does the report describe NorwAI's role in advancing AI in Norway?", + "expected_keywords": 
["NorwAI", "AI", "Norway"], + }, + # Education / talent questions with simple keywords + { + "id": "education_talent_1", + "category": "education", + "question": "What does the report say about education or talent development activities in NorwAI?", + "expected_keywords": ["education", "students", "talent"], + }, + { + "id": "education_courses", + "category": "education", + "question": "Does the annual report mention any courses, workshops, or training activities related to NorwAI?", + "expected_keywords": ["course", "workshop", "training"], + }, + # Activities / events with broad keywords + { + "id": "activities_events_1", + "category": "activities", + "question": "Which events or workshops are highlighted in the NorwAI annual report?", + "expected_keywords": ["workshop", "conference", "seminar"], + }, + { + "id": "activities_industry_projects", + "category": "activities", + "question": "What industry projects or use cases are described in the NorwAI report?", + "expected_keywords": ["project", "use case"], + }, + # Impact / results questions + { + "id": "impact_publications", + "category": "impact", + "question": "Does the report mention scientific publications or research outputs from NorwAI?", + "expected_keywords": ["publication", "paper", "journal"], + }, + { + "id": "impact_innovation", + "category": "impact", + "question": "What does the NorwAI annual report say about innovation or industrial impact?", + "expected_keywords": ["innovation", "impact"], + }, + { + "id": "impact_societal", + "category": "impact", + "question": "Is there any discussion of societal impact or ethical considerations in the NorwAI report?", + "expected_keywords": ["societal", "ethics"], + }, + # Governance structure (non-name) questions + { + "id": "governance_structure", + "category": "governance", + "question": "How is NorwAI's governance structure described in the annual report?", + "expected_keywords": ["Executive Board", "Scientific Advisory Board"], + }, + { + "id": 
"governance_roles", + "category": "governance", + "question": "Which leadership roles are highlighted in NorwAI's governance (for example, Chair, Research Director, Center Director)?", + "expected_keywords": ["Chair", "Research Director", "Center Director"], + }, + # Simple control questions with minimal expectations + { + "id": "control_norwai_name", + "category": "control", + "question": "What does the name NorwAI refer to in the context of the annual report?", + "expected_keywords": ["NorwAI"], + }, + { + "id": "control_year", + "category": "control", + "question": "Which year does the NorwAI annual report primarily describe?", + "expected_keywords": ["2024"], + }, + { + "id": "control_total_partners", + "category": "control", + "question": "In general terms, how many research and industrial partners are part of the NorwAI consortium?", + "expected_keywords": ["partners"], + }, + # Extra partner/board variants to push question count over 50 + { + "id": "partners_list_all", + "category": "partners", + "question": "Provide a combined list of NorwAI's research and industrial partners.", + "expected_keywords": research_partners + industrial_partners, + }, + { + "id": "exec_members_examples", + "category": "governance", + "question": "Give examples of at least three executive board members mentioned in the NorwAI annual report.", + "expected_keywords": exec_members[:5], + }, + { + "id": "partners_ntnu_role", + "category": "partners", + "question": "What role does NTNU play as a research partner in NorwAI?", + "expected_keywords": ["NTNU"], + }, + { + "id": "partners_telenor_role", + "category": "partners", + "question": "How is Telenor described as an industrial partner in the NorwAI consortium?", + "expected_keywords": ["Telenor"], + }, + { + "id": "partners_dnv_role", + "category": "partners", + "question": "What does the report say about DNV's participation in NorwAI?", + "expected_keywords": ["DNV"], + }, + { + "id": "partners_nrk_role", + "category": "partners", 
+ "question": "How is NRK's involvement in NorwAI characterized in the annual report?", + "expected_keywords": ["NRK"], + }, + ] + return [EvalQuestion(**q) for q in raw_questions] + + +def run_evaluation() -> Dict[str, Any]: + questions = load_eval_questions() + results: List[EvalMetrics] = [] + + total_tp = total_fp = total_fn = 0 + num_with_keywords = 0 + num_exact_match = 0 + + for q in questions: + answer, error, latency = call_backend(q.question) + + if error is not None or answer is None: + metrics = EvalMetrics( + question=q.question, + id=q.id, + category=q.category, + answer=answer, + error=error, + true_positives=0, + false_positives=0, + false_negatives=0, + precision=None, + recall=None, + f1=None, + exact_match=False, + latency_sec=latency, + ) + else: + tps, fps, fns, prec, rec, f1, exact = compute_keyword_metrics(answer, q.expected_keywords) + metrics = EvalMetrics( + question=q.question, + id=q.id, + category=q.category, + answer=answer, + error=None, + true_positives=tps, + false_positives=fps, + false_negatives=fns, + precision=prec, + recall=rec, + f1=f1, + exact_match=exact, + latency_sec=latency, + ) + if q.expected_keywords: + num_with_keywords += 1 + total_tp += tps + total_fp += fps + total_fn += fns + if exact: + num_exact_match += 1 + + results.append(metrics) + # Small delay to avoid hammering the backend + time.sleep(1.0) + + micro_precision = None + micro_recall = None + micro_f1 = None + + if total_tp + total_fp > 0: + micro_precision = total_tp / float(total_tp + total_fp) + if total_tp + total_fn > 0: + micro_recall = total_tp / float(total_tp + total_fn) + if micro_precision is not None and micro_recall is not None and (micro_precision + micro_recall) > 0: + micro_f1 = 2 * micro_precision * micro_recall / (micro_precision + micro_recall) + + summary = { + "num_questions": len(questions), + "num_with_keywords": num_with_keywords, + "exact_match_count": num_exact_match, + "exact_match_rate": num_exact_match / num_with_keywords if 
num_with_keywords else None, + "micro_precision": micro_precision, + "micro_recall": micro_recall, + "micro_f1": micro_f1, + "backend_url": BASE_URL, + } + + return { + "summary": summary, + "results": [asdict(r) for r in results], + } + + +if __name__ == "__main__": + """ + Run this script directly to evaluate the RAG + LLM pipeline. + + Example: + TEST_BACKEND_URL=http://localhost:8000 python -m tests.rag_llm_eval + """ + output = run_evaluation() + print(json.dumps(output["summary"], indent=2, ensure_ascii=False)) + print() + print(json.dumps(output["results"], indent=2, ensure_ascii=False)) + + diff --git a/tests/test_biometrics.py b/tests/test_biometrics.py new file mode 100644 index 0000000..e508ba2 --- /dev/null +++ b/tests/test_biometrics.py @@ -0,0 +1,56 @@ +from types import SimpleNamespace +from unittest.mock import MagicMock + +import numpy as np + +from my_furhat_backend.perception import face, voice + + +def _make_face_user(user_id: str, vector: np.ndarray): + return SimpleNamespace( + id=user_id, + face_embedding=face.serialize_embedding(vector.astype(np.float32)), + ) + + +def _make_voice_user(user_id: str, vector: np.ndarray): + return SimpleNamespace( + id=user_id, + voice_embedding=voice.serialize_embedding(vector.astype(np.float32)), + ) + + +def test_match_face_embedding_returns_best_user(): + db = MagicMock() + query = db.query.return_value + filtered = query.filter.return_value + + target = np.array([1.0, 0.0, 0.0], dtype=np.float32) + users = [ + _make_face_user("user-a", np.array([0.9, 0.0, 0.0])), + _make_face_user("user-b", np.array([0.0, 1.0, 0.0])), + ] + filtered.all.return_value = users + + match = face.match_face_embedding(db, target, threshold=0.4) + + assert match is not None + user, score = match + assert user.id == "user-a" + assert score > 0.8 + + +def test_match_voice_embedding_returns_none_without_users(): + db = MagicMock() + db.query.return_value.filter.return_value.all.return_value = [] + + result = 
voice.match_voice_embedding(db, np.array([1.0, 0.0], dtype=np.float32)) + + assert result is None + + +def test_extract_voice_embedding_returns_none_without_encoder(monkeypatch): + monkeypatch.setattr(voice, "_voice_encoder", None) + + assert voice.extract_voice_embedding(b"fake-bytes") is None + diff --git a/tests/test_perception_ws.py b/tests/test_perception_ws.py index 6d8d80c..457e172 100644 --- a/tests/test_perception_ws.py +++ b/tests/test_perception_ws.py @@ -1,10 +1,16 @@ import asyncio import json +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + import pytest from my_furhat_backend.perception import session_state from my_furhat_backend.perception.websocket_handler import ( + AUDIO_PREFIX, + VIDEO_PREFIX, _generate_new_user_id, + _handle_binary_message, _handle_text_message, _send_identity_update, ) @@ -79,3 +85,74 @@ def test_handle_text_message_without_session_sends_error(): payload = json.loads(ws.sent_text[-1]) assert payload["type"] == "error" + +def test_handle_binary_message_video_sends_identity_update(): + """ + Simulate a single video frame and verify that an identity_update + payload is pushed back over the websocket. 
+ """ + ws = DummyWebSocket() + db = MagicMock() + session_id = "session-video" + + # Ensure session exists + session_state.get_or_create_session(session_id) + + dummy_user = SimpleNamespace(id="user-vid", name="Ava", languages_json={"en": 1.0}) + + # Patch the low-level face handling so we don't depend on real models + with patch( + "my_furhat_backend.perception.websocket_handler._update_identity_from_face", + return_value=dummy_user, + ): + frame_bytes = b"\x00\x01\x02\x03" + data = bytes([VIDEO_PREFIX]) + frame_bytes + asyncio.run( + _handle_binary_message( + ws, + db, + data=data, + current_session_id=session_id, + ) + ) + + assert ws.sent_text, "Expected identity_update after video frame" + payload = json.loads(ws.sent_text[-1]) + assert payload["type"] == "identity_update" + assert payload["payload"]["user_id"] == "user-vid" + + +def test_handle_binary_message_audio_sends_identity_update(): + """ + Simulate a single audio chunk and verify that an identity_update + payload is pushed back over the websocket. 
+ """ + ws = DummyWebSocket() + db = MagicMock() + session_id = "session-audio" + + # Ensure session exists + session_state.get_or_create_session(session_id) + + dummy_user = SimpleNamespace(id="user-aud", name="Ava", languages_json={"en": 1.0}) + + with patch( + "my_furhat_backend.perception.websocket_handler._update_identity_from_voice", + return_value=dummy_user, + ): + audio_bytes = b"\x10\x20\x30\x40" + data = bytes([AUDIO_PREFIX]) + audio_bytes + asyncio.run( + _handle_binary_message( + ws, + db, + data=data, + current_session_id=session_id, + ) + ) + + assert ws.sent_text, "Expected identity_update after audio chunk" + payload = json.loads(ws.sent_text[-1]) + assert payload["type"] == "identity_update" + assert payload["payload"]["user_id"] == "user-aud" + diff --git a/tests/test_session_state.py b/tests/test_session_state.py new file mode 100644 index 0000000..171ba75 --- /dev/null +++ b/tests/test_session_state.py @@ -0,0 +1,40 @@ +import pytest + +from my_furhat_backend.perception import session_state + + +@pytest.fixture(autouse=True) +def reset_sessions(): + session_state._sessions.clear() # type: ignore[attr-defined] + yield + session_state._sessions.clear() # type: ignore[attr-defined] + + +def test_get_or_create_session_returns_same_instance(): + state_a = session_state.get_or_create_session("session-1") + state_a.user_id = "user-123" + + state_b = session_state.get_or_create_session("session-1") + + assert state_a is state_b + assert state_b.user_id == "user-123" + + +def test_update_session_user_assigns_and_returns_state(): + updated = session_state.update_session_user("session-42", "user-42") + + assert updated.user_id == "user-42" + assert session_state.get_or_create_session("session-42").user_id == "user-42" + + +def test_update_session_language_dist_overwrites_previous_distribution(): + session_state.update_session_language_dist("session-2", {"en": 1.0}) + state = session_state.update_session_language_dist("session-2", {"en": 0.4, "no": 0.6}) + 
+ assert state.language_dist == {"en": 0.4, "no": 0.6} + + +def test_increment_turn_index_counts_from_one(): + assert session_state.increment_turn_index("session-turns") == 1 + assert session_state.increment_turn_index("session-turns") == 2 + diff --git a/tests/test_websocket_handler.py b/tests/test_websocket_handler.py new file mode 100644 index 0000000..bff4b47 --- /dev/null +++ b/tests/test_websocket_handler.py @@ -0,0 +1,143 @@ +import asyncio +import json +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import pytest + +from my_furhat_backend.perception import session_state +from my_furhat_backend.perception.websocket_handler import ( + _handle_text_message, + _send_error, +) + + +class DummyWebSocket: + def __init__(self): + self.sent_text = [] + + async def send_text(self, message: str): + self.sent_text.append(message) + + +@pytest.fixture(autouse=True) +def reset_sessions(): + session_state._sessions.clear() # type: ignore[attr-defined] + yield + session_state._sessions.clear() # type: ignore[attr-defined] + + +def test_send_error_wraps_payload(): + ws = DummyWebSocket() + + asyncio.run(_send_error(ws, "session-err", "Something went wrong")) + + assert ws.sent_text, "Expected error payload to be sent" + payload = json.loads(ws.sent_text[-1]) + assert payload["type"] == "error" + assert payload["payload"]["session_id"] == "session-err" + assert payload["payload"]["message"] == "Something went wrong" + + +def test_handle_text_message_turn_updates_language_distribution(): + ws = DummyWebSocket() + db = MagicMock() + session_id = "session-turn" + user = SimpleNamespace(id="user-1", name="Ava", languages_json={"en": 1.0}) + + hello = json.dumps({"type": "hello", "payload": {"session_id": session_id}}) + asyncio.run(_handle_text_message(ws, db, hello, None)) + + with patch( + "my_furhat_backend.perception.websocket_handler.crud.get_user_by_id", + return_value=user, + ), patch( + 
"my_furhat_backend.perception.websocket_handler.crud.get_or_create_conversation", + return_value=MagicMock(), + ) as get_conv, patch( + "my_furhat_backend.perception.websocket_handler.crud.create_turn" + ) as create_turn, patch( + "my_furhat_backend.perception.websocket_handler.crud.update_user_languages" + ) as update_lang: + turn_payload = json.dumps( + { + "type": "turn", + "payload": { + "session_id": session_id, + "user_text": "Hei der", + "language": "no", + "robot_text": "Hei! Hvordan går det?", + }, + } + ) + + asyncio.run(_handle_text_message(ws, db, turn_payload, session_id)) + + state = session_state.get_or_create_session(session_id) + assert state.language_dist.get("no", 0) > 0 + assert "no" in state.language_dist + + +def test_handle_text_message_name_update_updates_user(monkeypatch): + ws = DummyWebSocket() + db = MagicMock() + session_id = "session-name" + user = SimpleNamespace(id="user-1", name=None, languages_json={"en": 1.0}) + + asyncio.run( + _handle_text_message( + ws, + db, + json.dumps({"type": "hello", "payload": {"session_id": session_id}}), + None, + ) + ) + + with patch( + "my_furhat_backend.perception.websocket_handler.crud.get_user_by_id", + return_value=user, + ), patch( + "my_furhat_backend.perception.websocket_handler.crud.update_user_name", + side_effect=lambda _db, existing, new_name: setattr(existing, "name", new_name), + ) as update_name: + name_payload = json.dumps( + { + "type": "name_update", + "payload": {"session_id": session_id, "name": "Ava"}, + } + ) + asyncio.run(_handle_text_message(ws, db, name_payload, session_id)) + + update_name.assert_called_once() + payload = json.loads(ws.sent_text[-1]) + assert payload["type"] == "identity_update" + assert payload["payload"]["name"] == "Ava" + + +def test_handle_text_message_unknown_type_sends_error(): + ws = DummyWebSocket() + db = MagicMock() + session_id = "session-unknown" + + asyncio.run( + _handle_text_message( + ws, + db, + json.dumps({"type": "hello", "payload": 
{"session_id": session_id}}), + None, + ) + ) + + asyncio.run( + _handle_text_message( + ws, + db, + json.dumps({"type": "foobar", "payload": {"session_id": session_id}}), + session_id, + ) + ) + + payload = json.loads(ws.sent_text[-1]) + assert payload["type"] == "error" + assert "Unknown message type" in payload["payload"]["message"] +