From 8c84c24ee92904ba8d6d922b8e5986fb27447174 Mon Sep 17 00:00:00 2001 From: Richard Guo Date: Wed, 10 May 2023 13:38:32 -0400 Subject: [PATCH] transfer python bindings code --- gpt4all-bindings/python/.gitignore | 164 ++++++++++ gpt4all-bindings/python/LICENSE.txt | 19 ++ gpt4all-bindings/python/MANIFEST.in | 1 + gpt4all-bindings/python/README.md | 41 +++ .../python/docs/assets/favicon.ico | Bin 0 -> 15406 bytes gpt4all-bindings/python/docs/assets/nomic.png | Bin 0 -> 25814 bytes gpt4all-bindings/python/docs/css/custom.css | 5 + gpt4all-bindings/python/docs/gpt4all_api.md | 6 + gpt4all-bindings/python/docs/index.md | 22 ++ gpt4all-bindings/python/gpt4all/__init__.py | 2 + gpt4all-bindings/python/gpt4all/gpt4all.py | 280 ++++++++++++++++++ gpt4all-bindings/python/gpt4all/pyllmodel.py | 241 +++++++++++++++ gpt4all-bindings/python/makefile | 16 + gpt4all-bindings/python/mkdocs.yml | 76 +++++ gpt4all-bindings/python/setup.py | 89 ++++++ .../python/{placeholder => tests/__init__.py} | 0 gpt4all-bindings/python/tests/test_gpt4all.py | 62 ++++ .../python/tests/test_pyllmodel.py | 44 +++ 18 files changed, 1068 insertions(+) create mode 100644 gpt4all-bindings/python/.gitignore create mode 100644 gpt4all-bindings/python/LICENSE.txt create mode 100644 gpt4all-bindings/python/MANIFEST.in create mode 100644 gpt4all-bindings/python/README.md create mode 100644 gpt4all-bindings/python/docs/assets/favicon.ico create mode 100644 gpt4all-bindings/python/docs/assets/nomic.png create mode 100644 gpt4all-bindings/python/docs/css/custom.css create mode 100644 gpt4all-bindings/python/docs/gpt4all_api.md create mode 100644 gpt4all-bindings/python/docs/index.md create mode 100644 gpt4all-bindings/python/gpt4all/__init__.py create mode 100644 gpt4all-bindings/python/gpt4all/gpt4all.py create mode 100644 gpt4all-bindings/python/gpt4all/pyllmodel.py create mode 100644 gpt4all-bindings/python/makefile create mode 100644 gpt4all-bindings/python/mkdocs.yml create mode 100644 gpt4all-bindings/python/setup.py rename gpt4all-bindings/python/{placeholder => tests/__init__.py} (100%) create mode 100644 gpt4all-bindings/python/tests/test_gpt4all.py create mode 100644 gpt4all-bindings/python/tests/test_pyllmodel.py diff --git a/gpt4all-bindings/python/.gitignore b/gpt4all-bindings/python/.gitignore new file mode 100644 index 00000000..970db3ec --- /dev/null +++ b/gpt4all-bindings/python/.gitignore @@ -0,0 +1,164 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Cython +/*.c +*DO_NOT_MODIFY/ \ No newline at end of file diff --git a/gpt4all-bindings/python/LICENSE.txt b/gpt4all-bindings/python/LICENSE.txt new file mode 100644 index 00000000..ac07e380 --- /dev/null +++ b/gpt4all-bindings/python/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2023 Nomic, Inc. 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/gpt4all-bindings/python/MANIFEST.in b/gpt4all-bindings/python/MANIFEST.in
new file mode 100644
index 00000000..ffee2b3e
--- /dev/null
+++ b/gpt4all-bindings/python/MANIFEST.in
@@ -0,0 +1 @@
+recursive-include gpt4all/llmodel_DO_NOT_MODIFY *
\ No newline at end of file
diff --git a/gpt4all-bindings/python/README.md b/gpt4all-bindings/python/README.md
new file mode 100644
index 00000000..fef14bad
--- /dev/null
+++ b/gpt4all-bindings/python/README.md
@@ -0,0 +1,41 @@
+# Python GPT4All
+
+This package contains a set of Python bindings that run against the `llmodel` C-API.
+
+
+# Local Installation Instructions
+
+TODO: Right now the instructions in the main README still depend on the Qt6 setup. To set up the Python bindings, only `llmodel` needs to be built, which is much simpler. In the future, however, the installation instructions below should be reorganized so that they assume the main README's instructions have already been followed.
+
+1. Set up `llmodel`
+
+```
+git clone --recurse-submodules https://github.com/nomic-ai/gpt4all-chat
+cd gpt4all-chat/llmodel/
+mkdir build
+cd build
+cmake ..
+cmake --build . --parallel
+```
+Confirm that `libllmodel.dylib` exists in `gpt4all-chat/llmodel/build`.
+
+2. Set up the Python package
+
+```
+cd ../../bindings/python
+pip3 install -r requirements.txt
+pip3 install -e .
+```
+
+3. Test it out!
+In a Python script or console:
+
+```python
+from gpt4all import GPT4All
+
+gptj = GPT4All("ggml-gpt4all-j-v1.3-groovy")
+messages = [{"role": "user", "content": "Name 3 colors"}]
+gptj.chat_completion(messages)
+```
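+
+`GPT4All` also exposes a lower-level `generate` method that takes a plain string
+prompt. A minimal sketch (the sampling keyword arguments are optional; the values
+shown are illustrative, not recommendations):
+
+```python
+from gpt4all import GPT4All
+
+gptj = GPT4All("ggml-gpt4all-j-v1.3-groovy")
+response = gptj.generate("Name 3 colors", n_predict=48, temp=0.1)
+print(response)
+```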
z<}B_LWbLf)3#&D}>57>GbQc-t=3qe|j@!Z-bAS7U*5fuEn5Kp+LvSU+=~;C zX|{jU+Y^nx2jx0ljq9mLfOKr{O}KqO{=1?#+Aa&JJ#lexqneEny4+<@o%n7SdQ2I1MPc+FP%?dHra9eNp-Zmn zq)+^}JwGj(PGe(>H&1_KeouW>NrG<(w2sB4=a?Mu;%r}3dT{1rr=A5jV8_$YCw!|q zjs9B4r-LDB>sVbH>60xdu{#c*ki2yh58iI`M_DU-qpYA-_CGI9(s!*c%|#_T*(usZ;=8vxIv&WuB6P+b&$+uFBXf)4;ngAU`8F^0l?~mWreN6K zz4!)fc^di-_EZubKX-dkvCh5wNNr-0r;J?g4J_zs_-4KFEy-bvy-H&TD5k(N=a#=2 z{0!}{54ak84>67*Uc8POXs)I#Pg+W!5JC@ZS>U!(+`i!=_Ne;9tIy8+A1=L^eh-1n z7TJZOk+RqI-$fZZz4SVP-BJG7P^GT?WnZXyD_j2is|cxa2@lUQ?8jAAxSMw23R1i1 z_cyKnqB%iV81V6j_3rL=_Ald^hMCT@MlY$9x+`xy<;yWxcjXgO+c#RqSlIcVzRCjH zF8f<>weB9XR<=oJ&!T}ig|Sul+5YJB`8ndHaM$47D;A+)`+hiXWmbt(UDhm#ezgJm zQnp5ZrNaF^q1BjCX5rZqxM$3*aou-&726h9!Fu>2Ft{#&Sg}ZdJD?(Tvi{kddfJ6! z$b{uD-L%a^9L|ClQr*nbt)eEC~WNy$DhBk zt$#IaqjD_Nhvjlr_4`EzJQ0?p)xT72!yet>w3d%j?fM=O$h31N+=hV5W~Ry#iU|(UMXmH8tAD_4k{L zS(AEKtBlU-qrza@SZx1oEMr5Bv$46GMlDyv+9J~*pQmXSxQA7BZ@+l($2V0`+v&fL zuG{{!D`;+F9<|fMOK<1m1A2Nc(2`91zJv6#p*YozA>yHCWyx98NAt|4}zKp8mjo_X~d%Rv&P5xgw|i!K{Y1&LS5pCpPuFmJ70=tFn@hk2yZw&$5t7kl!1`j4)h`*iE0(z}13xRM3un?z|X zypGMxxB*-L^WmiTgc;;_K2X1`0w zg2N9z)3w`%!t?If*lTnhR}2>ylg*dc*bH5ZhRy%& z{xc}0_R6LU*zpfw#wDay>h{_}&*170+jm5}DU4oE+lxL_8C_(iacHrx?6~+<`(F?o zeSFE6m9;Lm>ef(cP|VFUmyonmeVG?H_D#a7Pd7kM1In@2GTbeIsa;V22wHby?hw?+ z`JS;5{WdB3QI{1xIq&NitCX*NnKQ;*im0*kcS3ujp(jAxC9vQ%% z`Q9A4D&;p?(mDOUvJY?7j^50cNU^{v2<5 zFqm_)o&BWwd3Sc1KrkvY1VX&P)z1VHZ;2qygG8#Xi3-ty&X{VyZ|>%PSj+& zc)0&Q#j}XChg&zY4=S_>!k&e!Yb$Sq&|-d`t-Y4uaW-3V2$^4uC@ zSxQ?ILKRlq42^~t#}vRi>cNlRMsk`5eeUjU@zz&BBv;ARn9&s(f-XD@9?*B!?mARF?xYpsc0Y!0bMLkd8x#qeZek4Hd_P{91k9*y@i zSzPse`IsHzV)r`PT7a@6$7XzZ%S}$Ve(R<4+F!?mX&A+dzPhh1yd`JO*xlZ*p4dMN zeK>~)jo!&?WMK*U5sLfP9T$_gy7G05Iqy()sxFdt4>3P(XmG4_0lloNhk3;^3;R)# z9&movW@v){GW%eg#Q67!-=2>iH?**|Kc9x8(F-@@FBieD_zwk@c?ZsJ{&*Gf>$nY|j3H0o_sz~4%3m!Un)BDuV$XQ;P=%`X=s*~o##mn-zLsD=J8B; zei0uh+FgqWT-g9GWIyn>h9<+pPH3%1-hBJGcq?@CG!z07a{SEvC20ES zti9^3lu{?d%ZTKv-bQ8A8oFp?>&-cqsU{gBzCjJuZD@Bb7;t@0F0~Od{2{dS4D<1D zhwTk)zBG3wK#7z^c3Y7zlZ$Wv5058q+9#aa0Nr-&d+Vdq?5`6H;PB?x`nmA0=cd+k z)zHt!0+WhhT0PtgFp$;RwiK-R&RG+=x9YoFDN%mtLxoZFa;>m;{EYnNsLM#wUVehu zZlvc#lB|17@A3`Md7}f~oHdyXB@+n7JK1zN{_?ujgH^Yj6EOKhNGltg+NlKo)p#{x zLC9)Iw3V{J&Ka4wYJWkPB+r*_Z&#PN7r#=2At*k(iaejTAyQ zvEFp)Oid5o9GwP#op6$+Ja|!9)_HY;qf%I{9{x#kbP2Qje0Jq&LN<}uKT_N=Qe++o zRAhfqmr$bwbDl?asu`h1#WvU9CYf4aZYg-_pt_pPu4e{Z?~&5Cg*?CSy&TV5YMm5U z038&X8|p*nIL|;n(jH)N~v%%v_KAr z87Vw?xac!J>pgRjo4Uq3zN;v0J{p#sbTg_nDTuFZ}mg#4Zv zTx@?S#6;={cyYFO=@0)>FH)BI@Z+%iUvsGYOAkI}IziP=I2hCfYh}E;mPlB@KBS1+ zyG)T#$#d`0yV>@N-tpZW1S+YT8oA?YQ!QC_k#b~(pa1T zF!0OPG=#8+b1Wx%wbL5X9@^P{nI>I)CBUX1B1Hzh%SfAE?nB?1X+p^L-;X)-d5_;A zMXLZCS9jxVY=-QTln7M^pdE^%b4Zt&%c_dDr|=$6X1(y|Aa1iYa1+?o?i#)?)nr02 z9qbEM((hfSO2!!eIoRltsmE^q z7woQul5!}759}TzJW?L?lU8YbTv%z)upAdeAvM!{LA=l!8%Rov87v7#4(jqo;9qwr z~Q5xB&MZaqO7kR@_>O{n{daQ&xR zxGP+Tw~l-vRHKHN-`E*9EBO4o^DFT=HUc2oQ+ON6QPmDzuBYy0M(<6*PCWRWwE>#^ z@{F8FLnbm?x>iI(bzYqC9QgF>qnq>KsOMjl`Hv)H@-)O;8>IY%7WRc!SMx)+HQU(8 zdy;xJG+k=<1Eo>*Y&a>GH!uJBpGD2bM(ds!$Ey_2w}I9YYv%JdbjMR6-UKAq%b4WF zoJ)AZD+9LJ<)Ezdl-c#FaZQseZL4=Ye;&Mn?1QbE{WxYF3j3uKiWg~f%^|yOis#-~ zT*G8oF9;2TmQuKt8*7<{KfSAE^RFrmlts)_@#W2HXY1ANOWiEsCbBRj@6Rap;MC~z zx_ymX1$1N=-n#W?^s=hrElTq?#DmXA=Y_v4p2(#ie$H}$CVOH@mKlhoUaufXJ_K8! 
z^ZAGrlLKm@FnSFbZRs^5*NZ+)$NUH)qkMFp`pc^D*4YM?>XO7q`N*F2`@QJV2a|o5 zoDj*Gk7Kr=lc|R?aw9T;UF?8<8~j-F$X`|-o$Pses_sq>tOk_Qk@2mG!L+*j&OCy} zRq0>%dw{YMnRxLsx$ab}Yl4yMY+w3~1Eg>5fZ_$_{gA~qn(g5vFmEHFonpB3<$J=g zBUvCWJpWl%)!aQzf`*NHsLo!5nZG@!!Q49IefvEwy zY!7L$)!2L~?kApB&w+l(6MZ7XO5XqplZrTQG|1JFLtD}?1NUJGsbc)+Fpps$i6Z>XE1xP$-~S@6et_~aim z1O-p`N)i&B!+d19TX=T6qB-NkhbAJXFTr!A4IVjwMck)BwL9PMoa3d5>TYOpMaiy` zuV&ml5u~<_x6%bHPBjb-irZPG)c5PV#XP}@3%FN;xn|5~q!e=Cn24Ck7y z5YZ;w7QB$hi*5_OIg9%Lxr^%=@^0yiCNvyEpV`7SAnLw6G~bAuj|GYe;Gft7?Wn z{McV7kSB>*sKPI`jnK2&K8x#^k2KyHAbh#3gfQ_Hot>{o6dhQb6h(&~{?(pX9%O-`SfS9+^Jro0!G86s`Ib@qe^TRn* zk<|$FZD>(tJ=LAb{aEN5L0V9xm+Uyd(R1Dgao7vt&K)v%NV~Ol9=|UU6C;@1mX@v^ zDm<^whLa;!rPRY$c?7DVVItKHQwzMDKJr{ZyQXh3NqhJNqt>7PJ+*O;)bBP#9Rm zEY1O*^*(85x7Mvp!=sb)vAQ+xmkGCKU@}75wNK_Z#n2W2x#eL;>)~fSeb7)9(cK1* z=aW*uf!#4ZcKP`Xp-iQ|q2Z{`Kb*Hy#!g0jSl?4%+sMn%0v(jGL&byAnteB7j`>?3x@}|fh9s*H){*Clyo;TiJugL;8_~kf zjdBv;DZZ03+`K^zh)B&1dL`_bLYRE8sE)8Bk;VCa;{&}GHiHT)p%gBPNY2F(UvNi% zZrw&AMY^NFvQ1jm_3Gmt*qQ*GOe9YIvUR^)8iGS`O0jio0|4c}_l#9bM;;py+ zqQ-fA`nW*7n}VyIa7pe0aPR!#gGvdWja`~xYKF$#4a#uO z{o%0%@c8y#Ze#=nY`+d2m*zhGJ2a*jvdvqK5lxi%4N2IpHt=XpZig5Ht_HtyP$x{#Qcm_vPZEs-V(Fn<2jI(e1V*i9pUn2uDHTu7UG6%nzcf%c^#15bvP>W+iKj_yS_!QB1{LMql5jg zXGsY1F9$g!7x@CpXxzzDULOTpyiE`F+M~t*y+Fne$czI2a#FZ;d{vF#;Fl&6EZuv2%-xhu(4L6Sa$ZMWM$vE4uO+51X2ZsiRinbjWW=@ zfe}TlZ(0wx;xTTEvQ8^AT3Im!+Nnki?S{1fJ7hIAM5%{Y)1K=baZJD!Z2|CA=F~G2dRRo~woRandwiaVQH$IoBmPS}kM}g^ShQgTZm$>v2RWsKemW4g)HnRup~ubNm5L%Fyp~UjzdM{xjsDm=Nh>J zl(t$wAUBIf6ZDRcjKs7BH|9ul_w&!u8P--O@cVjHJt|Q&Mu1-$>?49ysE#J%AQG1t z2*|Oa|7=!cL^2=Ot%K_PIgA_!(O7Mxe?ZOyg_)!%nyjKIt`BTb4Kb##mmVgHm*-~` z2l-c#^6bc%AcY=z3i~D2UTvh__K*gf#TAg7Z$s6iErU!`J!aFw0DY&h1oXC^dJ^8% z^IZ}*uMm$T&qB6+pC9pqXdwktOCD7*y?&aI-xknd-ksq#Q>*~=;Q&aaN6fDn z*noE$aQ2US(X%hm@U?-`r7h=lD{7t~e7!UKoFCupL>#+=!yz_L%!`okbpyQddS9y@X~@sDVI>2u5a$4}5`A%OH)Oe-2%JT-(%{cUKjAOp01>nx#o zzU|Ef;NQD<4r%7#tm|HHiJF{)E8^jvO>G>=c*0{^8-eI+)$oZRJ{4v+j;ySaScBJ8 zwdZdHEG^R^5&}Sp<1LbyrkD_(@8rEXFzs@;gW6uE=+CHSgkF!Jt5p@nw}Cd3JUAR= zJj-xkP$M@M#cMC%4FkzjE~|fg0@J&*F`)hl&IOi%&zA*)aP$O@*K?l0 zwI>oTaT*H+77Nm`R58+c)CW>@YBQe%K_8Ia!74cBHxtn-a6K5-tiqrEPc znoVH1`Q{)zFUI)W*YZ6C%H5AtD2!gFe%eIrat6!0BObA69GC6P6_yu`8b|Js#%2ZS2>4POrG?r-4^#~iw3q#k4sFaP5MaJmSf*S*60U>kj>hF^a*+3aQb3TnYi)eOG@r2swzJ~~qiMiA!z(Ffe zt2vr1%N^lqyrGq?*3%dbB{hA66ojIAP?^4 z(O`on%W$ReY7M03hPI41ykJp?#Zg>}e5*yzV{w+*BwzMW9N;S%ESiT*2s!wD$BimX zAa*`s&n9>zp(M@|D*)X!8ncSBYh)ecu)1StE>t=*1lj55I74k()M9i~tnpz8ik{iqxfcZ_^=I3U51!{hbToyWlqt9cQ z-PuE{90=GR;|K7@TuRCeG0kNl%yqf4{hN;Ql;%g|o^*aADMm5i$`#aaE}tga=7xoT z&2n3qOQ!EiV9d>5^aS|#0lUMHMi3}?*>T`Hi~~%3dk77iQE>G2#Z?*tzIQHNTt~!_ zanSD7pGg8ZZhp?WE`d4u1nOs5FZm_3?IMwSKrtZXxL-)%IuOkWD^;&o5AXluG1d)7 z#!dO6#E>$xYLP?07;f+fF382PfAKS_wp0&~kjUv25f*Jknw9ckKFX#kT58`KZIM4=jRa-PJ2=ZG1 z3bm05C5I!QaT!c5Uv+|*dzF=X9C#M!?Sh)9p*>57&*Bw4^f6e_>28AJzy?MEQM@if zCLRste&7W~ZKPIQv*svnC|g0H3O8t`v7SFLG*vBwdHRZIN`?ig@&A!#e!iA4lVe+UAKkp@+>`)KnlK zeByfOgKDFRRKbtRQ(bqYs|_ZrQzj#?6CMUij5lP3SO=;)e-1}dXL4f5ri~XHHCr$J-)W&Ipmp~8>dOj2` z&4V82n1hFa(7{Xnxzr=ogZK#~uC2>hM0OdrL5e^mN4+`AXno_H>XL}zI~Q8q2$$H0 z95&LQSpfR0pIX_vV7PR}nBdV(mf=Kby6BCArYI(jW&yZN#hM2)+IW$29|DQ*fRFD| zV`Kqaw}oEfR)D6qx$>4ZKfbYPlO*m`AanNU=6{N!4U{li*ioy|U?B4cpP8D66iZz(`SqEMmZ5rf!>36_K{~Q06sVUqp?s}KmvHT9NL$$T_MC|`PiFa zA4hIa;QH5gcxx$LKt^7o-Hai3%_iHrcp*uYd}Jv-U*-S5Q=UW`nxaBwEl=N1&4riizL zR`gLHjwqP@De4)Uw6D&LZ@lE4k2zXau0D~h2NA_^)()r>_})8EUSuG8@?_5jlo#NH zVka%~B8|2hsJjSbpB$-3GJID_)W;S#u^U1XBNyewG(jU8RKNiTKR-?g3mU8F?bHx_ zNYv<>os@u4@279nA_D-uPAD}VEHHT3`M;aqK!#l+{FqUa<;LXDZs-v&7Tf{1TDrYN zyY>Z>yWYai17vy~h%S`H)!Un`Qk>AYlMvlITy!Qe@A9Xohym)6q~p 
zfOGNEHULG1wpGt0U-|A+>QCNVz+K}OC<5ERe5TP>0ah!pU*|`TXokO_YvI91G7g(5 zLvDJNRi#zlc1oT^!0X@1KGt9tUG93^gV2?{R}%)(_$A;CJ0aLMu|#H1 z+lf1bB39rHj)lLX4e?QcfwP6bnZ^1Aie+jKO=vo};iO6mqUx$A5k(MlaZUPiLiZM# zZ}>i|8(ihaPaGK39$;Pn0IAYZ^c0v2&G|~UW2~X?K*_p*iz-awG_;79sdsoaedYO& z3sh=_tw}i9WNu5M_PK~2U;} z#8{wvm=>q$O;j=9jKf8`eTOJO#TP(m_AL$OEAQr4c4#LXC4blc)WsG}crMc~XyHP( zF|w$QJP!#+sWE;*d`)u$2_C=+gceZpMxbOl6yDa(uMj8o`eW7&9ex#}WCb;(7DG%? zOcPN2smt}fe<^;Ap9K^(3stvh&Xi+ZWIm2yz;yN6^hfrMS2hyo1M8uD0Si|?0D1uv zLTE|sNRXm$>ZoKy2XfVrG-u@ProDHQj4GJ#c7s+LPL3r~frl%w9m9*W0Us=%K8X9z z90?&%fyp~CyCw|E2gE~btKaNq#PDXDn+n#Lf>;U`==b8x-djJ*D0lk!pVj-l=ySl} zj%kDI(LKPUPXW<`k8a}qjtYFbtYv;#@(5Ai-`@!B=lS~?%yB6_3Eb{bMZY-;ARCAy ztz2pau#`2x+Am=26C+1w(3j9P367VcC4%jd6cHIvxXLFmqeIgnU8jqN7l{sDGM1q5 z{M-gLMjSAQ9N0HOMd$UEvD2jLS~I$V>QsR0tRa}I?(EtyA@zeiN!kQCPEixIawy8A z=+ITV;@2MWA)>W+>M~Xg!U4GDDzyXYk?tN_7o;~toU}^n0W2$_9vHC_keY%8t*^YD zpdvlBYCPX5ipWR4eLx6W=preu77w4SXk%}oU4tXn&=deUwh-MCGkKJ@9Gs(pM&*5y z8hp|jETz)xh+z4k?!hvzD*fgSM5HiA6?jT-W(FJMgH_;Y5D#1A$TlFEW}wbFc+5Fh-+3|0kWy`HVMgk1(9wKEHZ&eugQ1zcocqZS6#c*wSfq{a?P z6-FL&zir(E|V*w8X%K(%|L>N70?E3xtv7Vwi}}@>SQx9Lzz8 z=wQn%a7NubHu7ef-`V@Mpsj?<1rJYdVtL|t63nG_vMbYLD_OpOy88+|A`@6u`#rN)p^79ij*tj&p*et*y=X~i zx7Vs#OVaH+GVTGl1D7KCcS7m))OsF}@?%w*IvjZL>TQGsFuL_nh4jRM))voM^`;en z&b5o+t&u(p0Pjly?=KN3E4yjd=`d1lMbs#uHhgL&_CDzAW#5-%W_yGz1IQ4s@Z5b% zx!uhY*ffPgUJkMDOF?DqCbOV;Xn#%uPT0X-ItPs5{RuEOk)U0h_TRB$&?f|AZ3)`Om#2@xeC@`p zB0kW)$4jh@Jt-Xdmv2=nZEpyX?dMZ0%u+RS)$2O~)qv`wixKy+Co52MKxlAdkN>{h`vtph#l3BkQbmI1J^1GFRy7;xm@F2ggH zp`_+Z76l)nF}b>LGGwKPJ|`TCkLR5)-ro%k^9h%Z?vT|2oU}Ky0|bw0dG?y~R_#&M zdEf$i5X}T7oCeM&ztkwT3Wl3PSlB@ z>8~PkQ5n?TWuR^4U=68ewD;Fz(uB|k7;rfd+Cy;Gu@Onr?>)>rjk|9SSxjZU&fp>A z&#PIPhYIeR@?wAq)9DCU&=D+X13`8Lck|7|2);qbU@hbQyZX6;zQDd%OhahhWVg&W z0^m!sc))fD`6qdp~2K5s{SRtGbnUXoKqe40fpNC z*|5Eh{k=l##u46tDmsT3`9P`hEdSf9$LD_c<$vGDyDjpm#TCRka5Nmh)&hk+i@!sm z=SZflpu3{iWp+vwgGzW?JKNObA@}nVax+yloo^ew|I-%tTwp|5*j(k{J>d?IWYlEJK6d!}t|qyzglR3G<$>Vg+~IMuJy&kY|`l*cfncT#$0;WJ@9U z>=&QRp=}fKJSzQLU4H>8*8wVR(dXfxY99*@4-$tDk{YOr@I{r|Gk1Whx}Xrwc%w}U z3-m2MnCeLH7T3b*?_l@*pnqAqoRF0UdVueuN+>WAP$)ZWhd85mdy1HZ(_LkzhLg`h z6VGq6IjKI2;psHe8sbBIu z0fcS~>F}MhS1FVE&$}E7co%rsK9D`UJ8xP05S1|2+Np$57f_ z--Cn*&OHCFOt!I$K!$0hoN~_)pZi$S45jjKqm)VQ042{9UEjSq>lkC1c{+lQVBska zjw3y)1t$FNnX;LQvjfu?1G$dQ@ncltEM+XYwVltrrm0Uhn4oK?+COk!224bvQKs_*{ z4BWI(LpO^NjrC9MY-I+<1k+a$2x3`4)}`|Kyx=8=hG|q_duXUPeT94M|6|7@EC3G% z(|5dq7|+l&4U{ee*9oR}1X(Vz_qD9RL!HcWksSa_f-?;%zfBRkt0$*WljWG~@B*h%tV?1^N-5<`}|R zu>G8N(Wl`n1HlRQUqdK?@i+1A*^~=aVRt0kGnstV1kjt+Wlxfnr^Y0PRhoRD<#d(WO(fA9o!O57uh8p;YhZG3lQw2nkUVa`hIf1nT7XkNX zr<-P2Rq?|sw3vrZg5ZkHU*?6tAXc-eBfvm7ORCg^=RI5fF2lP{FRzoIi_QdP(QRliy?FL6kQv)lQRZ7hs zEf4Eh(q{02^ zzjOfC_4H1vk68Q$=m&ovG^HPbba8-}E*M^*13&hAuB!xIDon)%Bg6)@?W;kK<9(Y*94<^5e;js4;Kfrq)e>y|}vGeBK*bEWH=XYhQH%+6E;my%xwEP^7Xq=wo zuV6CZRZy?u?*gP^1Z+P}kZ1e0kP=_)d)XP0|C$1NnwO6V$i(($Yv89!3;ra-es5Xa znKw?`v)`Mez}RY!EWkcm{>iuZ+Snen{J~u6Kcxpjiaa&z;q14c$Q%Dh5$AbB%wqh@ zt=K=12k@iew~IgBRp(!usrkqPr0_4F1pOUPM)JQVpD2K-k&eW7WoIx7u%vfuc_-I$ z;5*-TZUO!&+Vriu{B}_#m^H5W$7@p*uWhG*TFQSqXt>u-@%MT=NPI9h<+z*B1L9vV z;CHpSZptaplT63YhPzsf*Qgo1*=-{LlHOtNl8-bw#{A<;?{MMDY@t zo;)2`;0RxQ`V)Uvuxj%GePH41A+vrUO;Mh~g1~QBaZriB&vJT@1h1Qtum8!X!cNfP zh=bYFqewM4cO5bS-y;6?#vnxn7-WQjJ^#-2ZZtl?5)ewWCAOAe)GYF z)7@?-)X@EG!Sr1kkTglA61dwrE&R~U>W|ZE6Q}Rvhw?{T#sQQBJi!x$-Jeptuq17uL4Tp{K&Yz>4G_fvG*c}4lq0nr_0$55*1Xboi69^ zh_UR@O8|C3@*W2QRI_R)j#=+@xsDSo|FHxMVKiNUIWirPf;5zVpwTlTC4KtNbHD{E zn^)5{w|OlTEV$fw`nF5GO20R6AUcVvhq5k>E#UFi-t79}U)zF+Y^KEhsn=WqZlq2z zoOK%07C9CKQ0!D52d$#hv?lQR_*5E;QAD8GL)}1;fVT?3n~5X|Ouzu>PEFSva0l<< 
zye58Iy8`kXnJEHOkXP5CIzQH%H}}ueePe%Rq!jG(>LDM$_}ITT$@gKG7e0FG^bBngf5q4 zs*t_<%V3wQ;#0f4DyJJOc^S|CL)4{1DMJ3pu|;T~5HSOI?usau)1l#HyeR}eA_foC z(2keKmZM>kL&j3XytPQt&2PJq9?|I!0ztG29GaNJ;H)6RhvNV+|2e=!C<|UlwX=7V z&s!ixn*PuXjPq$@o7HocyQpvL*|ja9Lv}bA)pKFMm{2MUr>`G;`WFr5Uv||XQ@Sg`glG7gmLF9jDzA)t(YXauu2WMd!0=r~MKm7AR zgm?gApa?(%2l)UR(0%=E6Ivq8q=3VXKXSJ~)A^@A0K`{)XPU}-QRV*9bPgGY=L}KB z{yw>a7z+~*fH_0JoNwXxiGZ-oSyc<0A-TVTUB%)GGPr7l-4d#qfrho^2XYSn>!GiY z4tKh~t|L7ZFH^q1`&~yS^qNEYz`Ee>0tmM!Oa?dO;wwaI<)^<0ghR)?`-b~O^i?DN za=9I)O8n;mXl9U>zX4iR>FNYE@%QDE7xFbh1ssik+aZ5we=&AYsK%beh`3-?TG(Nx zC4+~k&0+YVqIW!DJ%Oy?HUD`~rSqQ$slm930PS;txW=gG4yvIUGN3TMxa$vn>QNJ@|kh{G!T4 z#ILPnc*%hkol8r=XN~`PurKmI54vf;3(3IH>Ka?J0O`4x|4n{yuQD!15cR1*Y7JY% zdF!*IBlxzGag*5K1}w|RpGU$Ff=3r<;=d0b^G|;x&Wp1ovs3btq|l`kgxszGuPMeO z4N&_Nr14LZCCvQ}mhjx3?CJ3SjS0@&^sfg&qSGG$Jyx2Q%nJ}paK}$vd3yZDs#-uk zSP$6)D;pCc)Z}Rij(~jB?l1gn#e!a(RN6`Fzw!`mX5akR10bLW`QMCW`DQHst6B;T zDiS0f0EsOF6073bStW)E%-=+IRB#Tc5!f-h(8^7JU7C6V`1te(fCY`PT{^F=)#}&3 zM0#Wzn31+c(h-h5;0Iw&;lLZK6n-7rQqc98RvI09%vSvb)+w@b)5#LpCUTQ;Hxm)} z3M{lO+KhuoZ%!NSs%Pvm6~3=)o)vV>p6|txB9@(SQfpgdP}0f2YqAKu3oc-#<50h4 zo}LqQSN+aM@VhjTc0#C15-1Fvp$6l_bJ84(9(3O!_D>Ody?bw)PdKAP$8VRdU<=#8 z4xabfQSE`yOT_%Wilv!mL+y)k8@)FK39=!k&tnZrS0 zuT8+O_k^gAR(P+9l|!5b#dP^AF}Y>oeR&=6k&Zc;*6X3h!vMa8UUfFf1mD+%e)33j zr1S4mm<&IrsLl*Fl>0{jTqgYz_v_f)bUVv6i4<=wP%GuZ9T7&8Nw&KtufLWAMQ}9W zED6*XIJwz(Pg`hgC>wM>qDWLF7(=Ha?Pq zp5?@?90CX7pyi9MRn42owa8XVv~w#T(vOe_mzZt|$GH2--m;qft?PBtc|J;ngCsL2K89pe6wOSRTAL|0NQ|mY7s&oVXqFbsP#&6$9KQmhUK6E-M92|66G6 zEBJjU!AaFFv`|Tx8LV_f_R%$8PG;gZkyqP#XMXsSF=h&4_bD%ALNOu#?!J)Bwgl7P zyV;&A&S2V~eeRq|ON7eAYOvAuTasl>;!gf5KYZcdX^=7{T9{Q@V1~_S!RRvXkMz05 zitDA&Z|wNqlZS?o9=UVde^&JhUNaDN9udRAyDh}!3;8iwva!dV^G$krkp3C)jfrH9 zq~tYUS!iUkNTT+(RNy%4NgSwmz}j6C%5nwYyQm{D`&Hw9n>O(*>QHI-n`TxQq;hdM zvV3u7UEbda$|U(;H%-|vV8#hB{*{}+{0hEf_c&Z+}rpI@t4Px z_J#Xa*v8@v*g_x%+{VCfEp;-&E8pE%Jj()Vh6dELq^{i#$F%$CgoewKl0^Ur*)!G(X# zD7!QGYIc?v-SeyK*CH>@PW_(6JKvd|O8H%I j#hd$5Bz3DL**g^>vTwt4UwW3_V z9g*2R>O21oCZBnk?{}ph__eriGFJ8I<;}{+$~)xER=s-Z0Cez~87+G?fIZxBZ>{9Z z-cXO2v*0_c{@Tq}5a(1|?(f1t-_T*p!7ZS?oh{(QQBZ3Ypjhk3bd8#M+^w)!-@?8Q zx`v(2uJ?D|iC>g*TuKMUI!5@(mobju<+%wzhOfz#%t=L`l?M(cq33a-SMsFoeZR;`@M#t*)#b`jq7@u+jXr)bhU8noYUdfC4+(5|EITW|A#6K<7A_< zMN8STjYJ}3Q#x5O)+LqLO-!G48J9_oq*+~D<}hw;YE?t`*v6tr%0x4U#v!6wng|(& z&QOf4=`_TgL&MCz{)zqheLm+rpXc*F?{oXU-*?)hna(B5r>}uTes#IS+xpwSROg31 zOoLCkjvGU@i0iH~Y|p*etrRW8SjVTR9C1RYlp|3-B~K^z8e;J*)Gb{av4``)a<(dY z^L1+Ciz|)-u2SLrM<0Xb1v{H{b48pd4S?&BDbIq>&Quiy9 zxEaxyTGi#ZC1=MNf^3(#ENigg^TaODWDsvC&Hmy+|9RIAgb~y9&||e^U+5xE$ztqr z!fsvu66j8&L@+2|JqWf5_pO%ce=hLk5vvGe+7kok@=^rdw~!mXiczWv0x}dZ?r2Kc zu0BqPz%(J@;Qq!^e8}g=;7P2qqj@@=&Q9Runa9op4`lI6a$hcN*5fY#M9Wh(fjy+- z!X16tjR0`(5sE#?A)jr%V!sI1zm2?otqRx&BQ$+#M~YiuWEpVyT8S_7UELZzd%wQm zwb+_W^hUk}R?;RGc!*PUJ5KTLj1R?s>jfX)oH@mGDTYc*@h<-{1dUnm5UV@T+iz8jT>j`~`_2;7NvKD4LkZ=HaF^=O+pbeLIBk+F<^y`X(X2&pT^fdvME4;m5cJ9mrWO(yMw5T28?}RWT3{D*_2)NHjWQj zVR`hOFw(8j@)G_1?D-1Nz4!KgWO=f1K@1mCHMUbHGiHfTk^unywv@c4GQ!xzR|gh_ zW?RJ*=$dNl*~sj$;h}Mwc@DdHT>F{V?lP9vNFR! zJEKpLFEI^H2U0ee689*)bb7+~bD46bkuQvth!Jv5Z4Mc)^Ow zHI7+{^Mk=Njw+FR#CgqcLdhnOITq#)tndf&H@E3!GgybP3wEIA+L@0tk);B?b!=Ix zw7gailx6Q*>z36;YJ->^Uy`*%synCt*|dtx2fY}dB3GbmHp_64YoqW4*N;Nxsh-8p{~bnJJUYSp)SZ{xD=Au)hy9vq(sbd!`!dQlMcj(4<=<&jVmPYjcDH$22;RV`KF{;)-h%9}ACh=h`Y=U2n0 z7aOK6V4Iza!(P(XN@z!9_al<4lZHPCQkTJ|^ z=W+^HPlQM{akHN={{93IH0WXQB)I8G@`J^C?fY8_Ny2y)L{ekrGf=7~t^Rn4K5e#! 
z47xqdMwX+v|J) literal 0 HcmV?d00001 diff --git a/gpt4all-bindings/python/docs/css/custom.css b/gpt4all-bindings/python/docs/css/custom.css new file mode 100644 index 00000000..c9d9f76f --- /dev/null +++ b/gpt4all-bindings/python/docs/css/custom.css @@ -0,0 +1,5 @@ +/* Remove the `In` and `Out` block in rendered Jupyter notebooks */ +.md-container .jp-Cell-outputWrapper .jp-OutputPrompt.jp-OutputArea-prompt, +.md-container .jp-Cell-inputWrapper .jp-InputPrompt.jp-InputArea-prompt { + display: none !important; +} \ No newline at end of file diff --git a/gpt4all-bindings/python/docs/gpt4all_api.md b/gpt4all-bindings/python/docs/gpt4all_api.md new file mode 100644 index 00000000..9e0ca997 --- /dev/null +++ b/gpt4all-bindings/python/docs/gpt4all_api.md @@ -0,0 +1,6 @@ +# GPT4All API +The `GPT4All` provides a universal API to call all GPT4All models and +introduces additional helpful functionality such as downloading models. + +::: gpt4all.gpt4all.GPT4All + diff --git a/gpt4all-bindings/python/docs/index.md b/gpt4all-bindings/python/docs/index.md new file mode 100644 index 00000000..1e5dabd2 --- /dev/null +++ b/gpt4all-bindings/python/docs/index.md @@ -0,0 +1,22 @@ +# GPT4All + +In this package, we introduce Python bindings built around GPT4All's C/C++ ecosystem. + +## Quickstart + +```bash +pip install gpt4all +``` + +In Python, run the following commands to retrieve a GPT4All model and generate a response +to a prompt. + +**Download Note*:* +By default, models are stored in `~/.cache/gpt4all/` (you can change this with `model_path`). If the file already exists, model download will be skipped. + +```python +import gpt4all +gptj = gpt4all.GPT4All("ggml-gpt4all-j-v1.3-groovy") +messages = [{"role": "user", "content": "Name 3 colors"}] +gptj.chat_completion(messages) +``` \ No newline at end of file diff --git a/gpt4all-bindings/python/gpt4all/__init__.py b/gpt4all-bindings/python/gpt4all/__init__.py new file mode 100644 index 00000000..4040ad63 --- /dev/null +++ b/gpt4all-bindings/python/gpt4all/__init__.py @@ -0,0 +1,2 @@ +from .pyllmodel import LLModel # noqa +from .gpt4all import GPT4All # noqa diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py new file mode 100644 index 00000000..871a4b39 --- /dev/null +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -0,0 +1,280 @@ +""" +Python only API for running all GPT4All models. +""" +import json +import os +from pathlib import Path +from typing import Dict, List + +import requests +from tqdm import tqdm + +from . import pyllmodel + +# TODO: move to config +DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\") + +class GPT4All(): + """Python API for retrieving and interacting with GPT4All models + + Attribuies: + model: Pointer to underlying C model. + """ + + def __init__(self, model_name: str, model_path: str = None, model_type: str = None, allow_download=True): + """ + Constructor + + Args: + model_name: Name of GPT4All or custom model. Including ".bin" file extension is optional but encouraged. + model_path: Path to directory containing model file or, if file does not exist, where to download model. + Default is None, in which case models will be stored in `~/.cache/gpt4all/`. + model_type: Model architecture to use - currently, only options are 'llama' or 'gptj'. Only required if model + is custom. Note that these models still must be built from llama.cpp or GPTJ ggml architecture. + Default is None. 
diff --git a/gpt4all-bindings/python/gpt4all/__init__.py b/gpt4all-bindings/python/gpt4all/__init__.py
new file mode 100644
index 00000000..4040ad63
--- /dev/null
+++ b/gpt4all-bindings/python/gpt4all/__init__.py
@@ -0,0 +1,2 @@
+from .pyllmodel import LLModel # noqa
+from .gpt4all import GPT4All # noqa
diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py
new file mode 100644
index 00000000..871a4b39
--- /dev/null
+++ b/gpt4all-bindings/python/gpt4all/gpt4all.py
@@ -0,0 +1,280 @@
+"""
+Python only API for running all GPT4All models.
+"""
+import json
+import os
+from pathlib import Path
+from typing import Dict, List
+
+import requests
+from tqdm import tqdm
+
+from . import pyllmodel
+
+# TODO: move to config
+DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\")
+
+class GPT4All():
+    """Python API for retrieving and interacting with GPT4All models.
+
+    Attributes:
+        model: Pointer to underlying C model.
+    """
+
+    def __init__(self, model_name: str, model_path: str = None, model_type: str = None, allow_download=True):
+        """
+        Constructor
+
+        Args:
+            model_name: Name of GPT4All or custom model. Including ".bin" file extension is optional but encouraged.
+            model_path: Path to directory containing model file or, if file does not exist, where to download model.
+                Default is None, in which case models will be stored in `~/.cache/gpt4all/`.
+            model_type: Model architecture to use - currently, only options are 'llama' or 'gptj'. Only required if model
+                is custom. Note that these models still must be built from llama.cpp or GPTJ ggml architecture.
+                Default is None.
+            allow_download: Allow API to download models from gpt4all.io. Default is True.
+        """
+        self.model = None
+
+        # Model type provided for when model is custom
+        if model_type:
+            self.model = GPT4All.get_model_from_type(model_type)
+        # Else get model from gpt4all model filenames
+        else:
+            self.model = GPT4All.get_model_from_name(model_name)
+
+        # Retrieve model and download if allowed
+        model_dest = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download)
+        self.model.load_model(model_dest)
+
+    @staticmethod
+    def list_models():
+        """
+        Fetch model list from https://gpt4all.io/models/models.json
+
+        Returns:
+            Model list in JSON format.
+        """
+        response = requests.get("https://gpt4all.io/models/models.json")
+        model_json = json.loads(response.content)
+        return model_json
+
+    @staticmethod
+    def retrieve_model(model_name: str, model_path: str = None, allow_download = True):
+        """
+        Find model file, and if it doesn't exist, download the model.
+
+        Args:
+            model_name: Name of model.
+            model_path: Path to find model. Default is None in which case path is set to
+                ~/.cache/gpt4all/.
+            allow_download: Allow API to download model from gpt4all.io. Default is True.
+
+        Returns:
+            Model file destination.
+        """
+        model_filename = model_name
+        if ".bin" not in model_filename:
+            model_filename += ".bin"
+
+        # Validate download directory; check for None before touching model_path
+        if model_path is None:
+            model_path = DEFAULT_MODEL_DIRECTORY
+            if not os.path.exists(DEFAULT_MODEL_DIRECTORY):
+                try:
+                    os.makedirs(DEFAULT_MODEL_DIRECTORY)
+                except OSError:
+                    raise ValueError("Failed to create model download directory at ~/.cache/gpt4all/. \
+                    Please specify model_path.")
+        else:
+            # Double backslashes for Windows paths (mirrors handling elsewhere in this package)
+            model_path = model_path.replace("\\", "\\\\")
+
+        if os.path.exists(model_path):
+            model_dest = os.path.join(model_path, model_filename).replace("\\", "\\\\")
+            if os.path.exists(model_dest):
+                print("Found model file.")
+                return model_dest
+
+            # If model file does not exist, download
+            elif allow_download:
+                # Make sure valid model filename before attempting download
+                model_match = False
+                for item in GPT4All.list_models():
+                    if model_filename == item["filename"]:
+                        model_match = True
+                        break
+                if not model_match:
+                    raise ValueError(f"Model filename not in model list: {model_filename}")
+                return GPT4All.download_model(model_filename, model_path)
+            else:
+                raise ValueError("Failed to retrieve model")
+        else:
+            raise ValueError("Invalid model directory")
+
+    @staticmethod
+    def download_model(model_filename, model_path):
+        def get_download_url(model_filename):
+            return f"https://gpt4all.io/models/{model_filename}"
+
+        # Download model
+        download_path = os.path.join(model_path, model_filename).replace("\\", "\\\\")
+        download_url = get_download_url(model_filename)
+
+        response = requests.get(download_url, stream=True)
+        total_size_in_bytes = int(response.headers.get("content-length", 0))
+        block_size = 1048576  # 1 MB
+        progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
+        with open(download_path, "wb") as file:
+            for data in response.iter_content(block_size):
+                progress_bar.update(len(data))
+                file.write(data)
+        progress_bar.close()
+
+        # Validate download was successful
+        if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
+            raise RuntimeError(
+                "An error occurred during download. Downloaded file may not work."
+            )
+
+        print("Model downloaded at: " + download_path)
+        return download_path
+
+    def generate(self, prompt: str, **generate_kwargs):
+        """
+        Surfaced method of running generate without accessing model object.
+        """
+        return self.model.generate(prompt, **generate_kwargs)
+
+    def chat_completion(self,
+                        messages: List[Dict],
+                        default_prompt_header: bool = True,
+                        default_prompt_footer: bool = True,
+                        verbose: bool = True) -> str:
+        """
+        Format list of message dictionaries into a prompt and call model
+        generate on prompt. Returns a response dictionary with metadata and
+        generated content.
+
+        Args:
+            messages: Each dictionary should have a "role" key
+                with value of "system", "assistant", or "user" and a "content" key with a
+                string value. Messages are organized such that "system" messages are at top of prompt,
+                and "user" and "assistant" messages are displayed in order. Assistant messages get formatted as
+                "### Response: {content}".
+            default_prompt_header: If True (default), add default prompt header after any user specified system messages and
+                before user/assistant messages.
+            default_prompt_footer: If True (default), add default footer at end of prompt.
+            verbose: If True (default), print full prompt and generated response.
+
+        Returns:
+            Response dictionary with:
+                "model": name of model.
+                "usage": a dictionary with number of full prompt tokens, number of
+                    generated tokens in response, and total tokens.
+                "choices": List of message dictionaries where "content" is the generated response and "role" is set
+                    to "assistant". Right now, only one choice is returned by the model.
+
+        """
+
+        full_prompt = self._build_prompt(messages,
+                                         default_prompt_header=default_prompt_header,
+                                         default_prompt_footer=default_prompt_footer)
+
+        if verbose:
+            print(full_prompt)
+
+        response = self.model.generate(full_prompt)
+
+        if verbose:
+            print(response)
+
+        response_dict = {
+            "model": self.model.model_name,
+            "usage": {"prompt_tokens": len(full_prompt),
+                      "completion_tokens": len(response),
+                      "total_tokens": len(full_prompt) + len(response)},
+            "choices": [
+                {
+                    "message": {
+                        "role": "assistant",
+                        "content": response
+                    }
+                }
+            ]
+        }
+
+        return response_dict
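+
+    # Worked example of the prompt _build_prompt assembles for
+    # messages = [{"role": "user", "content": "Name 3 colors"}] with the default
+    # header and footer enabled (leading whitespace abbreviated):
+    #
+    #   ### Instruction:
+    #   The prompt below is a question to answer, a task to complete, or a conversation
+    #   to respond to; decide which and write an appropriate response.
+    #   ### Prompt:
+    #   Name 3 colors
+    #   ### Response: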
+    @staticmethod
+    def _build_prompt(messages: List[Dict],
+                      default_prompt_header=True,
+                      default_prompt_footer=False) -> str:
+        full_prompt = ""
+
+        for message in messages:
+            if message["role"] == "system":
+                system_message = message["content"] + "\n"
+                full_prompt += system_message
+
+        if default_prompt_header:
+            full_prompt += """### Instruction: 
+            The prompt below is a question to answer, a task to complete, or a conversation 
+            to respond to; decide which and write an appropriate response. 
+            \n### Prompt: """
+
+        for message in messages:
+            if message["role"] == "user":
+                user_message = "\n" + message["content"]
+                full_prompt += user_message
+            if message["role"] == "assistant":
+                assistant_message = "\n### Response: " + message["content"]
+                full_prompt += assistant_message
+
+        if default_prompt_footer:
+            full_prompt += "\n### Response:"
+
+        return full_prompt
+
+    @staticmethod
+    def get_model_from_type(model_type: str) -> pyllmodel.LLModel:
+        # This needs to be updated for each new model
+        # TODO: Might be worth converting model_type to enum
+
+        if model_type == "gptj":
+            return pyllmodel.GPTJModel()
+        elif model_type == "llama":
+            return pyllmodel.LlamaModel()
+        else:
+            raise ValueError(f"No corresponding model for model_type: {model_type}")
+
+    @staticmethod
+    def get_model_from_name(model_name: str) -> pyllmodel.LLModel:
+        # This needs to be updated for each new model
+
+        # NOTE: We are doing this preprocessing a lot, maybe there's a better way to organize
+        if ".bin" not in model_name:
+            model_name += ".bin"
+
+        GPTJ_MODELS = [
+            "ggml-gpt4all-j-v1.3-groovy.bin",
+            "ggml-gpt4all-j-v1.2-jazzy.bin",
+            "ggml-gpt4all-j-v1.1-breezy.bin",
+            "ggml-gpt4all-j.bin"
+        ]
+
+        LLAMA_MODELS = [
+            "ggml-gpt4all-l13b-snoozy.bin",
+            "ggml-vicuna-7b-1.1-q4_2.bin",
+            "ggml-vicuna-13b-1.1-q4_2.bin",
+            "ggml-wizardLM-7B.q4_2.bin",
+            "ggml-stable-vicuna-13B.q4_2.bin"
+        ]
+
+        if model_name in GPTJ_MODELS:
+            return pyllmodel.GPTJModel()
+        elif model_name in LLAMA_MODELS:
+            return pyllmodel.LlamaModel()
+        else:
+            err_msg = f"""No corresponding model for provided filename {model_name}.
+            If this is a custom model, make sure to specify a valid model_type.
+            """
+            raise ValueError(err_msg)
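+
+# Usage sketch for the custom-model path above (hypothetical file and directory
+# names): a filename missing from GPTJ_MODELS/LLAMA_MODELS loads only when its
+# architecture is passed explicitly, e.g.
+#
+#   GPT4All("my-finetuned-llama.bin", model_path="/path/to/models",
+#           model_type="llama", allow_download=False)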
diff --git a/gpt4all-bindings/python/gpt4all/pyllmodel.py b/gpt4all-bindings/python/gpt4all/pyllmodel.py
new file mode 100644
index 00000000..c08b7919
--- /dev/null
+++ b/gpt4all-bindings/python/gpt4all/pyllmodel.py
@@ -0,0 +1,241 @@
+from io import StringIO
+import pkg_resources
+import ctypes
+import os
+import platform
+import re
+import sys
+
+# TODO: provide a config file to make this more robust
+LLMODEL_PATH = os.path.join("llmodel_DO_NOT_MODIFY", "build")
+
+def load_llmodel_library():
+    system = platform.system()
+
+    def get_c_shared_lib_extension():
+        if system == "Darwin":
+            return "dylib"
+        elif system == "Linux":
+            return "so"
+        elif system == "Windows":
+            return "dll"
+        else:
+            raise Exception("Operating System not supported")
+
+    c_lib_ext = get_c_shared_lib_extension()
+
+    llmodel_file = "libllmodel" + '.' + c_lib_ext
+    llama_file = "libllama" + '.' + c_lib_ext
+    llama_dir = str(pkg_resources.resource_filename('gpt4all', os.path.join(LLMODEL_PATH, llama_file)))
+    llmodel_dir = str(pkg_resources.resource_filename('gpt4all', os.path.join(LLMODEL_PATH, llmodel_file)))
+
+    # For Windows
+    llama_dir = llama_dir.replace("\\", "\\\\")
+    print(llama_dir)
+    llmodel_dir = llmodel_dir.replace("\\", "\\\\")
+    print(llmodel_dir)
+
+    # libllama is loaded with RTLD_GLOBAL so libllmodel can resolve its symbols
+    llama_lib = ctypes.CDLL(llama_dir, mode=ctypes.RTLD_GLOBAL)
+    llmodel_lib = ctypes.CDLL(llmodel_dir)
+
+    return llmodel_lib, llama_lib
+
+
+llmodel, llama = load_llmodel_library()
+
+# Define C function signatures using ctypes
+llmodel.llmodel_gptj_create.restype = ctypes.c_void_p
+llmodel.llmodel_gptj_destroy.argtypes = [ctypes.c_void_p]
+llmodel.llmodel_llama_create.restype = ctypes.c_void_p
+llmodel.llmodel_llama_destroy.argtypes = [ctypes.c_void_p]
+
+llmodel.llmodel_loadModel.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
+llmodel.llmodel_loadModel.restype = ctypes.c_bool
+llmodel.llmodel_isModelLoaded.argtypes = [ctypes.c_void_p]
+llmodel.llmodel_isModelLoaded.restype = ctypes.c_bool
+
+class LLModelPromptContext(ctypes.Structure):
+    _fields_ = [("logits", ctypes.POINTER(ctypes.c_float)),
+                ("logits_size", ctypes.c_size_t),
+                ("tokens", ctypes.POINTER(ctypes.c_int32)),
+                ("tokens_size", ctypes.c_size_t),
+                ("n_past", ctypes.c_int32),
+                ("n_ctx", ctypes.c_int32),
+                ("n_predict", ctypes.c_int32),
+                ("top_k", ctypes.c_int32),
+                ("top_p", ctypes.c_float),
+                ("temp", ctypes.c_float),
+                ("n_batch", ctypes.c_int32),
+                ("repeat_penalty", ctypes.c_float),
+                ("repeat_last_n", ctypes.c_int32),
+                ("context_erase", ctypes.c_float)]
+
+ResponseCallback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_int32, ctypes.c_char_p)
+RecalculateCallback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_bool)
+
+llmodel.llmodel_prompt.argtypes = [ctypes.c_void_p,
+                                   ctypes.c_char_p,
+                                   ResponseCallback,
+                                   ResponseCallback,
+                                   RecalculateCallback,
+                                   ctypes.POINTER(LLModelPromptContext)]
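+
+# Sketch of the raw call sequence these signatures enable (hypothetical path;
+# the LLModel class below wraps exactly this flow):
+#
+#   model = llmodel.llmodel_gptj_create()
+#   llmodel.llmodel_loadModel(model, b"/path/to/model.bin")
+#   llmodel.llmodel_prompt(model, b"Hello",
+#                          ResponseCallback(on_prompt),      # fed prompt tokens
+#                          ResponseCallback(on_response),    # fed generated tokens
+#                          RecalculateCallback(on_recalc),
+#                          context)                          # LLModelPromptContext
+#   llmodel.llmodel_gptj_destroy(model)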
+
+
+class LLModel:
+    """
+    Base class and universal wrapper for GPT4All language models
+    built around the llmodel C-API.
+
+    Attributes
+    ----------
+    model: llmodel_model
+        Ctype pointer to underlying model
+    model_type : str
+        Model architecture identifier
+    """
+
+    model_type: str = None
+
+    def __init__(self):
+        self.model = None
+        self.model_name = None
+
+    def __del__(self):
+        pass
+
+    def load_model(self, model_path: str) -> bool:
+        """
+        Load model from a file.
+
+        Parameters
+        ----------
+        model_path : str
+            Model filepath
+
+        Returns
+        -------
+        True if model loaded successfully, False otherwise
+        """
+        llmodel.llmodel_loadModel(self.model, model_path.encode('utf-8'))
+        filename = os.path.basename(model_path)
+        self.model_name = os.path.splitext(filename)[0]
+
+        if llmodel.llmodel_isModelLoaded(self.model):
+            return True
+        else:
+            return False
+
+    def generate(self,
+                 prompt: str,
+                 logits_size: int = 0,
+                 tokens_size: int = 0,
+                 n_past: int = 0,
+                 n_ctx: int = 1024,
+                 n_predict: int = 128,
+                 top_k: int = 40,
+                 top_p: float = .9,
+                 temp: float = .1,
+                 n_batch: int = 8,
+                 repeat_penalty: float = 1.2,
+                 repeat_last_n: int = 10,
+                 context_erase: float = .5) -> str:
+        """
+        Generate response from model from a prompt.
+
+        Parameters
+        ----------
+        prompt: str
+            Question, task, or conversation for model to respond to
+        n_predict: int, optional
+            Maximum number of tokens to generate (default is 128)
+        top_k, top_p, temp: optional
+            Sampling parameters
+        n_batch, repeat_penalty, repeat_last_n, context_erase: optional
+            Remaining settings passed through to the prompt context
+
+        Returns
+        -------
+        Model response str
+        """
+
+        prompt = prompt.encode('utf-8')
+        prompt = ctypes.c_char_p(prompt)
+
+        # Change stdout to StringIO so we can collect response
+        old_stdout = sys.stdout
+        collect_response = StringIO()
+        sys.stdout = collect_response
+
+        context = LLModelPromptContext(
+            logits_size=logits_size,
+            tokens_size=tokens_size,
+            n_past=n_past,
+            n_ctx=n_ctx,
+            n_predict=n_predict,
+            top_k=top_k,
+            top_p=top_p,
+            temp=temp,
+            n_batch=n_batch,
+            repeat_penalty=repeat_penalty,
+            repeat_last_n=repeat_last_n,
+            context_erase=context_erase
+        )
+
+        llmodel.llmodel_prompt(self.model,
+                               prompt,
+                               ResponseCallback(self._prompt_callback),
+                               ResponseCallback(self._response_callback),
+                               RecalculateCallback(self._recalculate_callback),
+                               context)
+
+        response = collect_response.getvalue()
+        sys.stdout = old_stdout
+
+        # Remove the unnecessary new lines from response
+        response = re.sub(r"\n(?!\n)", "", response).strip()
+
+        return response
+
+    # Empty prompt callback
+    @staticmethod
+    def _prompt_callback(token_id, response):
+        return True
+
+    # Response callback that prints each generated token to (redirected) stdout
+    # so it can be collected by generate()
+    @staticmethod
+    def _response_callback(token_id, response):
+        print(response.decode('utf-8'))
+        return True
+
+    # Empty recalculate callback
+    @staticmethod
+    def _recalculate_callback(is_recalculating):
+        return is_recalculating
+
+
+class GPTJModel(LLModel):
+
+    model_type = "gptj"
+
+    def __init__(self):
+        super().__init__()
+        self.model = llmodel.llmodel_gptj_create()
+
+    def __del__(self):
+        if self.model is not None:
+            llmodel.llmodel_gptj_destroy(self.model)
+        super().__del__()
+
+
+class LlamaModel(LLModel):
+
+    model_type = "llama"
+
+    def __init__(self):
+        super().__init__()
+        self.model = llmodel.llmodel_llama_create()
+
+    def __del__(self):
+        if self.model is not None:
+            llmodel.llmodel_llama_destroy(self.model)
+        super().__del__()
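+
+# Minimal usage sketch for this low-level wrapper (hypothetical path; most
+# callers should go through gpt4all.GPT4All instead):
+#
+#   model = GPTJModel()
+#   model.load_model("/path/to/ggml-gpt4all-j-v1.3-groovy.bin")
+#   print(model.generate("Name 3 colors", n_predict=32))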
diff --git a/gpt4all-bindings/python/makefile b/gpt4all-bindings/python/makefile
new file mode 100644
index 00000000..5da5df44
--- /dev/null
+++ b/gpt4all-bindings/python/makefile
@@ -0,0 +1,16 @@
+SHELL:=/bin/bash -o pipefail
+ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+PYTHON:=python3
+
+venv:
+	if [ ! -d $(ROOT_DIR)/env ]; then $(PYTHON) -m venv $(ROOT_DIR)/env; fi
+
+documentation:
+	rm -rf ./site && mkdocs build
+
+wheel:
+	rm -rf dist/ build/ gpt4all/llmodel_DO_NOT_MODIFY; python setup.py bdist_wheel;
+
+clean:
+	rm -rf {.pytest_cache,env,gpt4all.egg-info}
+	find . | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm -rf
\ No newline at end of file
diff --git a/gpt4all-bindings/python/mkdocs.yml b/gpt4all-bindings/python/mkdocs.yml
new file mode 100644
index 00000000..2175bec0
--- /dev/null
+++ b/gpt4all-bindings/python/mkdocs.yml
@@ -0,0 +1,76 @@
+site_name: GPT4All Python Documentation
+repo_url: https://github.com/nomic-ai/gpt4all
+repo_name: nomic-ai/gpt4all
+site_url: https://docs.nomic.ai # TODO: change
+edit_uri: edit/main/docs/
+site_description: Python bindings for GPT4All
+copyright: Copyright © 2023 Nomic, Inc
+use_directory_urls: false
+
+nav:
+  - 'index.md'
+  - 'API Reference':
+    - 'gpt4all_api.md'
+
+theme:
+  name: material
+  palette:
+    primary: white
+  logo: assets/nomic.png
+  favicon: assets/favicon.ico
+  features:
+    - navigation.instant
+    - navigation.tracking
+    - navigation.sections
+#    - navigation.tabs
+#    - navigation.tabs.sticky
+
+markdown_extensions:
+  - pymdownx.highlight:
+      anchor_linenums: true
+  - pymdownx.inlinehilite
+  - pymdownx.snippets
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.tabbed:
+      alternate_style: true
+  - pymdownx.emoji:
+      emoji_index: !!python/name:materialx.emoji.twemoji
+      emoji_generator: !!python/name:materialx.emoji.to_svg
+      options:
+        custom_icons:
+          - docs/overrides/.icons
+  - tables
+  - admonition
+  - codehilite:
+      css_class: highlight
+
+extra_css:
+  - css/custom.css
+
+plugins:
+  - mkdocstrings:
+      handlers:
+        python:
+          options:
+            show_root_heading: True
+            heading_level: 4
+            show_root_full_path: false
+            docstring_section_style: list
+  #- material/social:
+  #    cards_font: Roboto
+
+  #- mkdocs-jupyter:
+  #    ignore_h1_titles: True
+  #    show_input: True
+
+extra:
+  generator: false
+  analytics:
+    provider: google
+    property: G-NPXC8BYHJV
+  #social:
+  #  - icon: fontawesome/brands/twitter
+  #    link: https://twitter.com/nomic_ai
+  #  - icon: material/fruit-pineapple
+  #    link: https://www.youtube.com/watch?v=628eVJgHD6I
\ No newline at end of file
diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py
new file mode 100644
index 00000000..f862ec7e
--- /dev/null
+++ b/gpt4all-bindings/python/setup.py
@@ -0,0 +1,89 @@
+from setuptools import setup, find_packages
+import os
+import platform
+import shutil
+
+package_name = "gpt4all"
+
+# Define the location of your prebuilt C library files
+SRC_CLIB_DIRECTORY = os.path.join("..", "..", "llmodel")
+SRC_CLIB_BUILD_DIRECTORY = os.path.join("..", "..", "llmodel", "build")
+
+LIB_NAME = "llmodel"
+
+DEST_CLIB_DIRECTORY = os.path.join(package_name, f"{LIB_NAME}_DO_NOT_MODIFY")
+DEST_CLIB_BUILD_DIRECTORY = os.path.join(DEST_CLIB_DIRECTORY, "build")
+
+system = platform.system()
+
+def get_c_shared_lib_extension():
+
+    if system == "Darwin":
+        return "dylib"
+    elif system == "Linux":
+        return "so"
+    elif system == "Windows":
+        return "dll"
+    else:
+        raise Exception("Operating System not supported")
+
+lib_ext = get_c_shared_lib_extension()
+
+def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir):
+    files_copied = 0
+
+    if not os.path.exists(dest_dir):
+        os.mkdir(dest_dir)
+        os.mkdir(dest_build_dir)
+
+    for dirpath, _, filenames in os.walk(src_dir):
+        for item in filenames:
+            # copy over header files to dest dir
+            s = os.path.join(dirpath, item)
+            if item.endswith(".h"):
+                d = os.path.join(dest_dir, item)
+                shutil.copy2(s, d)
+                files_copied += 1
+            if item.endswith(lib_ext):
+                s = os.path.join(dirpath, item)
+                d = os.path.join(dest_build_dir, item)
+                shutil.copy2(s, d)
+                files_copied += 1
+
+    return files_copied
+
+
+# NOTE: You must provide the correct path to the prebuilt llmodel C library.
+# Specifically, the llmodel.h and C shared library are needed.
+copy_prebuilt_C_lib(SRC_CLIB_DIRECTORY,
+                    DEST_CLIB_DIRECTORY,
+                    DEST_CLIB_BUILD_DIRECTORY)
+
+setup(
+    name=package_name,
+    version="0.1.9",
+    description="Python bindings for GPT4All",
+    author="Richard Guo",
+    author_email="richard@nomic.ai",
+    url="https://pypi.org/project/gpt4all/",
+    classifiers = [
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.8',
+    packages=find_packages(),
+    install_requires=['requests', 'tqdm'],
+    extras_require={
+        'dev': [
+            'pytest',
+            'twine',
+            'mkdocs-material',
+            'mkautodoc',
+            'mkdocstrings[python]',
+            'mkdocs-jupyter'
+        ]
+    },
+    package_data={'llmodel': [os.path.join(DEST_CLIB_DIRECTORY, "*")]},
+    include_package_data=True
+)
\ No newline at end of file
diff --git a/gpt4all-bindings/python/placeholder b/gpt4all-bindings/python/tests/__init__.py
similarity index 100%
rename from gpt4all-bindings/python/placeholder
rename to gpt4all-bindings/python/tests/__init__.py
diff --git a/gpt4all-bindings/python/tests/test_gpt4all.py b/gpt4all-bindings/python/tests/test_gpt4all.py
new file mode 100644
index 00000000..33303136
--- /dev/null
+++ b/gpt4all-bindings/python/tests/test_gpt4all.py
@@ -0,0 +1,62 @@
+import pytest
+
+from gpt4all.gpt4all import GPT4All
+
+def test_invalid_model_type():
+    model_type = "bad_type"
+    with pytest.raises(ValueError):
+        GPT4All.get_model_from_type(model_type)
+
+def test_valid_model_type():
+    model_type = "gptj"
+    assert GPT4All.get_model_from_type(model_type).model_type == model_type
+
+def test_invalid_model_name():
+    model_name = "bad_filename.bin"
+    with pytest.raises(ValueError):
+        GPT4All.get_model_from_name(model_name)
+
+def test_valid_model_name():
+    model_name = "ggml-gpt4all-l13b-snoozy"
+    model_type = "llama"
+    assert GPT4All.get_model_from_name(model_name).model_type == model_type
+    model_name += ".bin"
+    assert GPT4All.get_model_from_name(model_name).model_type == model_type
+
+def test_build_prompt():
+    messages = [
+        {
+            "role": "system",
+            "content": "You are a helpful assistant."
+        },
+        {
+            "role": "user",
+            "content": "Hello there."
+        },
+        {
+            "role": "assistant",
+            "content": "Hi, how can I help you?"
+        },
+        {
+            "role": "user",
+            "content": "Reverse a list in Python."
+        }
+    ]
+
+    expected_prompt = """You are a helpful assistant.\
+    \n### Instruction:
+    The prompt below is a question to answer, a task to complete, or a conversation
+    to respond to; decide which and write an appropriate response.\
+    ### Prompt:\
+    Hello there.\
+    Response: Hi, how can I help you?\
+    Reverse a list in Python.\
+    ### Response:"""
+
+    print(expected_prompt)
+
+    full_prompt = GPT4All._build_prompt(messages, default_prompt_footer=True, default_prompt_header=True)
+
+    print("\n\n\n")
+    print(full_prompt)
+    assert len(full_prompt) == len(expected_prompt)
diff --git a/gpt4all-bindings/python/tests/test_pyllmodel.py b/gpt4all-bindings/python/tests/test_pyllmodel.py
new file mode 100644
index 00000000..2208c425
--- /dev/null
+++ b/gpt4all-bindings/python/tests/test_pyllmodel.py
@@ -0,0 +1,44 @@
+from io import StringIO
+import sys
+
+from gpt4all import pyllmodel
+
+# TODO: Integration test for loadmodel and prompt.
+# Right now, too slow because it requires a model file download.
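+
+# A minimal sketch of that integration test, kept skipped so the suite does not
+# download a multi-gigabyte model file (model name taken from gpt4all.py's
+# GPTJ_MODELS list):
+import pytest
+
+from gpt4all import GPT4All
+
+@pytest.mark.skip(reason="requires downloading a large model file")
+def test_loadmodel_and_prompt():
+    model = GPT4All("ggml-gpt4all-j-v1.3-groovy")
+    response = model.generate("Hello there", n_predict=16)
+    assert isinstance(response, str)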
+
+def test_create_gptj():
+    gptj = pyllmodel.GPTJModel()
+    assert gptj.model_type == "gptj"
+
+def test_create_llama():
+    llama = pyllmodel.LlamaModel()
+    assert llama.model_type == "llama"
+
+# Helper checks (not collected by pytest): the C library prints these errors
+# when prompted with an unloaded model.
+def prompt_unloaded_gptj():
+    gptj = pyllmodel.GPTJModel()
+    old_stdout = sys.stdout
+    collect_response = StringIO()
+    sys.stdout = collect_response
+
+    gptj.generate("hello there")
+
+    response = collect_response.getvalue()
+    sys.stdout = old_stdout
+
+    response = response.strip()
+    assert response == "GPT-J ERROR: prompt won't work with an unloaded model!"
+
+def prompt_unloaded_llama():
+    llama = pyllmodel.LlamaModel()
+    old_stdout = sys.stdout
+    collect_response = StringIO()
+    sys.stdout = collect_response
+
+    llama.generate("hello there")
+
+    response = collect_response.getvalue()
+    sys.stdout = old_stdout
+
+    response = response.strip()
+    assert response == "LLAMA ERROR: prompt won't work with an unloaded model!"
\ No newline at end of file