Merge branch 'main' into py-release-280

This commit is contained in:
Jared Van Bortel 2024-08-05 13:27:22 -04:00
commit a0c57d9dbd
3 changed files with 37 additions and 27 deletions

View File

@ -2,8 +2,7 @@
GPT4All on the command-line. GPT4All on the command-line.
## Documentation More details on the [wiki](https://github.com/nomic-ai/gpt4all/wiki/Python-CLI).
<https://docs.gpt4all.io/gpt4all_cli.html>
## Quickstart ## Quickstart
@ -34,11 +33,11 @@ python -m pip install --user --upgrade gpt4all typer
# run the CLI # run the CLI
python app.py repl python app.py repl
``` ```
By default, it will automatically download the `groovy` model to `.cache/gpt4all/` in your user By default, it will automatically download the `Mistral Instruct` model to `.cache/gpt4all/` in your
directory, if necessary. user directory, if necessary.
If you have already saved a model beforehand, specify its path with the `-m`/`--model` argument, If you have already saved a model beforehand, specify its path with the `-m`/`--model` argument,
for example: for example:
```shell ```shell
python app.py repl --model /home/user/my-gpt4all-models/gpt4all-13b-snoozy-q4_0.gguf python app.py repl --model /home/user/my-gpt4all-models/mistral-7b-instruct-v0.1.Q4_0.gguf
``` ```

View File

@ -6,6 +6,7 @@ import platform
import re import re
import subprocess import subprocess
import sys import sys
import textwrap
import threading import threading
from enum import Enum from enum import Enum
from queue import Queue from queue import Queue
@ -28,6 +29,16 @@ if TYPE_CHECKING:
EmbeddingsType = TypeVar('EmbeddingsType', bound='list[Any]') EmbeddingsType = TypeVar('EmbeddingsType', bound='list[Any]')
# Detect Rosetta 2
if platform.system() == "Darwin" and platform.processor() == "i386":
if subprocess.run(
"sysctl -n sysctl.proc_translated".split(), check=True, capture_output=True, text=True,
).stdout.strip() == "1":
raise RuntimeError(textwrap.dedent("""\
Running GPT4All under Rosetta is not supported due to CPU feature requirements.
Please install GPT4All in an environment that uses a native ARM64 Python interpreter.
"""))
# Find CUDA libraries from the official packages # Find CUDA libraries from the official packages
cuda_found = False cuda_found = False
if platform.system() in ('Linux', 'Windows'): if platform.system() in ('Linux', 'Windows'):

View File

@ -346,7 +346,7 @@
<location filename="../qml/AddModelView.qml" line="549"/> <location filename="../qml/AddModelView.qml" line="549"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/AddModelView.qml" line="549"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/AddModelView.qml" line="549"/>
<source>Download progressBar</source> <source>Download progressBar</source>
<translation>barra di avanzamento dello scaricamento</translation> <translation>Barra di avanzamento dello scaricamento</translation>
</message> </message>
<message> <message>
<location filename="../qml/AddModelView.qml" line="550"/> <location filename="../qml/AddModelView.qml" line="550"/>
@ -569,7 +569,7 @@
<location filename="../qml/ApplicationSettings.qml" line="216"/> <location filename="../qml/ApplicationSettings.qml" line="216"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="216"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="216"/>
<source>The compute device used for text generation.</source> <source>The compute device used for text generation.</source>
<translation>Dispositivo di elaborazione utilizzato per la generazione del testo.</translation> <translation>Il dispositivo di calcolo utilizzato per la generazione del testo.</translation>
</message> </message>
<message> <message>
<location filename="../qml/ApplicationSettings.qml" line="234"/> <location filename="../qml/ApplicationSettings.qml" line="234"/>
@ -577,7 +577,7 @@
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="234"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="234"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="289"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="289"/>
<source>Application default</source> <source>Application default</source>
<translation>Modello predefinito</translation> <translation>Applicazione predefinita</translation>
</message> </message>
<message> <message>
<location filename="../qml/ApplicationSettings.qml" line="267"/> <location filename="../qml/ApplicationSettings.qml" line="267"/>
@ -601,7 +601,7 @@
<location filename="../qml/ApplicationSettings.qml" line="326"/> <location filename="../qml/ApplicationSettings.qml" line="326"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="326"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="326"/>
<source>Generate suggested follow-up questions at the end of responses.</source> <source>Generate suggested follow-up questions at the end of responses.</source>
<translation>Genera domande di approfondimento suggerite alla fine delle risposte.</translation> <translation>Genera le domande di approfondimento suggerite alla fine delle risposte.</translation>
</message> </message>
<message> <message>
<location filename="../qml/ApplicationSettings.qml" line="338"/> <location filename="../qml/ApplicationSettings.qml" line="338"/>
@ -655,7 +655,7 @@
<location filename="../qml/ApplicationSettings.qml" line="393"/> <location filename="../qml/ApplicationSettings.qml" line="393"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="393"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="393"/>
<source>Send chats and feedback to the GPT4All Open-Source Datalake.</source> <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>
<translation>Invia chat e commenti al Datalake open source GPT4All.</translation> <translation>Invia chat e commenti al Datalake Open Source GPT4All.</translation>
</message> </message>
<message> <message>
<location filename="../qml/ApplicationSettings.qml" line="426"/> <location filename="../qml/ApplicationSettings.qml" line="426"/>
@ -722,7 +722,7 @@
<location filename="../qml/ApplicationSettings.qml" line="558"/> <location filename="../qml/ApplicationSettings.qml" line="558"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="558"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ApplicationSettings.qml" line="558"/>
<source>Manually check for an update to GPT4All.</source> <source>Manually check for an update to GPT4All.</source>
<translation>Verifica manualmente la presenza di un aggiornamento a GPT4All.</translation> <translation>Verifica manualmente l&apos;aggiornamento di GPT4All.</translation>
</message> </message>
<message> <message>
<location filename="../qml/ApplicationSettings.qml" line="567"/> <location filename="../qml/ApplicationSettings.qml" line="567"/>
@ -742,7 +742,7 @@
<message> <message>
<location filename="../chat.cpp" line="38"/> <location filename="../chat.cpp" line="38"/>
<source>Server Chat</source> <source>Server Chat</source>
<translation>Chatta del server</translation> <translation>Chat del server</translation>
</message> </message>
</context> </context>
<context> <context>
@ -1136,7 +1136,7 @@ modello per iniziare</translation>
<location filename="../qml/ChatView.qml" line="1389"/> <location filename="../qml/ChatView.qml" line="1389"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ChatView.qml" line="1389"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ChatView.qml" line="1389"/>
<source>Suggested follow-ups</source> <source>Suggested follow-ups</source>
<translation>Seguiti suggeriti</translation> <translation>Approfondimenti suggeriti</translation>
</message> </message>
<message> <message>
<location filename="../qml/ChatView.qml" line="1665"/> <location filename="../qml/ChatView.qml" line="1665"/>
@ -1267,7 +1267,7 @@ modello per iniziare</translation>
<location filename="../qml/CollectionsDrawer.qml" line="70"/> <location filename="../qml/CollectionsDrawer.qml" line="70"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/CollectionsDrawer.qml" line="70"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/CollectionsDrawer.qml" line="70"/>
<source>Warning: searching collections while indexing can return incomplete results</source> <source>Warning: searching collections while indexing can return incomplete results</source>
<translation>Attenzione: la ricerca nelle raccolte durante l&apos;indicizzazione può restituire risultati incompleti</translation> <translation>Avviso: la ricerca nelle raccolte durante l&apos;indicizzazione può restituire risultati incompleti</translation>
</message> </message>
<message numerus="yes"> <message numerus="yes">
<location filename="../qml/CollectionsDrawer.qml" line="87"/> <location filename="../qml/CollectionsDrawer.qml" line="87"/>
@ -1283,8 +1283,8 @@ modello per iniziare</translation>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/CollectionsDrawer.qml" line="87"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/CollectionsDrawer.qml" line="87"/>
<source>%n word(s)</source> <source>%n word(s)</source>
<translation> <translation>
<numerusform></numerusform> <numerusform>%n parola</numerusform>
<numerusform></numerusform> <numerusform>%n parole</numerusform>
</translation> </translation>
</message> </message>
<message> <message>
@ -1498,7 +1498,7 @@ modello per iniziare</translation>
<location filename="../qml/LocalDocsSettings.qml" line="112"/> <location filename="../qml/LocalDocsSettings.qml" line="112"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/LocalDocsSettings.qml" line="112"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/LocalDocsSettings.qml" line="112"/>
<source>Use Nomic Embed API</source> <source>Use Nomic Embed API</source>
<translation>Utilizza l&apos;API di incorporamento di Nomic Embed</translation> <translation>Utilizza l&apos;API di incorporamento Nomic Embed</translation>
</message> </message>
<message> <message>
<location filename="../qml/LocalDocsSettings.qml" line="113"/> <location filename="../qml/LocalDocsSettings.qml" line="113"/>
@ -1649,7 +1649,7 @@ modello per iniziare</translation>
<location filename="../qml/LocalDocsView.qml" line="231"/> <location filename="../qml/LocalDocsView.qml" line="231"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/LocalDocsView.qml" line="231"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/LocalDocsView.qml" line="231"/>
<source>Indexing progressBar</source> <source>Indexing progressBar</source>
<translation>Barra di avanzamento dell&apos;indicizzazione</translation> <translation>Barra di avanzamento indicizzazione</translation>
</message> </message>
<message> <message>
<location filename="../qml/LocalDocsView.qml" line="232"/> <location filename="../qml/LocalDocsView.qml" line="232"/>
@ -1930,13 +1930,13 @@ modello per iniziare</translation>
<location filename="../qml/ModelSettings.qml" line="298"/> <location filename="../qml/ModelSettings.qml" line="298"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelSettings.qml" line="298"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelSettings.qml" line="298"/>
<source>Suggested FollowUp Prompt</source> <source>Suggested FollowUp Prompt</source>
<translation>Prompt di proseguimento suggerito</translation> <translation>Prompt di approfondimento suggerito</translation>
</message> </message>
<message> <message>
<location filename="../qml/ModelSettings.qml" line="299"/> <location filename="../qml/ModelSettings.qml" line="299"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelSettings.qml" line="299"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelSettings.qml" line="299"/>
<source>Prompt used to generate suggested follow-up questions.</source> <source>Prompt used to generate suggested follow-up questions.</source>
<translation>Prompt utilizzato per generare domande di proseguimento suggerite.</translation> <translation>Prompt utilizzato per generare domande di approfondimento suggerite.</translation>
</message> </message>
<message> <message>
<location filename="../qml/ModelSettings.qml" line="352"/> <location filename="../qml/ModelSettings.qml" line="352"/>
@ -2233,7 +2233,7 @@ NOTA: non ha effetto finché non si ricarica il modello.</translation>
<location filename="../qml/ModelsView.qml" line="301"/> <location filename="../qml/ModelsView.qml" line="301"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelsView.qml" line="301"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelsView.qml" line="301"/>
<source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source> <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>
<translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;AVVERTENZA: non consigliato per il tuo hardware. Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).&lt;/strong&gt;&lt;/font&gt;</translation> <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;AVVISO: non consigliato per il tuo hardware. Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).&lt;/strong&gt;&lt;/font&gt;</translation>
</message> </message>
<message> <message>
<location filename="../qml/ModelsView.qml" line="399"/> <location filename="../qml/ModelsView.qml" line="399"/>
@ -2293,7 +2293,7 @@ NOTA: non ha effetto finché non si ricarica il modello.</translation>
<location filename="../qml/ModelsView.qml" line="345"/> <location filename="../qml/ModelsView.qml" line="345"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelsView.qml" line="345"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/ModelsView.qml" line="345"/>
<source>Download progressBar</source> <source>Download progressBar</source>
<translation>barra di avanzamento dello scaricamento</translation> <translation>Barra di avanzamento dello scaricamento</translation>
</message> </message>
<message> <message>
<location filename="../qml/ModelsView.qml" line="346"/> <location filename="../qml/ModelsView.qml" line="346"/>
@ -2386,13 +2386,13 @@ NOTA: non ha effetto finché non si ricarica il modello.</translation>
<location filename="../qml/MyFancyLink.qml" line="42"/> <location filename="../qml/MyFancyLink.qml" line="42"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/MyFancyLink.qml" line="42"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/MyFancyLink.qml" line="42"/>
<source>Fancy link</source> <source>Fancy link</source>
<translation></translation> <translation>Mio link</translation>
</message> </message>
<message> <message>
<location filename="../qml/MyFancyLink.qml" line="43"/> <location filename="../qml/MyFancyLink.qml" line="43"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/MyFancyLink.qml" line="43"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/MyFancyLink.qml" line="43"/>
<source>A stylized link</source> <source>A stylized link</source>
<translation>Un link stilizzato</translation> <translation>Un link d&apos;esempio</translation>
</message> </message>
</context> </context>
<context> <context>
@ -2410,7 +2410,7 @@ NOTA: non ha effetto finché non si ricarica il modello.</translation>
<location filename="../qml/MySettingsTab.qml" line="62"/> <location filename="../qml/MySettingsTab.qml" line="62"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/MySettingsTab.qml" line="62"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/MySettingsTab.qml" line="62"/>
<source>Restore Defaults</source> <source>Restore Defaults</source>
<translation>Riprista valori predefiniti</translation> <translation>Ripristina i valori predefiniti</translation>
</message> </message>
<message> <message>
<location filename="../qml/MySettingsTab.qml" line="66"/> <location filename="../qml/MySettingsTab.qml" line="66"/>
@ -2425,7 +2425,7 @@ NOTA: non ha effetto finché non si ricarica il modello.</translation>
<location filename="../qml/NetworkDialog.qml" line="39"/> <location filename="../qml/NetworkDialog.qml" line="39"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/NetworkDialog.qml" line="39"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/NetworkDialog.qml" line="39"/>
<source>Contribute data to the GPT4All Opensource Datalake.</source> <source>Contribute data to the GPT4All Opensource Datalake.</source>
<translation>Contribuisci coni tuoi dati al Data Lake Open Source di GPT4All.</translation> <translation>Contribuisci con i tuoi dati al Datalake Open Source di GPT4All.</translation>
</message> </message>
<message> <message>
<location filename="../qml/NetworkDialog.qml" line="55"/> <location filename="../qml/NetworkDialog.qml" line="55"/>
@ -2720,7 +2720,7 @@ NOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di
<location filename="../qml/SwitchModelDialog.qml" line="22"/> <location filename="../qml/SwitchModelDialog.qml" line="22"/>
<location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/SwitchModelDialog.qml" line="22"/> <location filename="../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/SwitchModelDialog.qml" line="22"/>
<source>&lt;b&gt;Warning:&lt;/b&gt; changing the model will erase the current conversation. Do you wish to continue?</source> <source>&lt;b&gt;Warning:&lt;/b&gt; changing the model will erase the current conversation. Do you wish to continue?</source>
<translation>&lt;b&gt;Attenzione:&lt;/b&gt; la modifica del modello cancellerà la conversazione corrente. Vuoi continuare?</translation> <translation>&lt;b&gt;Avviso:&lt;/b&gt; la modifica del modello cancellerà la conversazione corrente. Vuoi continuare?</translation>
</message> </message>
<message> <message>
<location filename="../qml/SwitchModelDialog.qml" line="33"/> <location filename="../qml/SwitchModelDialog.qml" line="33"/>