qml: improve chats with missing models and model settings layout (#2520)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel authored on 2024-07-08 17:01:30 -04:00; committed by GitHub
parent 11b58a1a15
commit 4853adebd9
3 changed files with 66 additions and 41 deletions


@@ -204,7 +204,7 @@ Window {
     anchors.top: parent.top
     anchors.bottom: parent.bottom
     anchors.left: parent.left
-    width: 16 + 52 * theme.fontScale
+    width: 68 * theme.fontScale
     color: theme.viewBarBackground
     ColumnLayout {
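Context for the hunk above: at the default theme.fontScale of 1.0 both expressions evaluate to 68, but the old fixed 16px term made the bar grow more slowly than its font-scaled contents at larger scales (120 vs. 136 at a scale of 2.0). A minimal sketch of the two formulas side by side; the property names are illustrative, not from the GPT4All sources:

    // Hypothetical comparison of the old and new width formulas.
    import QtQuick

    Rectangle {
        property real fontScale: 2.0
        property real oldWidth: 16 + 52 * fontScale  // 120 at scale 2.0
        property real newWidth: 68 * fontScale       // 136 at scale 2.0
        width: newWidth  // the bar now scales uniformly with the font
        height: 400
    }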


@@ -47,6 +47,10 @@ Rectangle {
         return ModelList.modelInfo(currentChat.modelInfo.id).name;
     }
+    function currentModelInstalled() {
+        return currentModelName() !== "" && ModelList.modelInfo(currentChat.modelInfo.id).installed;
+    }
     PopupDialog {
         id: modelLoadingErrorPopup
         anchors.centerIn: parent
@@ -322,7 +326,7 @@ Rectangle {
     visible: currentChat.modelLoadingError === ""
              && !currentChat.trySwitchContextInProgress
              && !currentChat.isCurrentlyLoading
-             && (currentChat.isModelLoaded || currentModelName() !== "")
+             && (currentChat.isModelLoaded || currentModelInstalled())
     source: "qrc:/gpt4all/icons/regenerate.svg"
     backgroundColor: theme.textColor
     backgroundColorHovered: theme.styledTextColor
@ -358,15 +362,17 @@ Rectangle {
rightPadding: 10
text: {
if (ModelList.selectableModels.count === 0)
return qsTr("No model installed...")
return qsTr("No model installed.")
if (currentChat.modelLoadingError !== "")
return qsTr("Model loading error...")
return qsTr("Model loading error.")
if (currentChat.trySwitchContextInProgress === 1)
return qsTr("Waiting for model...")
if (currentChat.trySwitchContextInProgress === 2)
return qsTr("Switching context...")
if (currentModelName() === "")
return qsTr("Choose a model...")
if (!currentModelInstalled())
return qsTr("Not found: %1").arg(currentModelName())
if (currentChat.modelLoadingPercentage === 0.0)
return qsTr("Reload \u00B7 ") + currentModelName()
if (currentChat.isCurrentlyLoading)
@@ -1519,7 +1525,7 @@ Rectangle {
         && currentChat.modelLoadingError === ""
         && !currentChat.trySwitchContextInProgress
         && !currentChat.isCurrentlyLoading
-        && currentModelName() !== ""
+        && currentModelInstalled()
     Image {
         anchors.verticalCenter: parent.verticalCenter
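Taken together, these ChatView hunks distinguish "a model name is set" from "that model is actually installed": a chat can keep referencing a model whose file has since been removed, and the old name-only check left the regenerate button and status text behaving as if the model were available. A minimal sketch of the guard, using a stand-in object in place of ModelList.modelInfo(...) (names hypothetical):

    // Sketch only: modelStub stands in for the real model lookup.
    import QtQuick

    QtObject {
        property var modelStub: ({ name: "SomeModel", installed: false })

        function currentModelName() { return modelStub.name || ""; }
        function currentModelInstalled() {
            // Require both a known name and a file present on disk.
            return currentModelName() !== "" && modelStub.installed;
        }

        Component.onCompleted: {
            if (!currentModelInstalled())
                console.log(qsTr("Not found: %1").arg(currentModelName()));
        }
    }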


@@ -303,17 +303,26 @@ MySettingsTab {
         helpText: qsTr("Number of input and output tokens the model sees.")
         Layout.row: 0
         Layout.column: 0
+        Layout.maximumWidth: 300 * theme.fontScale
     }
+    Item {
+        Layout.row: 0
+        Layout.column: 1
+        Layout.fillWidth: true
+        Layout.maximumWidth: 200
+        Layout.margins: 0
+        height: contextLengthField.height
         MyTextField {
             id: contextLengthField
+            anchors.left: parent.left
+            anchors.verticalCenter: parent.verticalCenter
             visible: !root.currentModelInfo.isOnline
             text: root.currentModelInfo.contextLength
             font.pixelSize: theme.fontSizeLarge
             color: theme.textColor
             ToolTip.text: qsTr("Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.")
             ToolTip.visible: hovered
-            Layout.row: 0
-            Layout.column: 1
             Connections {
                 target: MySettings
                 function onContextLengthChanged() {
@@ -346,6 +355,7 @@ MySettingsTab {
             Accessible.name: contextLengthLabel.text
             Accessible.description: ToolTip.text
         }
+    }
     MySettingsLabel {
         id: tempLabel
@@ -353,6 +363,7 @@ MySettingsTab {
         helpText: qsTr("Randomness of model output. Higher -> more variation.")
         Layout.row: 1
         Layout.column: 2
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
@@ -398,6 +409,7 @@ MySettingsTab {
         helpText: qsTr("Nucleus Sampling factor. Lower -> more predictable.")
         Layout.row: 2
         Layout.column: 0
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: topPField
@@ -442,6 +454,7 @@ MySettingsTab {
         helpText: qsTr("Minimum token probability. Higher -> more predictable.")
         Layout.row: 3
         Layout.column: 0
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: minPField
@@ -488,6 +501,7 @@ MySettingsTab {
         helpText: qsTr("Size of selection pool for tokens.")
         Layout.row: 2
         Layout.column: 2
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: topKField
@@ -534,6 +548,7 @@ MySettingsTab {
         helpText: qsTr("Maximum response length, in tokens.")
         Layout.row: 0
         Layout.column: 2
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: maxLengthField
@@ -579,6 +594,7 @@ MySettingsTab {
         helpText: qsTr("The batch size used for prompt processing.")
         Layout.row: 1
         Layout.column: 0
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: batchSizeField
@@ -625,6 +641,7 @@ MySettingsTab {
         helpText: qsTr("Repetition penalty factor. Set to 1 to disable.")
         Layout.row: 4
         Layout.column: 2
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: repeatPenaltyField
@@ -669,6 +686,7 @@ MySettingsTab {
         helpText: qsTr("Number of previous tokens used for penalty.")
         Layout.row: 3
         Layout.column: 2
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: repeatPenaltyTokenField
@@ -714,6 +732,7 @@ MySettingsTab {
         helpText: qsTr("Number of model layers to load into VRAM.")
         Layout.row: 4
         Layout.column: 0
+        Layout.maximumWidth: 300 * theme.fontScale
     }
     MyTextField {
         id: gpuLayersField
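The recurring edit in this file caps every settings label at Layout.maximumWidth: 300 * theme.fontScale and wraps the context-length field in a width-capped Item, so a long help text can no longer stretch its whole GridLayout column. A self-contained sketch of the pattern, with generic Label/TextField stand-ins for MySettingsLabel and MyTextField:

    import QtQuick
    import QtQuick.Controls
    import QtQuick.Layouts

    GridLayout {
        columns: 2
        property real fontScale: 1.0

        Label {
            text: "Context Length: number of input and output tokens the model sees."
            wrapMode: Text.WordWrap
            // Long text wraps instead of widening the whole column.
            Layout.maximumWidth: 300 * fontScale
        }
        // A wrapper Item gives the field its own width cap, independent of
        // however wide the label column ends up.
        Item {
            Layout.fillWidth: true
            Layout.maximumWidth: 200
            height: lengthField.height
            TextField {
                id: lengthField
                anchors.left: parent.left
                anchors.verticalCenter: parent.verticalCenter
                text: "2048"
            }
        }
    }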