Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-10-01 01:26:03 -04:00
Fix several typos in the codebase (#6151)
parent 2c5a9eb597
commit 5993904acf
@@ -18,13 +18,13 @@ In the **Prompt** menu, you can select from some predefined prompts defined unde
 
 ### Output
 
-Four tabs can be found:
+Five tabs can be found:
 
 * **Raw**: where the raw text generated by the model appears.
 * **Markdown**: it contains a "Render" button. You can click on it at any time to render the current output as markdown. This is particularly useful for models that generate LaTeX equations like GALACTICA.
 * **HTML**: displays the output in an HTML style that is meant to be easier to read. Its style is defined under `text-generation-webui/css/html_readable_style.css`.
 * **Logits**: when you click on "Get next token probabilities", this tab displays the 50 most likely next tokens and their probabilities based on your current input. If "Use samplers" is checked, the probabilities will be the ones after the sampling parameters in the "Parameters" > "Generation" tab are applied. Otherwise, they will be the raw probabilities generated by the model.
-* **Tokens**: allows you to tokenize your prompt and see the ID numbers for the individuals tokens.
+* **Tokens**: allows you to tokenize your prompt and see the ID numbers for the individual tokens.
 
 ## Notebook tab
 
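The Logits tab described in this hunk reports raw, sampler-free next-token probabilities when "Use samplers" is unchecked. As a rough illustration of what that means, here is a minimal sketch using transformers; it is not the web UI's own code, and "gpt2" is just an arbitrary model choice:

```python
# Minimal sketch of raw (sampler-free) next-token probabilities,
# as the Logits tab describes. Not the web UI's own code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The capital of France is", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits[0, -1]  # logits for the next token only

probs = torch.softmax(logits, dim=-1)  # raw probabilities, no samplers applied
top = torch.topk(probs, 50)            # the 50 most likely next tokens
for p, token_id in zip(top.values, top.indices):
    print(f"{p.item():.4f}  {tokenizer.decode([token_id])!r}")
```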
@@ -219,7 +219,7 @@ print()
 
 ### Environment variables
 
-The following environment variables can be used (they take precendence over everything else):
+The following environment variables can be used (they take precedence over everything else):
 
 | Variable Name | Description | Example Value |
 |------------------------|------------------------------------|----------------------------|
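The precedence rule stated in this hunk (environment variables override everything else) follows a common lookup pattern: check the environment first, fall back to the configured value otherwise. A minimal sketch, with a hypothetical variable name since the table rows are truncated in this hunk:

```python
import os

# Sketch of "environment variables take precedence": the environment
# wins when set, otherwise the configured value is used. "EXAMPLE_VAR"
# is hypothetical; the real names are in the truncated table above.
def resolve_setting(env_name: str, configured_value: str) -> str:
    return os.environ.get(env_name, configured_value)

print(resolve_setting("EXAMPLE_VAR", "value-from-config"))
```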
@@ -1,4 +1,4 @@
-These files is a mirror of the documentation at:
+These files are a mirror of the documentation at:
 
 # https://github.com/oobabooga/text-generation-webui/wiki
 
@@ -73,7 +73,7 @@ def add_lora_autogptq(lora_names):
     if len(lora_names) > 1:
         logger.warning('AutoGPTQ can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')
     if not shared.args.no_inject_fused_attention:
-        logger.warning('Fused Atttention + AutoGPTQ may break Lora loading. Disable it.')
+        logger.warning('Fused Attention + AutoGPTQ may break Lora loading. Disable it.')
 
     peft_config = GPTQLoraConfig(
         inference_mode=True,
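The warning fixed in this hunk sits next to a single-LoRA restriction: AutoGPTQ loads only the first LoRA in the list. A minimal standalone sketch of that guard, with illustrative adapter names rather than the project's actual code:

```python
import logging

logger = logging.getLogger(__name__)

# Sketch of the single-LoRA guard shown above: AutoGPTQ supports one
# LoRA, so anything past the first name is dropped.
def pick_single_lora(lora_names: list[str]) -> list[str]:
    if len(lora_names) > 1:
        logger.warning('AutoGPTQ can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')
    return lora_names[:1]

print(pick_single_lora(["adapter-a", "adapter-b"]))  # -> ['adapter-a']
```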
@@ -32,7 +32,7 @@ def clone_or_pull_repository(github_url):
         yield f"Cloning {github_url}..."
         clone_output = subprocess.check_output(["git", "clone", github_url, repo_path], stderr=subprocess.STDOUT)
         new_extensions.add(repo_name)
-        yield f"The extension `{repo_name}` has been downloaded.\n\nPlease close the the web UI completely and launch it again to be able to load it."
+        yield f"The extension `{repo_name}` has been downloaded.\n\nPlease close the web UI completely and launch it again to be able to load it."
         return clone_output.decode()
     except subprocess.CalledProcessError as e:
         return str(e)
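The context lines in this hunk show a common subprocess pattern: merge stderr into the captured output so git's error text is not lost, and turn a failed call into a readable string instead of an unhandled exception. A minimal self-contained sketch of that pattern (the function name and arguments are illustrative):

```python
import subprocess

# Sketch of the capture-and-report pattern shown above: stderr is merged
# into stdout so git's messages are captured, and a non-zero exit code
# becomes a readable string rather than a crash.
def clone_repo(github_url: str, repo_path: str) -> str:
    try:
        output = subprocess.check_output(
            ["git", "clone", github_url, repo_path],
            stderr=subprocess.STDOUT,
        )
        return output.decode()
    except subprocess.CalledProcessError as e:
        return str(e)  # e.output also holds git's combined output
```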