Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-10-01 01:26:03 -04:00)
Sort the imports

commit a717fd709d
parent 7d97287e69
@@ -7,6 +7,7 @@ import transformers
 import modules.shared as shared

 # Copied from https://github.com/PygmalionAI/gradio-ui/
 class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
@@ -12,7 +12,8 @@ import modules.extensions as extensions_module
 import modules.shared as shared
 from modules.extensions import apply_extensions
 from modules.html_generator import generate_chat_html
-from modules.text_generation import encode, generate_reply, get_max_prompt_length
+from modules.text_generation import (encode, generate_reply,
+                                     get_max_prompt_length)

 # This gets the new line characters right.
@@ -8,11 +8,10 @@ import numpy as np
 import torch
 import transformers
 from accelerate import infer_auto_device_map, init_empty_weights
+from peft import PeftModel
 from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
                           BitsAndBytesConfig)

-from peft import PeftModel
-
 import modules.shared as shared

 transformers.logging.set_verbosity_error()
@@ -15,9 +15,9 @@ import modules.extensions as extensions_module
 import modules.shared as shared
 import modules.ui as ui
 from modules.html_generator import generate_chat_html
+from modules.LoRA import add_lora_to_model
 from modules.models import load_model, load_soft_prompt
 from modules.text_generation import generate_reply
-from modules.LoRA import add_lora_to_model

 # Loading custom settings
 settings_file = None
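
The commit message does not say how the new ordering was produced, but the resulting style (straight imports before `from` imports, third-party packages such as accelerate, peft, and transformers kept separate from the local `modules` package, and long import lines wrapped in aligned parentheses) matches what isort produces with its defaults. Below is a minimal sketch of reproducing that ordering through isort's Python API; it assumes isort >= 5 is installed and treats `modules` as first-party to mimic the repository layout, and the input snippet is illustrative rather than the actual file contents.

# Minimal sketch: reproduce the kind of reordering shown in the hunks above with
# isort (an assumption; the commit does not name the tool that was used).
import isort

# Illustrative, unsorted input modeled on the third hunk; not the real file.
unsorted = """\
import transformers
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

from peft import PeftModel

import modules.shared as shared
"""

sorted_code = isort.code(
    unsorted,
    line_length=79,                 # long lines get wrapped in parentheses
    known_first_party=["modules"],  # keep local imports in their own group, as in the repo
)
print(sorted_code)

With these settings the long `from transformers import ...` line is wrapped in the same parenthesized, aligned style seen in the diff, and `import modules.shared as shared` stays in its own trailing group.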