From 13fe38eb278ae3165aead4fa498abed933f8da40 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 4 Apr 2024 16:10:47 -0700
Subject: [PATCH] Remove specialized code for gpt-4chan
---
css/html_4chan_style.css | 73 ------------------------------------
docs/08 - Additional Tips.md | 23 ------------
modules/html_generator.py | 59 -----------------------------
modules/models.py | 4 +-
modules/text_generation.py | 20 +---------
prompts/GPT-4chan.txt | 6 ---
6 files changed, 3 insertions(+), 182 deletions(-)
delete mode 100644 css/html_4chan_style.css
delete mode 100644 prompts/GPT-4chan.txt
diff --git a/css/html_4chan_style.css b/css/html_4chan_style.css
deleted file mode 100644
index afbfb537..00000000
--- a/css/html_4chan_style.css
+++ /dev/null
@@ -1,73 +0,0 @@
-#parent #container {
- background-color: #eef2ff;
- padding: 17px;
-}
-
-#parent #container .reply {
- background-color: rgb(214 218 240);
- border-bottom: 1px solid rgb(183 197 217);
- border-image: none 100% 1 0 stretch;
- border-left: 0 none rgb(0 0 0);
- border-right: 1px solid rgb(183 197 217);
- color: rgb(0 0 0);
- display: table;
- font-family: arial, helvetica, sans-serif;
- font-size: 13.3333px;
- margin: 4px 0;
- overflow: hidden hidden;
- padding: 4px 2px;
-}
-
-#parent #container .number {
- color: rgb(0 0 0);
- font-family: arial, helvetica, sans-serif;
- font-size: 13.3333px;
- width: 342.65px;
- margin-right: 7px;
-}
-
-#parent #container .op {
- color: rgb(0 0 0);
- font-family: arial, helvetica, sans-serif;
- font-size: 13.3333px;
- margin: 4px 0 8px;
- overflow: hidden hidden;
-}
-
-#parent #container .op blockquote {
- margin-left: 0 !important;
-}
-
-#parent #container .name {
- color: rgb(17 119 67);
- font-family: arial, helvetica, sans-serif;
- font-size: 13.3333px;
- font-weight: 700;
- margin-left: 7px;
-}
-
-#parent #container .quote {
- color: rgb(221 0 0);
- font-family: arial, helvetica, sans-serif;
- font-size: 13.3333px;
- text-decoration: underline solid rgb(221 0 0);
- text-decoration-thickness: auto;
-}
-
-#parent #container .greentext {
- color: rgb(120 153 34);
- font-family: arial, helvetica, sans-serif;
- font-size: 13.3333px;
-}
-
-#parent #container blockquote {
- margin: 0 !important;
- margin-block: 1em 1em;
- margin-inline: 40px 40px;
- margin: 13.33px 40px !important;
-}
-
-#parent #container .message_4chan {
- color: black;
- border: none;
-}
\ No newline at end of file
diff --git a/docs/08 - Additional Tips.md b/docs/08 - Additional Tips.md
index 7ad00ee3..89675cca 100644
--- a/docs/08 - Additional Tips.md
+++ b/docs/08 - Additional Tips.md
@@ -13,29 +13,6 @@ Source: https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1126
This file will be automatically detected the next time you start the web UI.
-## GPT-4chan
-
-[GPT-4chan](https://huggingface.co/ykilcher/gpt-4chan) has been shut down from Hugging Face, so you need to download it elsewhere. You have two options:
-
-* Torrent: [16-bit](https://archive.org/details/gpt4chan_model_float16) / [32-bit](https://archive.org/details/gpt4chan_model)
-* Direct download: [16-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model_float16/) / [32-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model/)
-
-The 32-bit version is only relevant if you intend to run the model in CPU mode. Otherwise, you should use the 16-bit version.
-
-After downloading the model, follow these steps:
-
-1. Place the files under `models/gpt4chan_model_float16` or `models/gpt4chan_model`.
-2. Place GPT-J 6B's config.json file in that same folder: [config.json](https://huggingface.co/EleutherAI/gpt-j-6B/raw/main/config.json).
-3. Download GPT-J 6B's tokenizer files (they will be automatically detected when you attempt to load GPT-4chan):
-
-```
-python download-model.py EleutherAI/gpt-j-6B --text-only
-```
-
-When you load this model in default or notebook modes, the "HTML" tab will show the generated text in 4chan format:
-
-![Image3](https://github.com/oobabooga/screenshots/raw/main/gpt4chan.png)
-
## Using LoRAs with GPTQ-for-LLaMa
This requires using a monkey patch that is supported by this web UI: https://github.com/johnsmith0031/alpaca_lora_4bit
diff --git a/modules/html_generator.py b/modules/html_generator.py
index 278f1632..2be53fc8 100644
--- a/modules/html_generator.py
+++ b/modules/html_generator.py
@@ -16,8 +16,6 @@ image_cache = {}
with open(Path(__file__).resolve().parent / '../css/html_readable_style.css', 'r') as f:
readable_css = f.read()
-with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r') as css_f:
- _4chan_css = css_f.read()
with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
instruct_css = f.read()
@@ -118,63 +116,6 @@ def generate_basic_html(string):
return string
-def process_post(post, c):
-    t = post.split('\n')
-    number = t[0].split(' ')[1]
-    if len(t) > 1:
-        src = '\n'.join(t[1:])
-    else:
-        src = ''
-    src = re.sub('>', '&gt;', src)
-    src = re.sub('(&gt;&gt;[0-9]*)', '<span class="quote">\\1</span>', src)
-    src = re.sub('\n', '<br>\n', src)
- src = f'