This commit is contained in:
Dull Bananas 2024-05-22 20:34:35 +00:00
parent 5fca4ea918
commit d71cfaa503
11 changed files with 410 additions and 336 deletions

View File

@ -60,12 +60,13 @@ fn replaceable_schema() -> String {
const REPLACEABLE_SCHEMA_PATH: &str = "crates/db_schema/replaceable_schema";
struct MigrationHarnessWrapper<'a, 'b> {
struct MigrationHarnessWrapper<'a> {
conn: &'a mut PgConnection,
options: &'b Options,
#[cfg(test)]
enable_diff_check: bool,
}
impl<'a, 'b> MigrationHarnessWrapper<'a, 'b> {
impl<'a> MigrationHarnessWrapper<'a> {
fn run_migration_inner(
&mut self,
migration: &dyn Migration<Pg>,
@ -82,13 +83,13 @@ impl<'a, 'b> MigrationHarnessWrapper<'a, 'b> {
}
}
impl<'a, 'b> MigrationHarness<Pg> for MigrationHarnessWrapper<'a, 'b> {
impl<'a> MigrationHarness<Pg> for MigrationHarnessWrapper<'a> {
fn run_migration(
&mut self,
migration: &dyn Migration<Pg>,
) -> diesel::migration::Result<MigrationVersion<'static>> {
#[cfg(test)]
if self.options.enable_diff_check {
if self.enable_diff_check {
let before = diff_check::get_dump();
self.run_migration_inner(migration)?;
@ -129,71 +130,60 @@ impl<'a, 'b> MigrationHarness<Pg> for MigrationHarnessWrapper<'a, 'b> {
}
}
#[derive(Default)]
pub struct Options {
enable_forbid_diesel_cli_trigger: bool,
#[cfg(test)]
enable_diff_check: bool,
revert: bool,
run: bool,
amount: Option<u64>,
}
impl Default for Options {
fn default() -> Self {
Options {
enable_forbid_diesel_cli_trigger: false,
enable_diff_check: false,
revert: false,
run: true,
amount: None,
}
}
limit: Option<u64>,
}
impl Options {
#[cfg(test)]
fn enable_forbid_diesel_cli_trigger(mut self) -> Self {
self.enable_forbid_diesel_cli_trigger = true;
self
}
#[cfg(test)]
fn enable_diff_check(mut self) -> Self {
self.enable_diff_check = true;
self
}
pub fn revert(mut self, amount: Option<u64>) -> Self {
self.revert = true;
self.run = false;
self.amount = amount;
pub fn run(mut self) -> Self {
self.run = true;
self
}
pub fn redo(mut self, amount: Option<u64>) -> Self {
pub fn revert(mut self) -> Self {
self.revert = true;
self.run = true;
self.amount = amount;
self
}
pub fn limit(mut self, limit: u64) -> Self {
self.limit = Some(limit);
self
}
}
// TODO return struct with field `ran_replaceable_schema`
pub fn run(options: Options) -> LemmyResult<()> {
/// Checked by tests
#[derive(PartialEq, Eq, Debug)]
pub enum Branch {
EarlyReturn,
ReplaceableSchemaRebuilt,
ReplaceableSchemaNotRebuilt,
}
pub fn run(options: Options) -> LemmyResult<Branch> {
let db_url = SETTINGS.get_database_url();
// Migrations don't support async connection, and this function doesn't need to be async
//let mut conn = PgConnection::establish(&db_url).context("Error connecting to database")?;
let mut conn = PgConnection::establish(&db_url)?;
// If possible, skip locking the migrations table and recreating the "r" schema, so
// If possible, skip getting a lock and recreating the "r" schema, so
// lemmy_server processes in a horizontally scaled setup can start without causing locks
if !options.revert
&& options.run
&& options.amount.is_none()
&& options.limit.is_none()
&& !conn
.has_pending_migration(migrations())
.map_err(convert_err)?
//.map_err(|e| anyhow!("Couldn't check pending migrations: {e}"))?)
{
// The condition above implies that the migration that creates the previously_run_sql table was already run
let sql_unchanged = exists(
@ -203,19 +193,14 @@ pub fn run(options: Options) -> LemmyResult<()> {
let schema_exists = exists(pg_namespace::table.find("r"));
if select(sql_unchanged.and(schema_exists)).get_result(&mut conn)? {
return Ok(());
return Ok(Branch::EarlyReturn);
}
}
// Disable the trigger that prevents the Diesel CLI from running migrations
if !options.enable_forbid_diesel_cli_trigger {
conn.batch_execute("SET lemmy.enable_migrations TO 'on';")?;
}
// Block concurrent attempts to run migrations (lock is automatically released at the end
// because the connection is not reused)
// Block concurrent attempts to run migrations until `conn` is closed, and disable the
// trigger that prevents the Diesel CLI from running migrations
info!("Waiting for lock...");
conn.batch_execute("SELECT pg_advisory_lock(0)")?;
conn.batch_execute("SELECT pg_advisory_lock(0);")?;
info!("Running Database migrations (This may take a long time)...");
// Drop `r` schema, so migrations don't need to be made to work both with and without things in
@ -225,7 +210,7 @@ pub fn run(options: Options) -> LemmyResult<()> {
run_selected_migrations(&mut conn, &options).map_err(convert_err)?;
// Only run replaceable_schema if newest migration was applied
if (options.run && options.amount.is_none())
let output = if (options.run && options.limit.is_none())
|| !conn
.has_pending_migration(migrations())
.map_err(convert_err)?
@ -243,11 +228,15 @@ pub fn run(options: Options) -> LemmyResult<()> {
}
run_replaceable_schema(&mut conn)?;
}
Branch::ReplaceableSchemaRebuilt
} else {
Branch::ReplaceableSchemaNotRebuilt
};
info!("Database migrations complete.");
Ok(())
Ok(output)
}
fn run_replaceable_schema(conn: &mut PgConnection) -> LemmyResult<()> {
@ -281,11 +270,15 @@ fn run_selected_migrations(
conn: &mut PgConnection,
options: &Options,
) -> diesel::migration::Result<()> {
let mut wrapper = MigrationHarnessWrapper { conn, options };
let mut wrapper = MigrationHarnessWrapper {
conn,
#[cfg(test)]
enable_diff_check: options.enable_diff_check,
};
if options.revert {
if let Some(amount) = options.amount {
for _ in 0..amount {
if let Some(limit) = options.limit {
for _ in 0..limit {
wrapper.revert_last_migration(migrations())?;
}
} else {
@ -294,8 +287,8 @@ fn run_selected_migrations(
}
if options.run {
if let Some(amount) = options.amount {
for _ in 0..amount {
if let Some(limit) = options.limit {
for _ in 0..limit {
wrapper.run_next_migration(migrations())?;
}
} else {
@ -307,19 +300,24 @@ fn run_selected_migrations(
}
/// Makes `diesel::migration::Result` work with `anyhow` and `LemmyError`
fn convert_err(
err: Box<dyn std::error::Error + Send + Sync>,
//) -> impl std::error::Error + Send + Sync + 'static {
) -> anyhow::Error {
anyhow::anyhow!(err)
/// Converts the boxed error produced by `diesel::migration::Result` into an
/// `anyhow::Error`, so migration failures can flow through `?` into `LemmyResult`.
fn convert_err(e: Box<dyn std::error::Error + Send + Sync>) -> anyhow::Error {
  anyhow!(e)
}
#[cfg(test)]
mod tests {
use super::*;
use super::{
Branch::{EarlyReturn, ReplaceableSchemaNotRebuilt, ReplaceableSchemaRebuilt},
*,
};
use lemmy_utils::{error::LemmyResult, settings::SETTINGS};
use serial_test::serial;
/// Reduces clutter
/// Shorthand for `Options::default()` to reduce clutter in the test assertions.
fn o() -> Options {
  Options::default()
}
#[test]
#[serial]
fn test_schema_setup() -> LemmyResult<()> {
@ -329,18 +327,37 @@ mod tests {
// Start with consistent state by dropping everything
conn.batch_execute("DROP OWNED BY CURRENT_USER;")?;
// Check for mistakes in down.sql files
run(Options::default().enable_diff_check())?;
// Run all migrations and check for mistakes in down.sql files
assert_eq!(
run(o().run().enable_diff_check())?,
ReplaceableSchemaRebuilt
);
// TODO also don't drop r, and maybe just directly call the migrationharness method here
run(Options::default().revert(None))?;
// Check for early return
assert_eq!(run(o().run())?, EarlyReturn);
// Test `limit`
assert_eq!(run(o().revert().limit(1))?, ReplaceableSchemaNotRebuilt);
assert_eq!(
conn
.pending_migrations(migrations())
.map_err(convert_err)?
.len(),
1
);
assert_eq!(run(o().run().limit(1))?, ReplaceableSchemaRebuilt);
// Revert all migrations
assert_eq!(run(o().revert())?, ReplaceableSchemaNotRebuilt);
// This should throw an error saying to use lemmy_server instead of diesel CLI
assert!(matches!(
run(Options::default().enable_forbid_diesel_cli_trigger()),
conn.run_pending_migrations(migrations()),
Err(e) if e.to_string().contains("lemmy_server")
));
// Previous run shouldn't stop this one from working
run(Options::default())?;
// Diesel CLI's way of running migrations shouldn't break the custom migration runner
assert_eq!(run(o().run())?, ReplaceableSchemaRebuilt);
Ok(())
}

View File

@ -1,7 +1,7 @@
use lemmy_utils::settings::SETTINGS;
use std::{
borrow::Cow,
collections::BTreeSet,
collections::btree_set::{self, BTreeSet},
fmt::Write,
process::{Command, Stdio},
};
@ -36,196 +36,228 @@ pub fn get_dump() -> String {
const PATTERN_LEN: usize = 19;
pub fn check_dump_diff(mut after: String, mut before: String, label: &str) {
// Performance optimization
if after == before {
return;
}
// Ignore timestamp differences by removing timestamps
for dump in [&mut before, &mut after] {
for index in 0.. {
let Some(byte) = dump.as_bytes().get(index) else {
break;
};
if !byte.is_ascii_digit() {
continue;
}
// Check for this pattern: 0000-00-00 00:00:00
let Some((
&[a0, a1, a2, a3, b0, a4, a5, b1, a6, a7, b2, a8, a9, b3, a10, a11, b4, a12, a13],
remaining,
)) = dump
.get(index..)
.and_then(|s| s.as_bytes().split_first_chunk::<PATTERN_LEN>())
else {
break;
};
if [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13]
.into_iter()
.all(|byte| byte.is_ascii_digit())
&& [b0, b1, b2, b3, b4] == *b"-- ::"
{
// Replace the part of the string that has the checked pattern and an optional fractional part
let len_after = if let Some((b'.', s)) = remaining.split_first() {
1 + s.iter().position(|c| !c.is_ascii_digit()).unwrap_or(0)
} else {
0
};
// Length of replacement string is likely to match previous string
// (usually there's up to 6 digits after the decimal point)
dump.replace_range(
index..(index + PATTERN_LEN + len_after),
"AAAAAAAAAAAAAAAAAAAAAAAAAA",
);
}
}
}
let [before_chunks, after_chunks] =
[&before, &after].map(|dump| chunks(dump).collect::<BTreeSet<_>>());
let only_b = before_chunks
.difference(&after_chunks)
.copied()
.map(process_chunk)
.collect::<BTreeSet<_>>();
let only_a = after_chunks
.difference(&before_chunks)
.copied()
.map(process_chunk)
.collect::<BTreeSet<_>>();
// todo dont collect only_in_before?
let [mut only_in_before, mut only_in_after] =
[only_b.difference(&only_a), only_a.difference(&only_b)].map(|chunks| {
chunks
.map(|chunk| {
(
&**chunk,
// Used for ignoring formatting differences, especially indentation level, when
// determining which item in `only_in_before` corresponds to which item in `only_in_after`
chunk.replace([' ', '\t', '\r', '\n'], ""),
)
})
.collect::<Vec<_>>()
let normalized_chunk_vecs = [&before, &after]
// Remove identical items
.map(|dump| chunks(dump).collect::<BTreeSet<_>>())
.differences()
// Remove items without unwanted types of differences (if migrations are correct, then this removes everything)
.map(|chunks| {
chunks.map(|&i| normalize_chunk(i)).collect::<BTreeSet<_>>()
});
let [mut only_in_before, mut only_in_after] = normalized_chunk_vecs
.differences()
.map(|chunks| chunks.map(|i| &**i).collect::<Vec<_>>());
if only_in_before.is_empty() && only_in_after.is_empty() {
return;
}
// Build the panic message
let after_has_more = only_in_before.len() < only_in_after.len();
// outer iterator in the loop below should not be the one with empty strings, otherwise the empty strings
// would be equally similar to any other chunk
let (chunks_gt, chunks_lt) = if after_has_more {
only_in_before.resize_with(only_in_after.len(), Default::default);
(&mut only_in_after, &only_in_before)
let [mut chunks, mut other_chunks] = if after_has_more {
[only_in_before, only_in_after]
} else {
only_in_after.resize_with(only_in_before.len(), Default::default);
(&mut only_in_before, &only_in_after)
[only_in_after, only_in_before]
};
let mut output = label.to_owned();
// todo rename variables
for (before_chunk, before_chunk_filtered) in chunks_lt {
let default = Default::default();
//panic!("{:?}",(before_chunk.clone(),chunks_lt.clone()));
let (most_similar_chunk_index, (most_similar_chunk, _)) = chunks_gt
.iter()
.enumerate()
.max_by_key(|(_, (after_chunk, after_chunk_filtered))| {
if after_chunk
.split_once(|c: char| c.is_lowercase())
.unwrap_or_default()
.0
!= before_chunk
.split_once(|c: char| c.is_lowercase())
.unwrap_or_default()
.0
{
0
} else {
diff::chars(after_chunk_filtered, &before_chunk_filtered)
.into_iter()
.filter(|i| {
matches!(i, diff::Result::Both(c, _)
// `is_lowercase` increases accuracy for some trigger function diffs
if c.is_lowercase() || c.is_numeric())
})
.count()
}
})
.unwrap_or((0, &default));
let diffs = chunks
.into_iter()
.chain(std::iter::repeat(""))
.map_while(|chunk| {
let (most_similar_chunk_index, most_similar_chunk) = other_chunks
.iter()
.enumerate()
.max_by_key(|(_, other_chunk)| {
if sql_command_name(&chunk) != sql_command_name(&other_chunk) {
0
} else {
similarity(&chunk, &other_chunk)
}
})?;
output.push('\n');
let lines = if !after_has_more {
diff::lines(&before_chunk, most_similar_chunk)
} else {
diff::lines(most_similar_chunk, &before_chunk)
};
for line in lines {
match line {
diff::Result::Left(s) => write!(&mut output, "\n- {s}"),
diff::Result::Right(s) => write!(&mut output, "\n+ {s}"),
diff::Result::Both(s, _) => write!(&mut output, "\n {s}"),
}
.expect("failed to build string");
}
//write!(&mut output, "\n{most_similar_chunk_filtered}");
if !chunks_gt.is_empty() {
chunks_gt.swap_remove(most_similar_chunk_index);
}
}
// should have all been removed
assert_eq!(chunks_gt.len(), 0);
panic!("{output}");
let lines = if after_has_more {
diff::lines(most_similar_chunk, &chunk)
} else {
diff::lines(&chunk, most_similar_chunk)
};
other_chunks.swap_remove(most_similar_chunk_index);
Some(
lines
.into_iter()
.map(|line| {
Cow::Owned(match line {
diff::Result::Left(s) => format!("- {s}\n"),
diff::Result::Right(s) => format!("+ {s}\n"),
diff::Result::Both(s, _) => format!(" {s}\n"),
})
})
.chain([Cow::Borrowed("\n")])
.collect::<String>(),
)
});
panic!(
"{}",
std::iter::once(format!("{label}\n\n"))
.chain(diffs)
.collect::<String>()
);
}
fn process_chunk<'a>(result: &'a str) -> Cow<'a, str> {
if result.starts_with("CREATE TABLE ") {
// Allow column order to change
let mut lines = result
.lines()
.map(|line| line.strip_suffix(',').unwrap_or(line))
.collect::<Vec<_>>();
lines.sort_unstable_by_key(|line| -> (u8, &str) {
let placement = match line.chars().next() {
/// Extension for a pair of sets: yields each side's exclusive items.
trait Differences<T> {
  /// Items only in the first set, and items only in the second set.
  fn differences(&self) -> [btree_set::Difference<'_, T>; 2];
}

impl<T: Ord> Differences<T> for [BTreeSet<T>; 2] {
  /// Items only in `a`, and items only in `b`
  fn differences(&self) -> [btree_set::Difference<'_, T>; 2] {
    let [a, b] = self;
    // `a` and `b` are already `&BTreeSet<T>`; pass them directly rather than
    // re-borrowing (`&b` / `&a` would create a needless `&&BTreeSet`).
    [a.difference(b), b.difference(a)]
  }
}
/// Returns the leading SQL command name of a chunk: everything up to (but not
/// including) the first lowercase character, or `""` if there is none.
fn sql_command_name(chunk: &str) -> &str {
  match chunk.split_once(char::is_lowercase) {
    Some((command, _)) => command,
    None => "",
  }
}
/// Scores how similar two chunks are by counting the characters a character
/// diff reports as common to both, skipping whitespace.
fn similarity(chunk: &str, other_chunk: &str) -> usize {
  diff::chars(chunk, other_chunk)
    .into_iter()
    .filter(|item| {
      matches!(
        item,
        diff::Result::Both(c, _)
          // Prevent whitespace from affecting similarity level
          if !c.is_whitespace()
            && (
              // Increase accuracy for some trigger function diffs
              c.is_lowercase()
              // Preserve differences in names that contain a number
              || c.is_numeric()
            )
      )
    })
    .count()
}
fn normalize_chunk<'a>(chunk: &'a str) -> Cow<'a, str> {
let mut chunk = Cow::Borrowed(chunk);
let stripped_lines = chunk
.lines()
.map(|line| line.strip_suffix(',').unwrap_or(line));
// Sort column names, so differences in column order are ignored
if chunk.starts_with("CREATE TABLE ") {
let mut lines = stripped_lines.collect::<Vec<_>>();
sort_within_sections(&mut lines, |line| {
match line.chars().next() {
// CREATE
Some('C') => 0,
// Indented column name
Some(' ') => 1,
// End
Some(')') => 2,
_ => panic!("unrecognized part of `CREATE TABLE` statement: {line}"),
};
(placement, line)
}
});
Cow::Owned(lines.join("\n"))
} else if result.starts_with("CREATE VIEW") || result.starts_with("CREATE OR REPLACE VIEW") {
// Allow column order to change
let is_simple_select_statement = result.lines().enumerate().all(|(i, mut line)| {
line = line.trim_start();
match (i, line.chars().next()) {
(0, Some('C')) => true, // create
(1, Some('S')) => true, // select
(_, Some('F')) if line.ends_with(';') => true, // from
(_, Some(c)) if c.is_lowercase() => true, // column name
chunk = Cow::Owned(lines.join("\n"));
} else if chunk.starts_with("CREATE VIEW ") || chunk.starts_with("CREATE OR REPLACE VIEW ") {
let is_simple_select_statement = chunk.lines().enumerate().all(|(i, line)| {
match (i, line.trim_start().chars().next()) {
// CREATE
(0, Some('C')) => true,
// SELECT
(1, Some('S')) => true,
// FROM
(_, Some('F')) if line.ends_with(';') => true,
// Column name
(_, Some(c)) if c.is_lowercase() => true,
_ => false,
}
});
if is_simple_select_statement {
let mut lines = result
.lines()
.map(|line| line.strip_suffix(',').unwrap_or(line))
.collect::<Vec<_>>();
lines.sort_unstable_by_key(|line| -> (u8, &str) {
let placement = match line.trim_start().chars().next() {
let mut lines = stripped_lines.collect::<Vec<_>>();
sort_within_sections(&mut lines, |line| {
match line.trim_start().chars().next() {
// CREATE
Some('C') => 0,
// SELECT
Some('S') => 1,
// FROM
Some('F') => 3,
// Column name
_ => 2,
};
(placement, line)
}
});
Cow::Owned(lines.join("\n"))
} else {
Cow::Borrowed(result)
chunk = Cow::Owned(lines.join("\n"));
}
} else {
Cow::Borrowed(result)
}
// Replace timestamps with a constant string, so differences in timestamps are ignored
for index in 0.. {
// Performance optimization
let Some(byte) = chunk.as_bytes().get(index) else {
break;
};
if !byte.is_ascii_digit() {
continue;
}
// Check for this pattern: 0000-00-00 00:00:00
let Some((
&[a0, a1, a2, a3, b0, a4, a5, b1, a6, a7, b2, a8, a9, b3, a10, a11, b4, a12, a13],
remaining,
)) = chunk
.get(index..)
.and_then(|s| s.as_bytes().split_first_chunk::<PATTERN_LEN>())
else {
break;
};
if [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13]
.into_iter()
.all(|byte| byte.is_ascii_digit())
&& [b0, b1, b2, b3, b4] == *b"-- ::"
{
// Replace the part of the string that has the checked pattern and an optional fractional part
let len_after = if let Some((b'.', s)) = remaining.split_first() {
1 + s.iter().position(|c| !c.is_ascii_digit()).unwrap_or(0)
} else {
0
};
// Length of replacement string is likely to match previous string
// (there's up to 6 digits after the decimal point)
chunk.to_mut().replace_range(
index..(index + PATTERN_LEN + len_after),
"AAAAAAAAAAAAAAAAAAAAAAAAAA",
);
}
}
chunk
}
/// Sorts elements primarily by the section number that `section` assigns to
/// each element, and by element value within each section.
///
/// Takes a slice rather than `&mut Vec` (clippy::ptr_arg); callers passing
/// `&mut Vec<&T>` still work via deref coercion.
fn sort_within_sections<T: Ord + ?Sized>(vec: &mut [&T], mut section: impl FnMut(&T) -> u8) {
  // Unstable sort is fine: the (section, element) key is a total order.
  vec.sort_unstable_by_key(|&i| (section(i), i));
}
fn chunks(dump: &str) -> impl Iterator<Item = &str> {
@ -235,7 +267,8 @@ fn chunks(dump: &str) -> impl Iterator<Item = &str> {
while let Some(s) = remove_skipped_item_from_beginning(remaining) {
remaining = s.trim_start();
}
// `a` can't be empty because of trim_start
// `trim_start` guarantees that `result` is not empty
let (result, after_result) = remaining.split_once("\n\n")?;
remaining = after_result;
Some(result)
@ -245,14 +278,18 @@ fn chunks(dump: &str) -> impl Iterator<Item = &str> {
fn remove_skipped_item_from_beginning(s: &str) -> Option<&str> {
// Skip commented line
if let Some(after) = s.strip_prefix("--") {
Some(after.split_once('\n').unwrap_or_default().1)
Some(after_first_occurence(after, "\n"))
}
// Skip view definition that's replaced later (the first definition selects all nulls)
else if let Some(after) = s.strip_prefix("CREATE VIEW ") {
let (name, after_name) = after.split_once(' ').unwrap_or_default();
Some(after_name.split_once("\n\n").unwrap_or_default().1)
Some(after_first_occurence(after_name, "\n\n"))
.filter(|after_view| after_view.contains(&format!("\nCREATE OR REPLACE VIEW {name} ")))
} else {
None
}
}
/// Returns the part of `s` after the first occurrence of `pat`, or `""` when
/// `pat` is not found.
fn after_first_occurence<'a>(s: &'a str, pat: &str) -> &'a str {
  match s.split_once(pat) {
    Some((_, rest)) => rest,
    None => "",
  }
}

View File

@ -1,4 +1,4 @@
use crate::{newtypes::DbUrl, CommentSortType, SortType};
use crate::{newtypes::DbUrl, CommentSortType, SortType,schema_setup};
use chrono::{DateTime, TimeDelta, Utc};
use deadpool::Runtime;
use diesel::{
@ -427,7 +427,7 @@ pub async fn build_db_pool() -> LemmyResult<ActualDbPool> {
}))
.build()?;
crate::schema_setup::run(Default::default())?;
schema_setup::run(schema_setup::Options::default().run())?;
Ok(pool)
}

View File

@ -62,16 +62,16 @@ DROP VIEW community_aggregates_view CASCADE;
CREATE VIEW community_aggregates_view AS
SELECT
c.id,
c.name,
c.title,
c.description,
c.category_id,
c.creator_id,
c.removed,
c.published,
c.updated,
c.deleted,
c.nsfw,
c.name,
c.title,
c.description,
c.category_id,
c.creator_id,
c.removed,
c.published,
c.updated,
c.deleted,
c.nsfw,
(
SELECT
name
@ -287,23 +287,24 @@ DROP VIEW post_aggregates_view;
-- regen post view
CREATE VIEW post_aggregates_view AS
SELECT p.id,
SELECT
p.id,
p.name,
p.url,
p.body,
p.creator_id,
p.community_id,
p.removed,
p.locked,
p.published,
p.updated,
p.deleted,
p.nsfw,
p.stickied,
p.embed_title,
p.embed_description,
p.embed_html,
p.thumbnail_url,
p.url,
p.body,
p.creator_id,
p.community_id,
p.removed,
p.locked,
p.published,
p.updated,
p.deleted,
p.nsfw,
p.stickied,
p.embed_title,
p.embed_description,
p.embed_html,
p.thumbnail_url,
(
SELECT
u.banned
@ -537,15 +538,15 @@ DROP VIEW comment_aggregates_view;
CREATE VIEW comment_aggregates_view AS
SELECT
c.id,
c.creator_id,
c.post_id,
c.parent_id,
c.content,
c.removed,
c.read,
c.published,
c.updated,
c.deleted,
c.creator_id,
c.post_id,
c.parent_id,
c.content,
c.removed,
c.read,
c.published,
c.updated,
c.deleted,
(
SELECT
community_id

View File

@ -1014,7 +1014,7 @@ BEGIN
END
$$;
CREATE or replace TRIGGER refresh_community_follower
CREATE OR REPLACE TRIGGER refresh_community_follower
AFTER INSERT OR UPDATE OR DELETE OR TRUNCATE ON community_follower
FOR EACH statement
EXECUTE PROCEDURE refresh_community_follower ();
@ -1031,7 +1031,7 @@ BEGIN
END
$$;
CREATE or replace TRIGGER refresh_community_user_ban
CREATE OR REPLACE TRIGGER refresh_community_user_ban
AFTER INSERT OR UPDATE OR DELETE OR TRUNCATE ON community_user_ban
FOR EACH statement
EXECUTE PROCEDURE refresh_community_user_ban ();
@ -1048,12 +1048,12 @@ BEGIN
END
$$;
CREATE or replace TRIGGER refresh_post_like
CREATE OR REPLACE TRIGGER refresh_post_like
AFTER INSERT OR UPDATE OR DELETE OR TRUNCATE ON post_like
FOR EACH statement
EXECUTE PROCEDURE refresh_post_like ();
CREATE or replace VIEW community_moderator_view AS
CREATE OR REPLACE VIEW community_moderator_view AS
SELECT
*,
(
@ -1107,7 +1107,7 @@ SELECT
FROM
community_moderator cm;
CREATE or replace VIEW community_follower_view AS
CREATE OR REPLACE VIEW community_follower_view AS
SELECT
*,
(
@ -1161,7 +1161,7 @@ SELECT
FROM
community_follower cf;
CREATE or replace VIEW community_user_ban_view AS
CREATE OR REPLACE VIEW community_user_ban_view AS
SELECT
*,
(

View File

@ -1,5 +1,5 @@
ALTER TABLE activity
ADD COLUMN user_id INTEGER NOT NULL REFERENCES user_(id) ON UPDATE CASCADE ON DELETE CASCADE;
ADD COLUMN user_id INTEGER NOT NULL REFERENCES user_ (id) ON UPDATE CASCADE ON DELETE CASCADE;
ALTER TABLE activity
DROP COLUMN sensitive;

View File

@ -241,7 +241,8 @@ ALTER TABLE user_
ADD COLUMN matrix_user_id text UNIQUE;
-- Default is only for existing rows
alter table user_ alter column password_encrypted drop default;
ALTER TABLE user_
ALTER COLUMN password_encrypted DROP DEFAULT;
-- Update the user_ table with the local_user data
UPDATE
@ -269,30 +270,30 @@ CREATE VIEW user_alias_1 AS
SELECT
id,
actor_id,
admin,
avatar,
banned,
banner,
bio,
default_listing_type,
default_sort_type,
deleted,
email,
lang,
last_refreshed_at,
local,
matrix_user_id,
name,
password_encrypted,
preferred_username,
private_key,
public_key,
published,
send_notifications_to_email,
show_avatars,
show_nsfw,
theme,
updated
admin,
avatar,
banned,
banner,
bio,
default_listing_type,
default_sort_type,
deleted,
email,
lang,
last_refreshed_at,
local,
matrix_user_id,
name,
password_encrypted,
preferred_username,
private_key,
public_key,
published,
send_notifications_to_email,
show_avatars,
show_nsfw,
theme,
updated
FROM
user_;
@ -300,30 +301,30 @@ CREATE VIEW user_alias_2 AS
SELECT
id,
actor_id,
admin,
avatar,
banned,
banner,
bio,
default_listing_type,
default_sort_type,
deleted,
email,
lang,
last_refreshed_at,
local,
matrix_user_id,
name,
password_encrypted,
preferred_username,
private_key,
public_key,
published,
send_notifications_to_email,
show_avatars,
show_nsfw,
theme,
updated
admin,
avatar,
banned,
banner,
bio,
default_listing_type,
default_sort_type,
deleted,
email,
lang,
last_refreshed_at,
local,
matrix_user_id,
name,
password_encrypted,
preferred_username,
private_key,
public_key,
published,
send_notifications_to_email,
show_avatars,
show_nsfw,
theme,
updated
FROM
user_;

View File

@ -1,3 +1,3 @@
ALTER TABLE local_site
ADD COLUMN federation_debug boolean DEFAULT false not null;
ADD COLUMN federation_debug boolean DEFAULT FALSE NOT NULL;

View File

@ -86,10 +86,7 @@ ALTER TABLE local_user
DROP TYPE sort_type_enum__;
-- Remove int to float conversions that were automatically added to index filters
DROP INDEX
idx_comment_aggregates_nonzero_hotrank,
idx_community_aggregates_nonzero_hotrank,
idx_post_aggregates_nonzero_hotrank;
DROP INDEX idx_comment_aggregates_nonzero_hotrank, idx_community_aggregates_nonzero_hotrank, idx_post_aggregates_nonzero_hotrank;
CREATE INDEX idx_community_aggregates_nonzero_hotrank ON community_aggregates (published)
WHERE

View File

@ -16,9 +16,13 @@ CREATE FUNCTION forbid_diesel_cli ()
LANGUAGE plpgsql
AS $$
BEGIN
IF current_setting('lemmy.enable_migrations', TRUE) IS DISTINCT FROM 'on' THEN
RAISE 'migrations must be managed using lemmy_server instead of diesel CLI';
END IF;
IF NOT EXISTS (
SELECT
FROM
pg_locks
WHERE (locktype, pid, objid) = ('advisory', pg_backend_pid(), 0)) THEN
RAISE 'migrations must be managed using lemmy_server instead of diesel CLI';
END IF;
RETURN NULL;
END;
$$;

View File

@ -114,13 +114,19 @@ enum CmdSubcommand {
Migration {
#[command(subcommand)]
subcommand: MigrationSubcommand,
#[arg(short, long, conflicts_with("number"))]
all: bool,
#[arg(short, long, default_value_t = 1)]
number: u64,
},
}
#[derive(Subcommand, Debug)]
enum MigrationSubcommand {
/// Run all pending migrations.
/// Run up.sql for the specified migrations.
Run,
/// Run down.sql for the specified migrations.
Revert,
}
/// Placing the main function in lib.rs allows other crates to import it and embed Lemmy
@ -128,11 +134,22 @@ pub async fn start_lemmy_server(args: CmdArgs) -> LemmyResult<()> {
// Print version number to log
println!("Lemmy v{VERSION}");
if let Some(CmdSubcommand::Migration { subcommand }) = args.subcommand {
let options = match subcommand {
MigrationSubcommand::Run => schema_setup::Options::default(),
// todo test
if let Some(CmdSubcommand::Migration {
subcommand,
all,
number,
}) = args.subcommand
{
let mut options = match subcommand {
MigrationSubcommand::Run => schema_setup::Options::default().run(),
MigrationSubcommand::Revert => schema_setup::Options::default().revert(),
};
if !all {
options = options.limit(number);
}
schema_setup::run(options)?;
return Ok(());