Commit 5c0e1039 authored by rinpatch

Chunk the notification type backfill migration

Long-term we want that migration to be done entirely in SQL,
but for now this is a hotfix to not cause OOMs on large databases.

This is using a homegrown version of `Repo.stream`; it's worse in
terms of performance than the upstream version since it doesn't use the
same prepared query for chunk queries, but unlike the upstream it
supports preloads.
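
As a rough illustration of the change (the schema, field, and association names in this sketch are assumptions, not taken from the diff): instead of materializing the whole result set with `Repo.all/1`, the backfill now walks the table in fixed-size chunks, and preloads still work because each chunk is fetched with an ordinary `Repo.all/1` call.

# Hypothetical usage sketch only.
import Ecto.Query

query =
  from(n in Pleroma.Notification,
    where: is_nil(n.type),
    preload: [:activity]
  )

# Old approach: Repo.all(query) loads every row (plus preloads) at once,
# which can OOM on large databases.
# New approach: process the rows in chunks of 100; each chunk is a separate,
# keyset-paginated query, so only ~100 preloaded records are in memory at a time.
query
|> Pleroma.Repo.chunk_stream(100)
|> Enum.each(fn notification ->
  # handle one record at a time; only the current chunk is loaded
  IO.inspect(notification.id)
end)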
parent b536e571
@@ -18,7 +18,7 @@ def fill_in_notification_types do
       )
 
     query
-    |> Repo.all()
+    |> Repo.chunk_stream(100)
     |> Enum.each(fn notification ->
       type =
         notification.activity
@@ -8,6 +8,7 @@ defmodule Pleroma.Repo do
     adapter: Ecto.Adapters.Postgres,
     migration_timestamps: [type: :naive_datetime_usec]
 
+  import Ecto.Query
   require Logger
 
   defmodule Instrumenter do
@@ -78,6 +79,33 @@ def check_migrations_applied!() do
       :ok
     end
   end
+
+  def chunk_stream(query, chunk_size) do
+    # We don't actually need start and end functions of resource streaming,
+    # but it seems to be the only way to not fetch records one-by-one and
+    # have individual records be the elements of the stream, instead of
+    # lists of records
+    Stream.resource(
+      fn -> 0 end,
+      fn
+        last_id ->
+          query
+          |> order_by(asc: :id)
+          |> where([r], r.id > ^last_id)
+          |> limit(^chunk_size)
+          |> all()
+          |> case do
+            [] ->
+              {:halt, last_id}
+
+            records ->
+              last_id = List.last(records).id
+              {records, last_id}
+          end
+      end,
+      fn _ -> :ok end
+    )
+  end
 end
 
 defmodule Pleroma.Repo.UnappliedMigrationsError do
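
A minimal consumption sketch for the new helper (hypothetical usage, not part of the commit): each chunk is fetched with a keyset condition on `id` rather than an OFFSET, and because `Stream.resource/3` is lazy, nothing is queried until the stream is consumed.

# Hypothetical example: `query` is any Ecto query over a table with an
# integer `id` column. Only the first chunk query (LIMIT 100) runs here,
# because Enum.take/2 stops pulling after five elements.
query
|> Pleroma.Repo.chunk_stream(100)
|> Stream.map(& &1.id)
|> Enum.take(5)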