[#3213] rescue around potentially-raising Repo.insert_all/_ calls. Misc. improvements (docs etc.).

Ivan Tashkinov 2021-02-13 22:01:11 +03:00
parent 5992382cf8
commit 349b8b0f4f
6 changed files with 51 additions and 22 deletions

CHANGELOG.md

@@ -33,7 +33,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- Admin API: Reports now ordered by newest
</details>
- Extracted object hashtags into separate table in order to improve hashtag timeline performance (via background migration in `Pleroma.Migrators.HashtagsTableMigrator`).
- Improved hashtag timeline performance (requires a background migration).
### Added

config/config.exs

@@ -556,7 +556,6 @@ config :pleroma, Oban,
    remote_fetcher: 2,
    attachments_cleanup: 1,
    new_users_digest: 1,
    hashtags_cleanup: 1,
    mute_expire: 5
  ],
  plugins: [Oban.Plugins.Pruner],

config/description.exs

@@ -473,6 +473,20 @@ config :pleroma, :config_description, [
      }
    ]
  },
  %{
    group: :pleroma,
    key: :populate_hashtags_table,
    type: :group,
    description: "`populate_hashtags_table` background migration settings",
    children: [
      %{
        key: :sleep_interval_ms,
        type: :integer,
        description:
          "Sleep interval between each chunk of processed records in order to decrease the load on the system (defaults to 0 and should be kept at the default on most instances)."
      }
    ]
  },
  %{
    group: :pleroma,
    key: :instance,
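How a background migration typically consumes a setting like `sleep_interval_ms` (a hypothetical sketch; `record_chunks/0` and `process_chunk/1` are placeholder names, not Pleroma's actual helpers):

```elixir
# Hypothetical sketch: read the configured pause and sleep between chunks of a
# background migration so the extra load on the database stays low.
# `record_chunks/0` and `process_chunk/1` are placeholders, not real functions.
sleep_interval = Pleroma.Config.get([:populate_hashtags_table, :sleep_interval_ms], 0)

Enum.each(record_chunks(), fn chunk ->
  process_chunk(chunk)
  Process.sleep(sleep_interval)
end)
```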

docs/configuration/cheatsheet.md

@@ -65,6 +65,12 @@ To add configuration to your config file, you can copy it from the base config.
* `show_reactions`: Let favourites and emoji reactions be viewed through the API (default: `true`).
* `password_reset_token_validity`: The time after which reset tokens aren't accepted anymore, in seconds (default: one day).
## :database
* `improved_hashtag_timeline`: If `true`, hashtag timelines are served from the `hashtags` table; when `false`, object-embedded hashtags are used instead (slower). Automatically set to `true` (unless overridden) once `HashtagsTableMigrator` completes.
## Background migrations
* `populate_hashtags_table/sleep_interval_ms`: Sleep interval between each chunk of processed records, used to decrease the load on the system (defaults to `0` and should be kept at the default on most instances); see the example after this section.
## Welcome
* `direct_message`: welcome message sent as a direct message.
  * `enabled`: Enables sending a direct message to a newly registered user. Defaults to `false`.
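A hedged example of setting both options above in an instance's config file (based on the `:database` and `:populate_hashtags_table` sections documented here; the values are illustrative, not recommendations):

```elixir
# In your instance config (the file referenced at the top of this cheatsheet).
# Illustrative values only.

# Serve hashtag timelines from the `hashtags` table (auto-enabled once the
# background migration completes, unless explicitly overridden).
config :pleroma, :database, improved_hashtag_timeline: true

# Pause 25 ms between chunks of the background migration to reduce load;
# most instances should leave this at the default of 0.
config :pleroma, :populate_hashtags_table, sleep_interval_ms: 25
```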

lib/pleroma/hashtag.ex

@@ -47,6 +47,7 @@ defmodule Pleroma.Hashtag do
        |> Map.merge(%{inserted_at: timestamp, updated_at: timestamp})
      end)
    try do
      with {:ok, %{query_op: hashtags}} <-
             Multi.new()
             |> Multi.insert_all(:insert_all_op, Hashtag, structs, on_conflict: :nothing)
@@ -58,6 +59,9 @@
      else
        {:error, _name, value, _changes_so_far} -> {:error, value}
      end
    rescue
      e -> {:error, e}
    end
  end
  def changeset(%Hashtag{} = struct, params) do
@@ -74,8 +78,9 @@
             where: hto.object_id == ^object_id,
             select: hto.hashtag_id
           )
           |> Repo.delete_all() do
      delete_unreferenced(hashtag_ids)
           |> Repo.delete_all(),
         {:ok, unreferenced_count} <- delete_unreferenced(hashtag_ids) do
      {:ok, length(hashtag_ids), unreferenced_count}
    end
  end
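The pattern introduced above deserves a note: `Repo.insert_all/3` (including `Multi.insert_all` executed via `Repo.transaction/1`) raises on database errors instead of returning an error tuple, so callers that want a plain `{:ok, _} | {:error, _}` contract wrap it in `try/rescue`. A minimal standalone sketch of that idea (the module, table, and function names are hypothetical):

```elixir
defmodule MyApp.SafeInsert do
  # Minimal sketch, assuming an Ecto repo at Pleroma.Repo and a hypothetical
  # "things" table: Repo.insert_all/3 raises (e.g. Postgrex.Error) rather than
  # returning {:error, _}, so the rescue normalizes failures into a tagged tuple.
  alias Pleroma.Repo

  def insert_all_safe(rows) when is_list(rows) do
    try do
      {count, _returned} = Repo.insert_all("things", rows, on_conflict: :nothing)
      {:ok, count}
    rescue
      e -> {:error, e}
    end
  end
end
```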

lib/pleroma/migrators/hashtags_table_migrator.ex

@@ -214,15 +214,20 @@ defmodule Pleroma.Migrators.HashtagsTableMigrator do
    maps = Enum.map(hashtag_records, &%{hashtag_id: &1.id, object_id: object.id})
    expected_rows = length(hashtag_records)
    base_error =
      "ERROR when inserting #{expected_rows} hashtags_objects for obj. #{object.id}"
    try do
      with {^expected_rows, _} <- Repo.insert_all("hashtags_objects", maps) do
        object.id
      else
        e ->
          error =
            "ERROR when inserting #{expected_rows} hashtags_objects " <>
              "for object #{object.id}: #{inspect(e)}"
          Logger.error(error)
          Logger.error("#{base_error}: #{inspect(e)}")
          Repo.rollback(object.id)
      end
    rescue
      e ->
        Logger.error("#{base_error}: #{inspect(e)}")
        Repo.rollback(object.id)
    end
  else
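For context on the `Repo.rollback(object.id)` calls above (the enclosing transaction sits outside this hunk): `Repo.rollback/1` may only be called inside `Repo.transaction/1`, and it makes that transaction return `{:error, rolled_back_value}`. A hedged sketch of the interaction, reusing the hunk's variables:

```elixir
# Hedged sketch: the surrounding Repo.transaction/1 is not shown in the hunk above.
# Repo.rollback/1 aborts the transaction and surfaces the rolled-back value, so a
# failed object comes back as {:error, object.id} and a success as {:ok, object.id}.
result =
  Repo.transaction(fn ->
    case Repo.insert_all("hashtags_objects", maps) do
      {^expected_rows, _} -> object.id
      _mismatch -> Repo.rollback(object.id)
    end
  end)
```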