class FixReblogsInFeeds < ActiveRecord::Migration[5.1]
  def up
    redis = Redis.current
    fm = FeedManager.instance

    # Old scheme:
    # Each user's feed zset had a series of score:value entries,
    # where "regular" statuses had the same score and value (their
    # ID). Reblogs had a score of the reblogging status' ID, and a
    # value of the reblogged status' ID.
    #
    # New scheme:
    # The feed contains only entries with the same score and value.
    # Reblogs result in the reblogging status being added to the
    # feed, with an entry in a reblog tracking zset (where the score
    # is once again set to the reblogging status' ID, and the value
    # is set to the reblogged status' ID). This is safe for Redis'
    # float coercion because in this reblog tracking zset, we only
    # need the reblogging status' ID to be able to stop tracking
    # entries after they have gotten too far down the feed, which
    # does not require an exact value.
    #
    # This process reads every feed and performs up to 3 writes for
    # each reblog, so we run it as a server-side Lua script to avoid
    # per-entry round-trip overhead between Ruby and Redis.
    script = <<-LUA
      local timeline_key = KEYS[1]
      local reblog_key = KEYS[2]

      -- So, first, we iterate over the user's feed to find any reblogs.
      local items = redis.call('zrange', timeline_key, 0, -1, 'withscores')

      -- ZRANGE WITHSCORES returns a flat member,score,member,score... list,
      -- so step through it two entries at a time.
      for i = 1, #items, 2 do
        local reblogged_id = items[i]
        local reblogging_id = items[i + 1]
        if (reblogged_id ~= reblogging_id) then
          -- The score and value don't match, so this is a reblog.
          -- (note that we're transitioning from IDs < 53 bits so we
          -- don't have to worry about the loss of precision)

          -- Remove the old entry
          redis.call('zrem', timeline_key, reblogged_id)

          -- Add a new one for the reblogging status
          redis.call('zadd', timeline_key, reblogging_id, reblogging_id)

          -- Track the fact that this was a reblog
          redis.call('zadd', reblog_key, reblogging_id, reblogged_id)
        end
      end
    LUA

    # Load the script into the Redis script cache once; each feed is then
    # migrated with a cheap EVALSHA call instead of shipping the script body.
    script_hash = redis.script(:load, script)

    # find_each is batched on the database side.
    User.includes(:account).find_each do |user|
      account = user.account
      timeline_key = fm.key(:home, account.id)
      reblog_key = fm.key(:home, account.id, 'reblogs')
      redis.evalsha(script_hash, [timeline_key, reblog_key])
    end
  end
end