From 9b9c825b6868d1e458d20b4b14531a14e3469757 Mon Sep 17 00:00:00 2001
From: "Ariel Shaqed (Scolnicov)"
Date: Thu, 3 Sep 2020 10:57:07 +0300
Subject: [PATCH 001/158] Initial DDL: get and return tasks

---
 server/ddl.sql | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)
 create mode 100644 server/ddl.sql

diff --git a/server/ddl.sql b/server/ddl.sql
new file mode 100644
index 00000000000..75baced2ea8
--- /dev/null
+++ b/server/ddl.sql
@@ -0,0 +1,92 @@
+CREATE SCHEMA IF NOT EXISTS parade;
+
+-- (requires extension pgcrypto)
+
+CREATE TYPE task_status_code_value AS ENUM (
+    'pending',     -- waiting for an actor to perform it (new or being retried)
+    'in-progress', -- task is being performed by an actor
+    'aborted',     -- an actor has aborted this task with message, will not be reissued
+    'completed'    -- an actor has completed this task with message, will not be reissued
+);
+
+CREATE TABLE IF NOT EXISTS tasks (
+    id VARCHAR(32) NOT NULL PRIMARY KEY,    -- nanoid
+
+    action VARCHAR(128) NOT NULL,   -- name (type) of action to perform
+    body JSONB,                     -- data used by action
+    status TEXT,                    -- status text defined by action, visible to action
+
+    status_code TASK_STATUS_CODE_VALUE NOT NULL, -- internal status code, used by parade to issue tasks
+    num_tries INTEGER NOT NULL DEFAULT 0,
+    max_tries INTEGER,
+    actor_id VARCHAR(32),           -- ID of performing actor if in-progress
+    action_deadline TIMESTAMPTZ,    -- offer this task to other actors once action_deadline has elapsed
+    performance_token UUID
+-- TODO(ariels): add a lock token to each row, set when leasing the
+-- task, and accept task completion only when lock token is unchanged.
+);
+
+CREATE TABLE IF NOT EXISTS task_dependencies (
+    after VARCHAR(32) REFERENCES tasks(id)  -- after this task ID is done
+        ON DELETE CASCADE ON UPDATE CASCADE,
+    run VARCHAR(32) REFERENCES tasks(id)    -- run this task ID
+        ON DELETE CASCADE ON UPDATE CASCADE
+);
+
+CREATE INDEX IF NOT EXISTS task_dependencies_after ON task_dependencies(after);
+CREATE INDEX IF NOT EXISTS task_dependencies_run ON task_dependencies(run);
+
+-- Returns true if task with this id, code and deadline can
+-- be allocated.
+CREATE OR REPLACE FUNCTION can_allocate_task(id VARCHAR(32), code TASK_STATUS_CODE_VALUE, deadline TIMESTAMPTZ)
+RETURNS BOOLEAN
+LANGUAGE sql IMMUTABLE AS $$
+    SELECT (code = 'pending' OR (code = 'in-progress' AND deadline < NOW())) AND
+        id NOT IN (SELECT DISTINCT run AS id FROM task_dependencies)
+$$;
+
+-- Marks up to `max_tasks' on one of `actions' as in-progress and
+-- belonging to `actor_id' and returns their ids and a "performance
+-- token".  Both must be returned to complete the task successfully.
+CREATE OR REPLACE FUNCTION own_tasks(
+    max_tasks INTEGER, actions VARCHAR(128) ARRAY, owner_id VARCHAR(32)
+)
+RETURNS TABLE(task_id VARCHAR(32), token UUID)
+LANGUAGE sql VOLATILE AS $$
+    UPDATE tasks
+    SET actor_id = owner_id, status_code = 'in-progress', num_tries = num_tries + 1, performance_token = gen_random_uuid()
+    WHERE id = (
+        SELECT id
+        FROM tasks
+        WHERE can_allocate_task(id, status_code, action_deadline) AND
+            ARRAY[action] <@ actions AND
+            (max_tries IS NULL OR num_tries < max_tries)
+        -- maybe: AND not_before <= NOW()
+        -- maybe: ORDER BY priority (eventually)
+        FOR UPDATE SKIP LOCKED
+        LIMIT max_tasks)
+    RETURNING id, performance_token
+$$;
+
+-- Returns an owned task id that was locked with token.  It is an error
+-- to return a task with the wrong token; that can happen if the
+-- deadline expired and the task was given to another actor.
+CREATE OR REPLACE PROCEDURE return_task(
+    task_id VARCHAR(32), token UUID, result_status TEXT, result_status_code TASK_STATUS_CODE_VALUE
+)
+LANGUAGE plpgsql AS $$
+DECLARE
+    num_updated INTEGER;
+BEGIN
+    UPDATE tasks
+    SET status = result_status, status_code = result_status_code, actor_id = NULL, performance_token = NULL
+    WHERE id = task_id AND performance_token = token;
+
+    GET DIAGNOSTICS num_updated := ROW_COUNT;
+    IF num_updated != 1 THEN
+       RAISE EXCEPTION 'found % tasks for task % with performance token % when setting status % code %; perhaps another task took it?', num_updated, task_id, token, result_status, result_status_code;
+    END IF;
+
+    DELETE FROM task_dependencies WHERE after=task_id;
+END;
+$$;

From 66f43229a73063c99f9d2035247de30fc268e808 Mon Sep 17 00:00:00 2001
From: "Ariel Shaqed (Scolnicov)"
Date: Sun, 6 Sep 2020 19:52:23 +0300
Subject: [PATCH 002/158] Add simple Go wrapper and test DDL

This primarily tests the SQL DDL, in Go.
---
 ddl/ddl.go              | 140 ++++++++++++++++++
 {server => ddl}/ddl.sql |  55 ++++---
 ddl/ddl_test.go         | 320 ++++++++++++++++++++++++++++++++++++++++
 go.mod                  |   1 +
 4 files changed, 496 insertions(+), 20 deletions(-)
 create mode 100644 ddl/ddl.go
 rename {server => ddl}/ddl.sql (66%)
 create mode 100644 ddl/ddl_test.go

diff --git a/ddl/ddl.go b/ddl/ddl.go
new file mode 100644
index 00000000000..c5dfb85fe00
--- /dev/null
+++ b/ddl/ddl.go
@@ -0,0 +1,140 @@
+package ddl
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/jackc/pgtype"
+	"github.com/jmoiron/sqlx"
+)
+
+type TaskId string
+
+type ActorId string
+
+type PerformanceToken pgtype.UUID
+
+func (dst *PerformanceToken) Scan(src interface{}) error {
+	var scanned pgtype.UUID
+	if err := scanned.Scan(src); err != nil {
+		return err
+	}
+	*dst = PerformanceToken(scanned)
+	return nil
+}
+
+func (src PerformanceToken) Value() (driver.Value, error) {
+	return pgtype.UUID(src).Value()
+}
+
+func (src PerformanceToken) String() string {
+	res := strings.Builder{}
+	offset := 0
+	addBytes := func(n int) {
+		for i := 0; i < n; i++ {
+			res.WriteString(fmt.Sprintf("%02x", src.Bytes[offset+i]))
+		}
+		offset += n
+	}
+	addBytes(4)
+	res.WriteString("-")
+	addBytes(2)
+	res.WriteString("-")
+	addBytes(2)
+	res.WriteString("-")
+	addBytes(2)
+	res.WriteString("-")
+	addBytes(6)
+	return res.String()
+}
+
+type TaskStatusCodeValue string
+
+const (
+	// TASK_PENDING indicates a task is waiting for an actor to perform it (new or being
+	// retried)
+	TASK_PENDING TaskStatusCodeValue = "pending"
+	// TASK_IN_PROGRESS indicates a task is being performed by an actor.
+	TASK_IN_PROGRESS TaskStatusCodeValue = "in-progress"
+	// TASK_ABORTED indicates an actor has aborted this task with message, will not be reissued
+	TASK_ABORTED TaskStatusCodeValue = "aborted"
+	// TASK_COMPLETED indicates an actor has completed this task with message, will not be reissued
+	TASK_COMPLETED TaskStatusCodeValue = "completed"
+)
+
+// TaskData is a row in table "tasks". It describes a task to perform.
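+// Fields correspond to the table's columns through their `db` struct tags, as used by sqlx.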
+type TaskData struct {
+	Id     TaskId `db:"task_id"`
+	Action string `db:"action"`
+	// Body is JSON-formatted
+	Body              *string           `db:"body"`
+	Status            *string           `db:"status"`
+	StatusCode        string            `db:"status_code"`
+	NumTries          int               `db:"num_tries"`
+	MaxTries          *int              `db:"max_tries"`
+	ActorId           ActorId           `db:"actor_id"`
+	ActionDeadline    *time.Time        `db:"action_deadline"`
+	PerformanceToken  *PerformanceToken `db:"performance_token"`
+	FinishChannelName *string           `db:"finish_channel"`
+}
+
+// TaskDependencyData is a row in table "task_dependencies". It describes that task Run must
+// occur after task After succeeds.
+type TaskDependencyData struct {
+	After TaskId
+	Run   TaskId
+}
+
+// OwnedTaskData is a row returned from "SELECT * FROM own_tasks(...)".
+type OwnedTaskData struct {
+	Id    TaskId           `db:"task_id"`
+	Token PerformanceToken `db:"token"`
+}
+
+// OwnTasks owns for actor and returns up to maxTasks tasks for performing any of actions.
+func OwnTasks(conn *sqlx.DB, actor ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]OwnedTaskData, error) {
+	// Use sqlx.In to expand slice actions
+	query, args, err := sqlx.In(`SELECT * FROM own_tasks(?, ARRAY[?], ?, ?)`, maxTasks, actions, actor, maxDuration)
+	if err != nil {
+		return nil, fmt.Errorf("expand own tasks query: %w", err)
+	}
+	query = conn.Rebind(query)
+	rows, err := conn.Queryx(query, args...)
+	if err != nil {
+		return nil, fmt.Errorf("try to own tasks: %w", err)
+	}
+	defer rows.Close()
+	tasks := make([]OwnedTaskData, 0, maxTasks)
+	for rows.Next() {
+		var task OwnedTaskData
+		if err = rows.StructScan(&task); err != nil {
+			return nil, fmt.Errorf("failed to scan row %+v: %w", rows, err)
+		}
+		tasks = append(tasks, task)
+	}
+	return tasks, nil
+}
+
+var InvalidTokenError = errors.New("performance token invalid (action may have exceeded deadline)")
+
+// ReturnTask returns taskId which was acquired using the specified performanceToken, giving it
+// resultStatus and resultStatusCode. It returns InvalidTokenError if the performanceToken is
+// invalid; this happens when ReturnTask is called after its deadline expires, or due to a logic
+// error.
+func ReturnTask(conn *sqlx.DB, taskId TaskId, token PerformanceToken, resultStatus string, resultStatusCode TaskStatusCodeValue) error {
+	var res int
+	query, args, err := sqlx.In(`SELECT return_task(?, ?, ?, ?)`, taskId, token, resultStatus, resultStatusCode)
+	if err != nil {
+		return fmt.Errorf("expand return_task query: %w", err)
+	}
+	query = conn.Rebind(query)
+	err = conn.Get(&res, query, args...)
+ if err != nil { + return fmt.Errorf("return_task: %w", err) + } + + if res != 1 { + return InvalidTokenError + } + + return nil +} diff --git a/server/ddl.sql b/ddl/ddl.sql similarity index 66% rename from server/ddl.sql rename to ddl/ddl.sql index 75baced2ea8..7cd8b3d99cd 100644 --- a/server/ddl.sql +++ b/ddl/ddl.sql @@ -1,3 +1,5 @@ +CREATE EXTENSION pgcrypto; + CREATE SCHEMA IF NOT EXISTS parade; -- (requires extensioon pgcrypto) @@ -10,26 +12,27 @@ CREATE TYPE task_status_code_value AS ENUM ( ); CREATE TABLE IF NOT EXISTS tasks ( - id VARCHAR(32) NOT NULL PRIMARY KEY, -- nanoid + id VARCHAR(64) NOT NULL PRIMARY KEY, -- nanoid action VARCHAR(128) NOT NULL, -- name (type) of action to perform body JSONB, -- data used by action status TEXT, -- status text defined by action, visible to action - status_code TASK_STATUS_CODE_VALUE NOT NULL, -- internal status code, used by parade to issue tasks + status_code TASK_STATUS_CODE_VALUE NOT NULL DEFAULT 'pending', -- internal status code, used by parade to issue tasks num_tries INTEGER NOT NULL DEFAULT 0, max_tries INTEGER, - actor_id VARCHAR(32), -- ID of performing actor if in-progress + actor_id VARCHAR(64), -- ID of performing actor if in-progress action_deadline TIMESTAMPTZ, -- offer this task to other actors once action_deadline has elapsed - performance_token UUID + performance_token UUID, + finish_channel VARCHAR(64) -- (if non-NULL) name of a channel to NOTIFY when this task ends -- TODO(ariels): add a lock token to each row, set when leasing the -- task, and accept task completion only when lock token is unchanged. ); CREATE TABLE IF NOT EXISTS task_dependencies ( - after VARCHAR(32) REFERENCES tasks(id) -- after this task ID is done + after VARCHAR(64) REFERENCES tasks(id) -- after this task ID is done ON DELETE CASCADE ON UPDATE CASCADE, - run VARCHAR(32) REFERENCES tasks(id) -- run this task ID + run VARCHAR(64) REFERENCES tasks(id) -- run this task ID ON DELETE CASCADE ON UPDATE CASCADE ); @@ -38,7 +41,7 @@ CREATE INDEX IF NOT EXISTS task_dependencies_run ON task_dependencies(run); -- Returns true if task with this id, code and deadline can -- be allocated. -CREATE OR REPLACE FUNCTION can_allocate_task(id VARCHAR(32), code TASK_STATUS_CODE_VALUE, deadline TIMESTAMPTZ) +CREATE OR REPLACE FUNCTION can_allocate_task(id VARCHAR(64), code TASK_STATUS_CODE_VALUE, deadline TIMESTAMPTZ) RETURNS BOOLEAN LANGUAGE sql IMMUTABLE AS $$ SELECT (code = 'pending' OR (code = 'in-progress' AND deadline < NOW())) AND @@ -49,13 +52,17 @@ $$; -- belonging to `actor_id' and returns their ids and a "performance -- token". Both must be returned to complete the task successfully. CREATE OR REPLACE FUNCTION own_tasks( - max_tasks INTEGER, actions VARCHAR(128) ARRAY, owner_id VARCHAR(32) + max_tasks INTEGER, actions VARCHAR(128) ARRAY, owner_id VARCHAR(64), max_duration INTERVAL ) -RETURNS TABLE(task_id VARCHAR(32), token UUID) +RETURNS TABLE(task_id VARCHAR(64), token UUID) LANGUAGE sql VOLATILE AS $$ UPDATE tasks - SET actor_id = owner_id, status_code = 'in-progress', num_tries = num_tries + 1, performance_token = gen_random_uuid() - WHERE id = ( + SET actor_id = owner_id, + status_code = 'in-progress', + num_tries = num_tries + 1, + performance_token = gen_random_uuid(), + action_deadline = NOW() + max_duration -- NULL if max_duration IS NULL + WHERE id IN ( SELECT id FROM tasks WHERE can_allocate_task(id, status_code, action_deadline) AND @@ -71,22 +78,30 @@ $$; -- Returns an owned task id that was locked with token. 
It is an error -- to return a task with the wrong token; that can happen if the -- deadline expired and the task was given to another actor. -CREATE OR REPLACE PROCEDURE return_task( - task_id VARCHAR(32), token UUID, result_status TEXT, result_status_code TASK_STATUS_CODE_VALUE -) +CREATE OR REPLACE FUNCTION return_task( + task_id VARCHAR(64), token UUID, result_status TEXT, result_status_code TASK_STATUS_CODE_VALUE +) RETURNS INTEGER LANGUAGE plpgsql AS $$ DECLARE num_updated INTEGER; + channel VARCHAR(64); BEGIN - UPDATE tasks - SET status = result_status, status_code = result_status_code, actor_id = NULL, performance_token = NULL - WHERE id = task_id AND performance_token = token; + UPDATE tasks INTO channel + SET status = result_status, + status_code = result_status_code, + actor_id = NULL, + performance_token = NULL + WHERE id = task_id AND performance_token = token + RETURNING finish_channel; GET DIAGNOSTICS num_updated := ROW_COUNT; - IF num_updated != 1 THEN - RAISE EXCEPTION 'found % tasks for task % with performance token % when setting status % code %; perhaps another task took it?', num_updated, task_id, token, result_status, result_status_code; - END IF; DELETE FROM task_dependencies WHERE after=task_id; + + IF channel IS NOT NULL THEN + SELECT pg_notify(channel, NULL); + END IF; + + RETURN num_updated; END; $$; diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go new file mode 100644 index 00000000000..874e7c1db71 --- /dev/null +++ b/ddl/ddl_test.go @@ -0,0 +1,320 @@ +package ddl_test + +import ( + "errors" + "fmt" + "log" + "os" + "sort" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + _ "github.com/jackc/pgx/v4/stdlib" + "github.com/treeverse/parade/ddl" + + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" +) + +const ( + dbContainerTimeoutSeconds = 10 * 60 // 10 min + dbName = "parade_db" +) + +var ( + pool *dockertest.Pool + databaseURI string + db *sqlx.DB +) + +// taskIdSlice attaches the methods of sort.Interface to []TaskId. +type taskIdSlice []ddl.TaskId + +func (p taskIdSlice) Len() int { return len(p) } +func (p taskIdSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p taskIdSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// runDBInstance starts a test Postgres server inside container pool, and returns a connection +// URI and a closer function. 
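+// The returned closer purges the container; callers must invoke it to release the database.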
+func runDBInstance(pool *dockertest.Pool) (string, func()) { + resource, err := pool.Run("postgres", "11", []string{ + "POSTGRES_USER=parade", + "POSTGRES_PASSWORD=parade", + "POSTGRES_DB=parade_db", + }) + if err != nil { + log.Fatalf("could not start postgresql: %s", err) + } + + // set cleanup + closer := func() { + err := pool.Purge(resource) + if err != nil { + log.Fatalf("could not kill postgres container") + } + } + + // expire, just to make sure + err = resource.Expire(dbContainerTimeoutSeconds) + if err != nil { + log.Fatalf("could not expire postgres container") + } + + // create connection + var conn *sqlx.DB + uri := fmt.Sprintf("postgres://parade:parade@localhost:%s/"+dbName+"?sslmode=disable", resource.GetPort("5432/tcp")) + err = pool.Retry(func() error { + var err error + conn, err = sqlx.Connect("pgx", uri) + if err != nil { + return err + } + return conn.Ping() + }) + if err != nil { + log.Fatalf("could not connect to postgres: %s", err) + } + + // Run the DDL + if _, err = sqlx.LoadFile(conn, "./ddl.sql"); err != nil { + log.Fatalf("exec command file ./ddl.sql: %s", err) + } + + _ = conn.Close() + + // return DB URI + return uri, closer +} + +func TestMain(m *testing.M) { + var err error + pool, err = dockertest.NewPool("") + if err != nil { + log.Fatalf("could not connect to Docker: %s", err) + } + databaseURI, dbCleanup := runDBInstance(pool) + defer dbCleanup() // In case we don't reach the cleanup action. + db = sqlx.MustConnect("pgx", databaseURI) + code := m.Run() + if _, ok := os.LookupEnv("GOTEST_KEEP_DB"); !ok { + dbCleanup() // os.Exit() below won't call the defered cleanup, do it now. + } + os.Exit(code) +} + +// wrapper derives a prefix from t.Name and uses it to provide namespaced access to DB db as +// well as a simple error-reporting inserter. 
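+// Prefixing keeps rows inserted by different tests apart in the shared tasks table.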
+type wrapper struct { + t *testing.T + db *sqlx.DB +} + +func (w wrapper) prefix(s string) string { + return fmt.Sprintf("%s.%s", w.t.Name(), s) +} + +func (w wrapper) strip(s string) string { + return strings.TrimPrefix(s, w.t.Name()+".") +} + +func (w wrapper) prefixTask(id ddl.TaskId) ddl.TaskId { + return ddl.TaskId(w.prefix(string(id))) +} + +func (w wrapper) stripTask(id ddl.TaskId) ddl.TaskId { + return ddl.TaskId(w.strip(string(id))) +} + +func (w wrapper) prefixActor(actor ddl.ActorId) ddl.ActorId { + return ddl.ActorId(w.prefix(string(actor))) +} + +func (w wrapper) stripActor(actor ddl.TaskId) ddl.ActorId { + return ddl.ActorId(w.strip(string(actor))) +} + +func (w wrapper) insertTasks(tasks []ddl.TaskData) { + w.t.Helper() + const insertSql = `INSERT INTO tasks + (id, action, body, status, status_code, num_tries, max_tries, actor_id, + action_deadline, performance_token, finish_channel) + VALUES(:task_id, :action, :body, :status, :status_code, :num_tries, :max_tries, :actor_id, + :action_deadline, :performance_token, :finish_channel)` + for _, task := range tasks { + copy := task + copy.Id = w.prefixTask(copy.Id) + copy.Action = w.prefix(copy.Action) + copy.ActorId = w.prefixActor(copy.ActorId) + if copy.StatusCode == "" { + copy.StatusCode = "pending" + } + _, err := w.db.NamedExec(insertSql, copy) + if err != nil { + w.t.Fatalf("insert %+v into tasks: %s", tasks, err) + } + } +} + +func (w wrapper) returnTask(taskId ddl.TaskId, token ddl.PerformanceToken, resultStatus string, resultStatusCode ddl.TaskStatusCodeValue) error { + return ddl.ReturnTask(w.db, w.prefixTask(taskId), token, resultStatus, resultStatusCode) +} + +func (w wrapper) ownTasks(actorId ddl.ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]ddl.OwnedTaskData, error) { + prefixedActions := make([]string, len(actions)) + for i, action := range actions { + prefixedActions[i] = w.prefix(action) + } + tasks, err := ddl.OwnTasks(w.db, actorId, maxTasks, prefixedActions, maxDuration) + if tasks != nil { + for i := 0; i < len(tasks); i++ { + task := &tasks[i] + task.Id = w.stripTask(task.Id) + } + } + return tasks, err +} + +func TestOwn(t *testing.T) { + w := wrapper{t, db} + + w.insertTasks([]ddl.TaskData{ + {Id: "000", Action: "never"}, + {Id: "111", Action: "frob"}, + {Id: "123", Action: "broz"}, + {Id: "222", Action: "broz"}, + }) + tasks, err := w.ownTasks(ddl.ActorId("tester"), 2, []string{"frob", "broz"}, nil) + if err != nil { + t.Errorf("first own_tasks query: %s", err) + } + if len(tasks) != 2 { + t.Errorf("expected first OwnTasks to return 2 tasks but got %d: %+v", len(tasks), tasks) + } + gotTasks := tasks + + tasks, err = w.ownTasks(ddl.ActorId("tester-two"), 2, []string{"frob", "broz"}, nil) + if err != nil { + t.Errorf("second own_tasks query: %s", err) + } + if len(tasks) != 1 { + t.Errorf("expected second OwnTasks to return 1 task but got %d: %+v", len(tasks), tasks) + } + gotTasks = append(gotTasks, tasks...) 
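+
+	// Between both owners, every task except "000" (whose action "never" was not requested) should appear exactly once.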
+	gotIds := make([]ddl.TaskId, 0, len(gotTasks))
+	for _, got := range gotTasks {
+		gotIds = append(gotIds, got.Id)
+	}
+	sort.Sort(taskIdSlice(gotIds))
+	if diffs := deep.Equal([]ddl.TaskId{"111", "123", "222"}, gotIds); diffs != nil {
+		t.Errorf("expected other task IDs: %s", diffs)
+	}
+}
+
+func TestOwnAfterDeadlineElapsed(t *testing.T) {
+	second := 1 * time.Second
+	w := wrapper{t, db}
+
+	w.insertTasks([]ddl.TaskData{
+		{Id: "111", Action: "frob"},
+	})
+	_, err := w.ownTasks(ddl.ActorId("tortoise"), 1, []string{"frob"}, &second)
+	if err != nil {
+		t.Fatalf("failed to setup tortoise task ownership: %s", err)
+	}
+
+	fastTasks, err := w.ownTasks(ddl.ActorId("hare"), 1, []string{"frob"}, &second)
+	if err != nil {
+		t.Fatalf("failed to request fast task ownership: %s", err)
+	}
+	if len(fastTasks) != 0 {
+		t.Errorf("expected immediate hare task ownership to return nothing but got %+v", fastTasks)
+	}
+
+	time.Sleep(2 * time.Second)
+	fastTasks, err = w.ownTasks(ddl.ActorId("hare"), 1, []string{"frob"}, &second)
+	if err != nil {
+		t.Fatalf("failed to request fast task ownership after sleeping: %s", err)
+	}
+	if len(fastTasks) != 1 || fastTasks[0].Id != "111" {
+		t.Errorf("expected eventual hare task ownership to return task \"111\" but got tasks %+v", fastTasks)
+	}
+}
+
+func TestReturnTask_DirectlyAndRetry(t *testing.T) {
+	w := wrapper{t, db}
+
+	w.insertTasks([]ddl.TaskData{
+		{Id: "111", Action: "frob"},
+		{Id: "123", Action: "broz"},
+		{Id: "222", Action: "broz"},
+	})
+
+	tasks, err := w.ownTasks(ddl.ActorId("foo"), 4, []string{"frob", "broz"}, nil)
+	if err != nil {
+		t.Fatalf("acquire all tasks: %s", err)
+	}
+
+	taskById := make(map[ddl.TaskId]*ddl.OwnedTaskData, len(tasks))
+	for index := range tasks {
+		taskById[tasks[index].Id] = &tasks[index]
+	}
+
+	if err = w.returnTask(taskById[ddl.TaskId("111")].Id, taskById[ddl.TaskId("111")].Token, "done", ddl.TASK_COMPLETED); err != nil {
+		t.Errorf("return task 111: %s", err)
+	}
+
+	if err = w.returnTask(taskById[ddl.TaskId("111")].Id, taskById[ddl.TaskId("111")].Token, "done", ddl.TASK_COMPLETED); !errors.Is(err, ddl.InvalidTokenError) {
+		t.Errorf("expected second attempt to return task 111 to fail with InvalidTokenError, got %s", err)
+	}
+
+	// Now return task 123 to pending state so that it can be owned and retried.
+ if err = w.returnTask(taskById[ddl.TaskId("123")].Id, taskById[ddl.TaskId("123")].Token, "try-again", ddl.TASK_PENDING); err != nil { + t.Errorf("return task 123 (%+v) for another round: %s", taskById[ddl.TaskId("123")], err) + } + moreTasks, err := w.ownTasks(ddl.ActorId("foo"), 4, []string{"frob", "broz"}, nil) + if err != nil { + t.Fatalf("re-acquire task 123: %s", err) + } + if len(moreTasks) != 1 || moreTasks[0].Id != ddl.TaskId("123") { + t.Errorf("expected to receive only task 123 but got tasks %+v", moreTasks) + } +} + +func TestReturnTask_RetryMulti(t *testing.T) { + w := wrapper{t, db} + + maxTries := 7 + lifetime := 250 * time.Millisecond + + w.insertTasks([]ddl.TaskData{ + {Id: "111", Action: "frob", MaxTries: &maxTries}, + }) + + for i := 0; i < maxTries; i++ { + tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, &lifetime) + if err != nil { + t.Errorf("acquire task after %d/%d tries: %s", i, maxTries, err) + } + if len(tasks) != 1 { + t.Fatalf("expected to own single task after %d/%d tries but got %+v", i, maxTries, tasks) + } + if i%2 == 0 { + time.Sleep(2 * lifetime) + } else { + if err = w.returnTask(tasks[0].Id, tasks[0].Token, "retry", ddl.TASK_PENDING); err != nil { + t.Fatalf("return task %+v after %d/%d tries: %s", tasks[0], i, maxTries, err) + } + } + } + + tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, &lifetime) + if err != nil { + t.Fatalf("re-acquire task failed: %s", err) + } + if len(tasks) != 0 { + t.Errorf("expected not to receive any tasks (maxRetries) but got tasks %+v", tasks) + } +} diff --git a/go.mod b/go.mod index 4b41e7fc04d..b3ad898f4f1 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ require ( github.com/jackc/pgconn v1.6.4 github.com/jackc/pgerrcode v0.0.0-20190803225404-afa3381909a6 github.com/jackc/pgproto3/v2 v2.0.4 // indirect + github.com/jackc/pgtype v1.4.2 github.com/jackc/pgx/v4 v4.8.1 github.com/jamiealquiza/tachymeter v2.0.0+incompatible github.com/jedib0t/go-pretty v4.3.0+incompatible From e0a4ec59b93ed35aee679de5f5668485975006be Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Mon, 7 Sep 2020 10:17:58 +0300 Subject: [PATCH 003/158] Return body from own_tasks; test notifications and dependencies --- ddl/ddl.go | 37 ++++++++++++ ddl/ddl.sql | 4 +- ddl/ddl_test.go | 148 +++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 185 insertions(+), 4 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index c5dfb85fe00..9c77920cd89 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -1,6 +1,7 @@ package ddl import ( + "context" "database/sql/driver" "errors" "fmt" @@ -8,6 +9,7 @@ import ( "time" "github.com/jackc/pgtype" + "github.com/jackc/pgx/v4" "github.com/jmoiron/sqlx" ) @@ -63,6 +65,8 @@ const ( TASK_ABORTED TaskStatusCodeValue = "aborted" // TASK_COMPLETED indicates an actor has completed this task with message, will not reissued TASK_COMPLETED TaskStatusCodeValue = "completed" + // TASK_INVALID is used by the API to report errors + TASK_INVALID TaskStatusCodeValue = "[invalid]" ) // TaskData is a row in table "tasks". It describes a task to perform. @@ -92,6 +96,7 @@ type TaskDependencyData struct { type OwnedTaskData struct { Id TaskId `db:"task_id"` Token PerformanceToken `db:"token"` + Body *string } // OwnTasks owns for actor and returns up to maxTasks tasks for performing any of actions. 
@@ -138,3 +143,35 @@ func ReturnTask(conn *sqlx.DB, taskId TaskId, token PerformanceToken, resultStat return nil } + +// WaitForTask blocks until taskId ends, and returns its result status and status code. It +// needs a pgx.Conn -- *not* a sqlx.Conn -- because it depends on PostgreSQL specific features. +func WaitForTask(ctx context.Context, conn *pgx.Conn, taskId TaskId) (resultStatus string, resultStatusCode TaskStatusCodeValue, err error) { + row := conn.QueryRow(ctx, `SELECT finish_channel, status_code FROM tasks WHERE id=$1`, taskId) + var ( + finishChannel string + statusCode TaskStatusCodeValue + status string + ) + if err = row.Scan(&finishChannel, &statusCode); err != nil { + return "", TASK_INVALID, fmt.Errorf("check task %s to listen: %w", taskId, err) + } + if statusCode != TASK_IN_PROGRESS && statusCode != TASK_PENDING { + return "", statusCode, fmt.Errorf("task %s already in status %s", taskId, statusCode) + } + + if _, err = conn.Exec(ctx, "LISTEN "+pgx.Identifier{finishChannel}.Sanitize()); err != nil { + return "", TASK_INVALID, fmt.Errorf("listen for %s: %w", finishChannel, err) + } + + _, err = conn.WaitForNotification(ctx) + if err != nil { + return "", TASK_INVALID, fmt.Errorf("wait for notification %s: %w", finishChannel, err) + } + + row = conn.QueryRow(ctx, `SELECT status, status_code FROM tasks WHERE id=$1`, taskId) + status = "" + statusCode = TASK_INVALID + err = row.Scan(&status, &statusCode) + return status, statusCode, err +} diff --git a/ddl/ddl.sql b/ddl/ddl.sql index 7cd8b3d99cd..c72ad2636cd 100644 --- a/ddl/ddl.sql +++ b/ddl/ddl.sql @@ -54,7 +54,7 @@ $$; CREATE OR REPLACE FUNCTION own_tasks( max_tasks INTEGER, actions VARCHAR(128) ARRAY, owner_id VARCHAR(64), max_duration INTERVAL ) -RETURNS TABLE(task_id VARCHAR(64), token UUID) +RETURNS TABLE(task_id VARCHAR(64), token UUID, body jsonb) LANGUAGE sql VOLATILE AS $$ UPDATE tasks SET actor_id = owner_id, @@ -72,7 +72,7 @@ LANGUAGE sql VOLATILE AS $$ -- maybe: ORDER BY priority (eventually) FOR UPDATE SKIP LOCKED LIMIT max_tasks) - RETURNING id, performance_token + RETURNING id, performance_token, body $$; -- Returns an owned task id that was locked with token. It is an error diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 874e7c1db71..81f3d79e186 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -1,16 +1,19 @@ package ddl_test import ( + "context" "errors" "fmt" "log" "os" "sort" + "strconv" "strings" "testing" "time" "github.com/go-test/deep" + "github.com/jackc/pgx/v4" _ "github.com/jackc/pgx/v4/stdlib" "github.com/treeverse/parade/ddl" @@ -94,7 +97,9 @@ func TestMain(m *testing.M) { if err != nil { log.Fatalf("could not connect to Docker: %s", err) } - databaseURI, dbCleanup := runDBInstance(pool) + var dbCleanup func() + databaseURI, dbCleanup = runDBInstance(pool) + fmt.Println("[DEBUG] connectionURI", databaseURI) defer dbCleanup() // In case we don't reach the cleanup action. 
db = sqlx.MustConnect("pgx", databaseURI) code := m.Run() @@ -152,7 +157,21 @@ func (w wrapper) insertTasks(tasks []ddl.TaskData) { } _, err := w.db.NamedExec(insertSql, copy) if err != nil { - w.t.Fatalf("insert %+v into tasks: %s", tasks, err) + w.t.Fatalf("insert %+v into tasks: %s", copy, err) + } + } +} + +func (w wrapper) insertTaskDeps(deps []ddl.TaskDependencyData) { + w.t.Helper() + const insertSql = `INSERT INTO task_dependencies (after, run) VALUES(:after, :run)` + for _, dep := range deps { + copy := dep + copy.After = w.prefixTask(copy.After) + copy.Run = w.prefixTask(copy.Run) + _, err := w.db.NamedExec(insertSql, copy) + if err != nil { + w.t.Fatalf("insert %+v into task_dependencies: %s", copy, err) } } } @@ -318,3 +337,128 @@ func TestReturnTask_RetryMulti(t *testing.T) { t.Errorf("expected not to receive any tasks (maxRetries) but got tasks %+v", tasks) } } + +func TestDependencies(t *testing.T) { + w := wrapper{t, db} + + id := func(n int) ddl.TaskId { + return ddl.TaskId(fmt.Sprintf("number-%d", n)) + } + makeBody := func(n int) *string { + ret := fmt.Sprintf("%d", n) + return &ret + } + parseBody := func(s string) int { + ret, err := strconv.ParseInt(s, 10, 0) + if err != nil { + t.Fatalf("parse body %s: %s", s, err) + } + return int(ret) + } + + // Tasks: take a limit number. Create a task for each number from 1 to this limit + // number. Now add dependencies: each task can only be executed after all its divisors' + // tasks have been executed. This provides an interesting graph of dependencies that is + // also easy to check. + const num = 63 + taskData := make([]ddl.TaskData, 0, num) + for i := 1; i <= num; i++ { + taskData = append(taskData, ddl.TaskData{Id: id(i), Action: "div", Body: makeBody(i)}) + } + w.insertTasks(taskData) + deps := make([]ddl.TaskDependencyData, 0, num*20) + for i := 1; i <= num; i++ { + for j := 2 * i; j <= num; j++ { + deps = append(deps, ddl.TaskDependencyData{After: taskData[i-1].Id, Run: taskData[j-1].Id}) + } + } + w.insertTaskDeps(deps) + + doneSet := make(map[int]struct{}, num) + + for { + tasks, err := w.ownTasks(ddl.ActorId("foo"), 17, []string{"div"}, nil) + if err != nil { + t.Fatalf("acquire tasks with done %+v: %s", doneSet, err) + } + if len(tasks) == 0 { + break + } + for _, task := range tasks { + n := parseBody(*task.Body) + doneSet[n] = struct{}{} + for d := 1; d <= n/2; d++ { + if n%d == 0 { + if _, ok := doneSet[d]; !ok { + t.Errorf("retrieved task %+v before task for its divisor %d", task, d) + } + } + } + if err = w.returnTask(task.Id, task.Token, "divided", ddl.TASK_COMPLETED); err != nil { + t.Errorf("failed to complete task %+v: %s", task, err) + } + } + } + if len(doneSet) < num { + t.Errorf("finished before processing all numbers up to %d: got just %+v", num, doneSet) + } +} + +func TestNotification(t *testing.T) { + ctx := context.Background() + w := wrapper{t, db} + + type testCase struct { + title string + id ddl.TaskId + status string + statusCode ddl.TaskStatusCodeValue + } + + cases := []testCase{ + {"task aborted", ddl.TaskId("111"), "b0rked!", ddl.TASK_ABORTED}, + {"task succeeded", ddl.TaskId("222"), "yay!", ddl.TASK_COMPLETED}, + } + + for _, c := range cases { + t.Run(c.title, func(t *testing.T) { + w.insertTasks([]ddl.TaskData{ + {Id: c.id, Action: "frob"}, + }) + + tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, nil) + if err != nil { + t.Fatalf("acquire task: %s", err) + } + if len(tasks) != 1 { + t.Fatalf("expected to own single task but got %+v", tasks) + } + + conn, err := 
pgx.Connect(ctx, databaseURI) + if err != nil { + t.Fatalf("pgx.Connect: %s", err) + } + + type result struct { + status string + statusCode ddl.TaskStatusCodeValue + err error + } + ch := make(chan result) + go func() { + status, statusCode, err := ddl.WaitForTask(ctx, conn, ddl.TaskId("111")) + ch <- result{status, statusCode, err} + }() + + if err = w.returnTask(tasks[0].Id, tasks[0].Token, c.status, c.statusCode); err != nil { + t.Fatalf("return task %+v: %s", tasks[0], err) + } + + got := <-ch + expected := result{c.status, c.statusCode, nil} + if diffs := deep.Equal(expected, got); diffs != nil { + t.Errorf("WaitForTask returned unexpected values: %s", diffs) + } + }) + } +} From 49c7b058b79eb980ad3c8f60b703e5a79c2f44ab Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Mon, 7 Sep 2020 10:26:03 +0300 Subject: [PATCH 004/158] Test body return in ownTasks --- ddl/ddl_test.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 81f3d79e186..5f9ef3f6b8a 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -232,6 +232,36 @@ func TestOwn(t *testing.T) { } } +func TestOwnBody(t *testing.T) { + w := wrapper{t, db} + + val := "\"the quick brown fox jumps over the lazy dog\"" + + w.insertTasks([]ddl.TaskData{ + {Id: "body", Action: "yes", Body: &val}, + {Id: "nobody", Action: "no"}, + }) + + tasks, err := w.ownTasks(ddl.ActorId("somebody"), 2, []string{"yes", "no"}, nil) + if err != nil { + t.Fatalf("own tasks: %s", err) + } + if len(tasks) != 2 { + t.Fatalf("expected to own 2 tasks but got %+v", tasks) + } + body, nobody := tasks[0], tasks[1] + if body.Id != "body" { + body, nobody = nobody, body + } + + if nobody.Body != nil { + t.Errorf("unexpected body in task %+v", nobody) + } + if body.Body == nil || *body.Body != val { + t.Errorf("expected body \"%s\" in task %+v", val, body) + } +} + func TestOwnAfterDeadlineElapsed(t *testing.T) { second := 1 * time.Second w := wrapper{t, db} From 047ff0df209dc10d3b99e755fcbaad0bdd8acfa3 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Mon, 7 Sep 2020 16:21:37 +0300 Subject: [PATCH 005/158] Add fan-in benchmark Achieves ~300s-400s (varying) for 50K fan-in. --- ddl/ddl.sql | 3 +- ddl/ddl_test.go | 96 +++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 96 insertions(+), 3 deletions(-) diff --git a/ddl/ddl.sql b/ddl/ddl.sql index c72ad2636cd..41f0ccffba0 100644 --- a/ddl/ddl.sql +++ b/ddl/ddl.sql @@ -38,6 +38,7 @@ CREATE TABLE IF NOT EXISTS task_dependencies ( CREATE INDEX IF NOT EXISTS task_dependencies_after ON task_dependencies(after); CREATE INDEX IF NOT EXISTS task_dependencies_run ON task_dependencies(run); +ALTER TABLE task_dependencies ALTER run SET STATISTICS 1000; -- Returns true if task with this id, code and deadline can -- be allocated. 
@@ -70,7 +71,7 @@ LANGUAGE sql VOLATILE AS $$ (max_tries IS NULL OR num_tries < max_tries) -- maybe: AND not_before <= NOW() -- maybe: ORDER BY priority (eventually) - FOR UPDATE SKIP LOCKED + FOR NO KEY UPDATE SKIP LOCKED LIMIT max_tasks) RETURNING id, performance_token, body $$; diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 5f9ef3f6b8a..a4730e3c25a 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "log" + "math/rand" "os" "sort" "strconv" @@ -99,7 +100,6 @@ func TestMain(m *testing.M) { } var dbCleanup func() databaseURI, dbCleanup = runDBInstance(pool) - fmt.Println("[DEBUG] connectionURI", databaseURI) defer dbCleanup() // In case we don't reach the cleanup action. db = sqlx.MustConnect("pgx", databaseURI) code := m.Run() @@ -112,7 +112,7 @@ func TestMain(m *testing.M) { // wrapper derives a prefix from t.Name and uses it to provide namespaced access to DB db as // well as a simple error-reporting inserter. type wrapper struct { - t *testing.T + t testing.TB db *sqlx.DB } @@ -492,3 +492,95 @@ func TestNotification(t *testing.T) { }) } } + +func BenchmarkFanIn(b *testing.B) { + const ( + parallelism = 5 + bulk = 1000 + ) + + numTasks := b.N * 50000 + + w := wrapper{b, db} + + id := func(n int) ddl.TaskId { + return ddl.TaskId(fmt.Sprintf("in:%08d", n)) + } + + tasks := make([]ddl.TaskData, 0, numTasks+1) + for i := 0; i < numTasks; i++ { + tasks = append(tasks, ddl.TaskData{Id: id(i), Action: "part"}) + } + tasks = append(tasks, ddl.TaskData{Id: "done", Action: "done"}) + w.insertTasks(tasks) + + deps := make([]ddl.TaskDependencyData, 0, numTasks) + for i := 0; i < numTasks; i++ { + deps = append(deps, ddl.TaskDependencyData{After: id(i), Run: "done"}) + } + w.insertTaskDeps(deps) + + _, err := db.Exec("ANALYZE task_dependencies") + if err != nil { + b.Fatalf("ANALYZE task_dependencies: %s", err) + } + + type result struct { + count int + receivedDone bool + err error + } + resultCh := make([]chan result, parallelism) + for i := 0; i < parallelism; i++ { + resultCh[i] = make(chan result) + } + + b.ResetTimer() + for i := 0; i < parallelism; i++ { + go func(i int) { + count := 0 + receivedDone := false + for { + size := int(bulk + rand.Int31n(bulk/10) - bulk/10) + tasks, err := w.ownTasks(ddl.ActorId(fmt.Sprintf("worker-%d", i)), size, []string{"part", "done"}, nil) + if err != nil { + resultCh[i] <- result{err: err} + return + } + if len(tasks) == 0 { + break + } + for _, task := range tasks { + if task.Id != "done" { + count++ + } else { + receivedDone = true + } + w.returnTask(task.Id, task.Token, "ok", ddl.TASK_COMPLETED) + } + } + resultCh[i] <- result{count: count, receivedDone: receivedDone} + }(i) + } + + var total, numDone int + for i := 0; i < parallelism; i++ { + r := <-resultCh[i] + if r.err != nil { + b.Errorf("goroutine %d failed: %s", i, r.err) + } + total += r.count + if r.receivedDone { + numDone++ + } + } + b.StopTimer() + if total != numTasks { + b.Errorf("expected %d tasks but processed %d", numTasks, total) + } + if numDone != 1 { + b.Errorf("expected single goroutine to process \"done\" but got %d", numDone) + } + b.ReportMetric(float64(numTasks), "num_tasks") + b.ReportMetric(float64(parallelism), "num_goroutines") +} From 11d24a2f8b0dd7eb3a210d42f81bb7ee72eba003 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Tue, 8 Sep 2020 11:32:42 +0300 Subject: [PATCH 006/158] Replace task_dependencies with just a task count *Much* faster: 50-80 seconds for a 50_000 task test with 5 concurrent connections. 
If scaling persists then we can do 1_000_000 tasks in 1000-1600 seconds, i.e. <30 minutes. This is a fairly major change. It makes debugging tasks that never got started a bit harder (there is still a to_signal column, but it is hard to determine which tasks must *precede* a given task). --- ddl/ddl.go | 72 +++++++++++++++++++-- ddl/ddl.sql | 43 +++++++------ ddl/ddl_test.go | 167 +++++++++++++++++++++++++++++++++++------------- 3 files changed, 213 insertions(+), 69 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index 9c77920cd89..65474e93034 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -79,17 +79,79 @@ type TaskData struct { StatusCode string `db:"status_code"` NumTries int `db:"num_tries"` MaxTries *int `db:"max_tries"` + TotalDependencies *int `db:"total_dependencies"` + ToSignal []TaskId `db:"to_signal"` ActorId ActorId `db:"actor_id"` ActionDeadline *time.Time `db:"action_deadline"` PerformanceToken *PerformanceToken `db:"performance_token"` FinishChannelName *string `db:"finish_channel"` } -// TaskDependencyData is a row in table "task_dependencies". It describes that task Run must -// occur after task After succeeds. -type TaskDependencyData struct { - After TaskId - Run TaskId +// TaskDataIterator implements the pgx.CopyFromSource interface and allows using CopyFrom to insert +// multiple TaskData rapidly. +type TaskDataIterator struct { + Data []TaskData + next int + err error +} + +func (td *TaskDataIterator) Next() bool { + if td.next > len(td.Data) { + td.err = NoMoreDataError + return false + } + ret := td.next < len(td.Data) + td.next++ + return ret +} + +var NoMoreDataError = errors.New("no more data") + +func (td *TaskDataIterator) Err() error { + return td.err +} + +func (td *TaskDataIterator) Values() ([]interface{}, error) { + if td.next > len(td.Data) { + td.err = NoMoreDataError + return nil, td.err + } + value := td.Data[td.next-1] + // Convert ToSignal to a text array so pgx can convert it to text. Needed because Go + // types + toSignal := make([]string, len(value.ToSignal)) + for i := 0; i < len(value.ToSignal); i++ { + toSignal[i] = string(value.ToSignal[i]) + } + return []interface{}{ + value.Id, + value.Action, + value.Body, + value.Status, + value.StatusCode, + value.NumTries, + value.MaxTries, + value.TotalDependencies, + toSignal, + value.ActorId, + value.ActionDeadline, + value.PerformanceToken, + value.FinishChannelName, + }, nil +} + +var TaskDataColumnNames = []string{ + "id", "action", "body", "status", "status_code", "num_tries", "max_tries", + "total_dependencies", "to_signal", "actor_id", "action_deadline", "performance_token", + "finish_channel", +} + +var tasksTable = pgx.Identifier{"tasks"} + +// InsertTasks adds multiple tasks efficiently. +func InsertTasks(ctx context.Context, pgConn *pgx.Conn, source *TaskDataIterator) error { + _, err := pgConn.CopyFrom(ctx, tasksTable, TaskDataColumnNames, source) + return err } // OwnedTaskData is a row returned from "SELECT * FROM own_tasks(...)". 
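With this patch, bulk insertion goes through pgx's CopyFrom via TaskDataIterator. A
hypothetical enqueue helper, sketched under the assumption that ddl.sql is loaded and that
the context, pgx/v4 and this ddl package are imported (the task IDs and actions are made
up), might look like:

	// enqueueExample inserts a two-part fan-in: both "part" tasks signal "done",
	// which own_tasks will only hand out after it has received both signals.
	func enqueueExample(ctx context.Context, databaseURI string) error {
		conn, err := pgx.Connect(ctx, databaseURI)
		if err != nil {
			return err
		}
		defer conn.Close(ctx)
		two := 2
		return ddl.InsertTasks(ctx, conn, &ddl.TaskDataIterator{Data: []ddl.TaskData{
			{Id: "part-1", Action: "part", ToSignal: []ddl.TaskId{"done"}},
			{Id: "part-2", Action: "part", ToSignal: []ddl.TaskId{"done"}},
			{Id: "done", Action: "done", TotalDependencies: &two},
		}})
	}
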
diff --git a/ddl/ddl.sql b/ddl/ddl.sql index 41f0ccffba0..2e040f1b37f 100644 --- a/ddl/ddl.sql +++ b/ddl/ddl.sql @@ -15,12 +15,21 @@ CREATE TABLE IF NOT EXISTS tasks ( id VARCHAR(64) NOT NULL PRIMARY KEY, -- nanoid action VARCHAR(128) NOT NULL, -- name (type) of action to perform - body JSONB, -- data used by action + body TEXT, -- data used by action status TEXT, -- status text defined by action, visible to action status_code TASK_STATUS_CODE_VALUE NOT NULL DEFAULT 'pending', -- internal status code, used by parade to issue tasks num_tries INTEGER NOT NULL DEFAULT 0, max_tries INTEGER, + + total_dependencies INTEGER, -- number of tasks that must signal this task + num_signals INTEGER NOT NULL DEFAULT 0, -- number of tasks that have already signalled this task + + -- BUG(ariels): add REFERENCES dependency to each of the to_signal + -- tasks. Or at least add triggers that perform ON DELETE + -- CASCADE. + to_signal VARCHAR(64) ARRAY, -- IDs to signal after performing this task + actor_id VARCHAR(64), -- ID of performing actor if in-progress action_deadline TIMESTAMPTZ, -- offer this task to other actors once action_deadline has elapsed performance_token UUID, @@ -29,24 +38,13 @@ CREATE TABLE IF NOT EXISTS tasks ( -- task, and accept task completion only when lock token is unchanged. ); -CREATE TABLE IF NOT EXISTS task_dependencies ( - after VARCHAR(64) REFERENCES tasks(id) -- after this task ID is done - ON DELETE CASCADE ON UPDATE CASCADE, - run VARCHAR(64) REFERENCES tasks(id) -- run this task ID - ON DELETE CASCADE ON UPDATE CASCADE -); - -CREATE INDEX IF NOT EXISTS task_dependencies_after ON task_dependencies(after); -CREATE INDEX IF NOT EXISTS task_dependencies_run ON task_dependencies(run); -ALTER TABLE task_dependencies ALTER run SET STATISTICS 1000; - -- Returns true if task with this id, code and deadline can -- be allocated. 
-CREATE OR REPLACE FUNCTION can_allocate_task(id VARCHAR(64), code TASK_STATUS_CODE_VALUE, deadline TIMESTAMPTZ) +CREATE OR REPLACE FUNCTION can_allocate_task(id VARCHAR(64), code TASK_STATUS_CODE_VALUE, deadline TIMESTAMPTZ, num_signals INTEGER, total_dependencies INTEGER) RETURNS BOOLEAN LANGUAGE sql IMMUTABLE AS $$ SELECT (code = 'pending' OR (code = 'in-progress' AND deadline < NOW())) AND - id NOT IN (SELECT DISTINCT run AS id FROM task_dependencies) + (total_dependencies IS NULL OR num_signals = total_dependencies) $$; -- Marks up to `max_tasks' on one of `actions' as in-progress and @@ -55,7 +53,7 @@ $$; CREATE OR REPLACE FUNCTION own_tasks( max_tasks INTEGER, actions VARCHAR(128) ARRAY, owner_id VARCHAR(64), max_duration INTERVAL ) -RETURNS TABLE(task_id VARCHAR(64), token UUID, body jsonb) +RETURNS TABLE(task_id VARCHAR(64), token UUID, body TEXT) LANGUAGE sql VOLATILE AS $$ UPDATE tasks SET actor_id = owner_id, @@ -66,8 +64,8 @@ LANGUAGE sql VOLATILE AS $$ WHERE id IN ( SELECT id FROM tasks - WHERE can_allocate_task(id, status_code, action_deadline) AND - ARRAY[action] <@ actions AND + WHERE can_allocate_task(id, status_code, action_deadline, num_signals, total_dependencies) AND + action = ANY(actions) AND (max_tries IS NULL OR num_tries < max_tries) -- maybe: AND not_before <= NOW() -- maybe: ORDER BY priority (eventually) @@ -86,23 +84,26 @@ LANGUAGE plpgsql AS $$ DECLARE num_updated INTEGER; channel VARCHAR(64); + tasks_to_signal VARCHAR(64) ARRAY; BEGIN - UPDATE tasks INTO channel + UPDATE tasks INTO channel, tasks_to_signal SET status = result_status, status_code = result_status_code, actor_id = NULL, performance_token = NULL WHERE id = task_id AND performance_token = token - RETURNING finish_channel; + RETURNING finish_channel, to_signal; GET DIAGNOSTICS num_updated := ROW_COUNT; - DELETE FROM task_dependencies WHERE after=task_id; - IF channel IS NOT NULL THEN SELECT pg_notify(channel, NULL); END IF; + UPDATE tasks + SET num_signals = num_signals+1 + WHERE id = ANY(tasks_to_signal); + RETURN num_updated; END; $$; diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index a4730e3c25a..a44f9294b2a 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -142,37 +142,31 @@ func (w wrapper) stripActor(actor ddl.TaskId) ddl.ActorId { func (w wrapper) insertTasks(tasks []ddl.TaskData) { w.t.Helper() - const insertSql = `INSERT INTO tasks - (id, action, body, status, status_code, num_tries, max_tries, actor_id, - action_deadline, performance_token, finish_channel) - VALUES(:task_id, :action, :body, :status, :status_code, :num_tries, :max_tries, :actor_id, - :action_deadline, :performance_token, :finish_channel)` - for _, task := range tasks { - copy := task + ctx := context.Background() + conn, err := pgx.Connect(ctx, databaseURI) + if err != nil { + w.t.Fatalf("pgx.Connect: %s", err) + } + + prefixedTasks := make([]ddl.TaskData, len(tasks)) + for i := 0; i < len(tasks); i++ { + copy := tasks[i] copy.Id = w.prefixTask(copy.Id) copy.Action = w.prefix(copy.Action) copy.ActorId = w.prefixActor(copy.ActorId) if copy.StatusCode == "" { copy.StatusCode = "pending" } - _, err := w.db.NamedExec(insertSql, copy) - if err != nil { - w.t.Fatalf("insert %+v into tasks: %s", copy, err) + toSignal := make([]ddl.TaskId, len(copy.ToSignal)) + for j := 0; j < len(toSignal); j++ { + toSignal[j] = w.prefixTask(copy.ToSignal[j]) } + copy.ToSignal = toSignal + prefixedTasks[i] = copy } -} - -func (w wrapper) insertTaskDeps(deps []ddl.TaskDependencyData) { - w.t.Helper() - const insertSql = `INSERT INTO 
task_dependencies (after, run) VALUES(:after, :run)` - for _, dep := range deps { - copy := dep - copy.After = w.prefixTask(copy.After) - copy.Run = w.prefixTask(copy.Run) - _, err := w.db.NamedExec(insertSql, copy) - if err != nil { - w.t.Fatalf("insert %+v into task_dependencies: %s", copy, err) - } + err = ddl.InsertTasks(ctx, conn, &ddl.TaskDataIterator{Data: prefixedTasks}) + if err != nil { + w.t.Fatalf("InsertTasks: %s", err) } } @@ -195,6 +189,93 @@ func (w wrapper) ownTasks(actorId ddl.ActorId, maxTasks int, actions []string, m return tasks, err } +func stringAddr(s string) *string { + return &s +} + +func performanceTokenAddr(p ddl.PerformanceToken) *ddl.PerformanceToken { + return &p +} + +func intAddr(i int) *int { + return &i +} + +func TestTaskDataIterator_Empty(t *testing.T) { + it := ddl.TaskDataIterator{Data: []ddl.TaskData{}} + if it.Err() != nil { + t.Errorf("expected empty new iterator to have no errors, got %s", it.Err()) + } + if it.Next() { + t.Errorf("expected empty new iterator %+v not to advance", it) + } + if it.Err() != nil { + t.Errorf("advanced empty new iterator to have no errors, got %s", it.Err()) + } + if it.Next() { + t.Errorf("expected advanced empty new iterator %+v not to advance", it) + } + if !errors.Is(it.Err(), ddl.NoMoreDataError) { + t.Errorf("expected twice-advanced iterator to raise NoMoreDataError, got %s", it.Err()) + } +} + +func TestTaskDataIterator_Values(t *testing.T) { + now := time.Now() + tasks := []ddl.TaskData{ + {Id: "000", Action: "zero", StatusCode: "enum values enforced on DB"}, + {Id: "111", Action: "frob", Body: stringAddr("1"), Status: stringAddr("state"), + StatusCode: "pending", + NumTries: 11, MaxTries: intAddr(17), + TotalDependencies: intAddr(9), + ToSignal: []ddl.TaskId{ddl.TaskId("foo"), ddl.TaskId("bar")}, + ActorId: ddl.ActorId("actor"), ActionDeadline: &now, + PerformanceToken: performanceTokenAddr(ddl.PerformanceToken{}), + FinishChannelName: stringAddr("done"), + }, + } + it := ddl.TaskDataIterator{Data: tasks} + + for index, task := range tasks { + if it.Err() != nil { + t.Errorf("expected iterator to be OK at index %d, got error %s", index, it.Err()) + } + if !it.Next() { + t.Errorf("expected to advance iterator %+v at index %d", it, index) + } + values, err := it.Values() + if err != nil { + t.Errorf("expected to values at index %d, got error %s", index, err) + } + toSignal := make([]string, len(task.ToSignal)) + for i := 0; i < len(task.ToSignal); i++ { + toSignal[i] = string(task.ToSignal[i]) + } + if diffs := deep.Equal( + []interface{}{ + task.Id, task.Action, task.Body, task.Status, task.StatusCode, + task.NumTries, task.MaxTries, + task.TotalDependencies, toSignal, + task.ActorId, task.ActionDeadline, + task.PerformanceToken, task.FinishChannelName, + }, values); diffs != nil { + t.Errorf("got other values at index %d than expected: %s", index, diffs) + } + } + if it.Next() { + t.Errorf("expected iterator %+v to end after tasks done", it) + } + if _, err := it.Values(); !errors.Is(err, ddl.NoMoreDataError) { + t.Errorf("expected NoMoreData after iterator done, got %s", err) + } + if !errors.Is(it.Err(), ddl.NoMoreDataError) { + t.Errorf("expected iterator Err() to repeat NoMoreDataError, got %s", it.Err()) + } + if it.Next() { + t.Errorf("expected iterator %+v to end-and-error after advanced past tasks done", it) + } +} + func TestOwn(t *testing.T) { w := wrapper{t, db} @@ -393,16 +474,26 @@ func TestDependencies(t *testing.T) { const num = 63 taskData := make([]ddl.TaskData, 0, num) for i := 1; i <= num; 
i++ { - taskData = append(taskData, ddl.TaskData{Id: id(i), Action: "div", Body: makeBody(i)}) - } - w.insertTasks(taskData) - deps := make([]ddl.TaskDependencyData, 0, num*20) - for i := 1; i <= num; i++ { - for j := 2 * i; j <= num; j++ { - deps = append(deps, ddl.TaskDependencyData{After: taskData[i-1].Id, Run: taskData[j-1].Id}) + toSignal := make([]ddl.TaskId, 0, i/20) + for j := 2 * i; j <= num; j += i { + toSignal = append(toSignal, id(j)) + } + numDivisors := 0 + for k := 1; k <= i/2; k++ { + if i%k == 0 { + numDivisors++ + } } + + taskData = append(taskData, ddl.TaskData{ + Id: id(i), + Action: "div", + Body: makeBody(i), + ToSignal: toSignal, + TotalDependencies: &numDivisors, + }) } - w.insertTaskDeps(deps) + w.insertTasks(taskData) doneSet := make(map[int]struct{}, num) @@ -508,23 +599,13 @@ func BenchmarkFanIn(b *testing.B) { } tasks := make([]ddl.TaskData, 0, numTasks+1) + toSignal := []ddl.TaskId{"done"} for i := 0; i < numTasks; i++ { - tasks = append(tasks, ddl.TaskData{Id: id(i), Action: "part"}) + tasks = append(tasks, ddl.TaskData{Id: id(i), Action: "part", ToSignal: toSignal}) } tasks = append(tasks, ddl.TaskData{Id: "done", Action: "done"}) w.insertTasks(tasks) - deps := make([]ddl.TaskDependencyData, 0, numTasks) - for i := 0; i < numTasks; i++ { - deps = append(deps, ddl.TaskDependencyData{After: id(i), Run: "done"}) - } - w.insertTaskDeps(deps) - - _, err := db.Exec("ANALYZE task_dependencies") - if err != nil { - b.Fatalf("ANALYZE task_dependencies: %s", err) - } - type result struct { count int receivedDone bool From 3bf419814209690d2c1910a53bff1db20466aefb Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Wed, 16 Sep 2020 09:53:36 +0300 Subject: [PATCH 007/158] Change all constants affecting test DB and benchmark size into flags --- ddl/ddl_test.go | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index a44f9294b2a..2f12ebe7f8a 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -3,6 +3,7 @@ package ddl_test import ( "context" "errors" + "flag" "fmt" "log" "math/rand" @@ -31,6 +32,11 @@ var ( pool *dockertest.Pool databaseURI string db *sqlx.DB + + postgresUrl = flag.String("postgres-url", "", "Postgres connection string. If unset, run a Postgres in a Docker container. If set, should have ddl.sql already loaded.") + parallelism = flag.Int("parallelism", 16, "Number of concurrent client worker goroutines.") + bulk = flag.Int("bulk", 2_000, "Number of tasks to acquire at once in each client goroutine.") + taskFactor = flag.Int("task-factor", 20_000, "Scale benchmark N by this many tasks") ) // taskIdSlice attaches the methods of sort.Interface to []TaskId. @@ -43,6 +49,10 @@ func (p taskIdSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // runDBInstance starts a test Postgres server inside container pool, and returns a connection // URI and a closer function. 
func runDBInstance(pool *dockertest.Pool) (string, func()) { + if *postgresUrl != "" { + return *postgresUrl, nil + } + resource, err := pool.Run("postgres", "11", []string{ "POSTGRES_USER=parade", "POSTGRES_PASSWORD=parade", @@ -585,12 +595,7 @@ func TestNotification(t *testing.T) { } func BenchmarkFanIn(b *testing.B) { - const ( - parallelism = 5 - bulk = 1000 - ) - - numTasks := b.N * 50000 + numTasks := b.N * *taskFactor w := wrapper{b, db} @@ -611,18 +616,19 @@ func BenchmarkFanIn(b *testing.B) { receivedDone bool err error } - resultCh := make([]chan result, parallelism) - for i := 0; i < parallelism; i++ { + resultCh := make([]chan result, *parallelism) + for i := 0; i < *parallelism; i++ { resultCh[i] = make(chan result) } + startTime := time.Now() // Cannot access Go benchmark timer, get it manually b.ResetTimer() - for i := 0; i < parallelism; i++ { + for i := 0; i < *parallelism; i++ { go func(i int) { count := 0 receivedDone := false for { - size := int(bulk + rand.Int31n(bulk/10) - bulk/10) + size := *bulk + int(rand.Int31n(int32(*bulk/10))) - *bulk/10 tasks, err := w.ownTasks(ddl.ActorId(fmt.Sprintf("worker-%d", i)), size, []string{"part", "done"}, nil) if err != nil { resultCh[i] <- result{err: err} @@ -645,7 +651,7 @@ func BenchmarkFanIn(b *testing.B) { } var total, numDone int - for i := 0; i < parallelism; i++ { + for i := 0; i < *parallelism; i++ { r := <-resultCh[i] if r.err != nil { b.Errorf("goroutine %d failed: %s", i, r.err) @@ -656,6 +662,7 @@ func BenchmarkFanIn(b *testing.B) { } } b.StopTimer() + duration := time.Since(startTime) if total != numTasks { b.Errorf("expected %d tasks but processed %d", numTasks, total) } @@ -663,5 +670,6 @@ func BenchmarkFanIn(b *testing.B) { b.Errorf("expected single goroutine to process \"done\" but got %d", numDone) } b.ReportMetric(float64(numTasks), "num_tasks") - b.ReportMetric(float64(parallelism), "num_goroutines") + b.ReportMetric(float64(*parallelism), "num_goroutines") + b.ReportMetric(float64(float64(numTasks)/float64(duration)*1e9), "tasks/sec") } From 103eece342d20ab39c759c68444a78ca15f87d74 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 17 Sep 2020 16:59:29 +0300 Subject: [PATCH 008/158] [bug] Correctly read postgres-url flag and close some object in test --- ddl/ddl_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 2f12ebe7f8a..fd8c99ab20f 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -104,6 +104,7 @@ func runDBInstance(pool *dockertest.Pool) (string, func()) { func TestMain(m *testing.M) { var err error + flag.Parse() pool, err = dockertest.NewPool("") if err != nil { log.Fatalf("could not connect to Docker: %s", err) @@ -112,8 +113,9 @@ func TestMain(m *testing.M) { databaseURI, dbCleanup = runDBInstance(pool) defer dbCleanup() // In case we don't reach the cleanup action. db = sqlx.MustConnect("pgx", databaseURI) + defer db.Close() code := m.Run() - if _, ok := os.LookupEnv("GOTEST_KEEP_DB"); !ok { + if _, ok := os.LookupEnv("GOTEST_KEEP_DB"); !ok && dbCleanup != nil { dbCleanup() // os.Exit() below won't call the defered cleanup, do it now. 
} os.Exit(code) @@ -157,6 +159,7 @@ func (w wrapper) insertTasks(tasks []ddl.TaskData) { if err != nil { w.t.Fatalf("pgx.Connect: %s", err) } + defer conn.Close(ctx) prefixedTasks := make([]ddl.TaskData, len(tasks)) for i := 0; i < len(tasks); i++ { From cbc16c79493f6667a8a17236452486bc2a61f07a Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 17 Sep 2020 17:03:13 +0300 Subject: [PATCH 009/158] Add DeleteTasks Most useful for cleanups rather than online processing -- which should use the task lifecycle. --- ddl/ddl.go | 26 ++++++++++++ ddl/ddl.sql | 52 +++++++++++++++++++++++- ddl/ddl_test.go | 103 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 180 insertions(+), 1 deletion(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index 65474e93034..4aa267b4eb4 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -11,6 +11,7 @@ import ( "github.com/jackc/pgtype" "github.com/jackc/pgx/v4" "github.com/jmoiron/sqlx" + nanoid "github.com/matoous/go-nanoid" ) type TaskId string @@ -237,3 +238,28 @@ func WaitForTask(ctx context.Context, conn *pgx.Conn, taskId TaskId) (resultStat err = row.Scan(&status, &statusCode) return status, statusCode, err } + +// DeleteTasks deletes taskIds, removing dependencies and deleting (effectively recursively) any +// tasks that are left with no dependencies. The effect is easiest to analyze when all deleted +// tasks have been either completed or been aborted. +func DeleteTasks(tx *sqlx.Tx, taskIds []TaskId) error { + uniqueId, err := nanoid.Nanoid() + if err != nil { + return fmt.Errorf("generate random component for table name: %w", err) + } + tableName := fmt.Sprintf("delete_tasks_%s", uniqueId) + if _, err = tx.Exec(fmt.Sprintf(`CREATE TABLE "%s" (id VARCHAR(64), mark tasks_recurse_value NOT NULL)`, tableName)); err != nil { + return fmt.Errorf("create temp work table %s: %w", tableName, err) + } + insertStmt, err := tx.Prepare(fmt.Sprintf(`INSERT INTO "%s" VALUES($1, 'new')`, tableName)) + if err != nil { + return fmt.Errorf("prepare INSERT statement: %w", err) + } + for _, id := range taskIds { + if _, err = insertStmt.Exec(id); err != nil { + return fmt.Errorf("insert ID %s: %w", id, err) + } + } + _, err = tx.Exec(`SELECT delete_tasks($1)`, tableName) + return err +} diff --git a/ddl/ddl.sql b/ddl/ddl.sql index 2e040f1b37f..e513eca9cb0 100644 --- a/ddl/ddl.sql +++ b/ddl/ddl.sql @@ -6,7 +6,7 @@ CREATE SCHEMA IF NOT EXISTS parade; CREATE TYPE task_status_code_value AS ENUM ( 'pending', -- waiting for an actor to perform it (new or being retried) - 'in-progress', -- task is being performed by an actor + 'in-progress', -- task is being performed by an actor 'aborted', -- an actor has aborted this task with message, will not be reissued 'completed' -- an actor has completed this task with message, will not reissued ); @@ -107,3 +107,53 @@ BEGIN RETURN num_updated; END; $$; + +-- (Utility for delete_task function: remove all dependencies from task ID, returning ids of any +-- tasks with no remaining dependencies.) 
+CREATE OR REPLACE FUNCTION remove_task_dependencies(task_id VARCHAR(64))
+RETURNS SETOF VARCHAR(64)
+LANGUAGE sql AS $$
+WITH signalled_ids AS (
+    UPDATE tasks
+    SET total_dependencies = tasks.total_dependencies-1
+    WHERE tasks.id IN (SELECT UNNEST(to_signal) FROM tasks WHERE id=task_id)
+    RETURNING (CASE WHEN tasks.total_dependencies = 0 THEN tasks.id ELSE NULL END) id
+)
+SELECT id FROM signalled_ids WHERE id IS NOT NULL;
+$$;
+
+CREATE TYPE tasks_recurse_value AS ENUM ('new', 'in-progress', 'done');
+
+-- Deletes the tasks whose ids appear in column id of table task_id_name (columns: id, an
+-- ID; mark, a tasks_recurse_value; presumably a temporary table) and empties that table.
+-- Each deletion decrements the deleted task's dependents, and a dependent left with no
+-- further dependencies is deleted as well, effectively recursively.  The work table holds
+-- the to-be-deleted ids during the operation; the rows are removed from tasks only at the
+-- end.  Returns nothing.  No abort marking is performed -- make sure to abort the tasks
+-- first!
+CREATE OR REPLACE FUNCTION delete_tasks(task_id_name TEXT) RETURNS VOID LANGUAGE plpgsql AS $$
+DECLARE
+    total_num_updated INTEGER;
+    num_updated INTEGER;
+    row_count INTEGER;
+BEGIN
+    LOOP
+	EXECUTE format($Q$
+	    UPDATE %1$I SET mark='in-progress' WHERE mark='new'
+	$Q$, task_id_name);
+	EXECUTE format($Q$
+	    WITH new_to_delete AS (
+		SELECT remove_task_dependencies(id) id FROM %1$I WHERE mark='in-progress'
+            )
+	    INSERT INTO %1$I (SELECT id, 'new' mark FROM new_to_delete)
+	$Q$, task_id_name);
+	GET DIAGNOSTICS row_count = ROW_COUNT;
+	EXIT WHEN row_count=0;
+	EXECUTE format($Q$
+	    UPDATE %1$I SET mark='done' WHERE mark='in-progress'
+	$Q$, task_id_name);
+    END LOOP;
+    EXECUTE format($Q$
+	DELETE FROM tasks WHERE id IN (SELECT id FROM %1$I)
+    $Q$, task_id_name);
+END;
+$$;
diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go
index fd8c99ab20f..df7ca806889 100644
--- a/ddl/ddl_test.go
+++ b/ddl/ddl_test.go
@@ -183,6 +183,32 @@ func (w wrapper) insertTasks(tasks []ddl.TaskData) {
 	}
 }
 
+func (w wrapper) deleteTasks(ids []ddl.TaskId) error {
+	prefixedIds := make([]ddl.TaskId, len(ids))
+	for i := 0; i < len(ids); i++ {
+		prefixedIds[i] = w.prefixTask(ids[i])
+	}
+	tx, err := w.db.BeginTxx(context.Background(), nil)
+	if err != nil {
+		return fmt.Errorf("BEGIN: %w", err)
+	}
+	defer func() {
+		if tx != nil {
+			tx.Rollback()
+		}
+	}()
+
+	if err = ddl.DeleteTasks(tx, prefixedIds); err != nil {
+		return err
+	}
+
+	if err = tx.Commit(); err != nil {
+		return fmt.Errorf("COMMIT: %w", err)
+	}
+	tx = nil
+	return nil
+}
+
 func (w wrapper) returnTask(taskId ddl.TaskId, token ddl.PerformanceToken, resultStatus string, resultStatusCode ddl.TaskStatusCodeValue) error {
 	return ddl.ReturnTask(w.db, w.prefixTask(taskId), token, resultStatus, resultStatusCode)
 }
@@ -538,6 +564,82 @@ func TestDependencies(t *testing.T) {
 	}
 }
 
+func TestDeleteTasks(t *testing.T) {
+	// Delete tasks requires whitebox testing, to ensure tasks really are deleted.
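+	// The fixtures below build three small graphs, keyed by id prefix:
+	// "a" is a chain with an extra link (a0 also signals a3 directly), so
+	// deleting a0 cascades through a1, a2 and a3; in "b", b1 has no recorded
+	// dependencies, so deleting b0 must leave b1 and b2 alive; "c" is a DAG
+	// whose leaves c4 and c5 are each signalled by two parents.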
+ w := wrapper{t, db} + + w.insertTasks([]ddl.TaskData{ + {Id: ddl.TaskId("a0"), Action: "root", ToSignal: []ddl.TaskId{"a1", "a3"}}, + {Id: ddl.TaskId("a1"), Action: "dep", ToSignal: []ddl.TaskId{"a2"}, TotalDependencies: intAddr(1)}, + {Id: ddl.TaskId("a2"), Action: "dep", ToSignal: []ddl.TaskId{"a3"}, TotalDependencies: intAddr(1)}, + {Id: ddl.TaskId("a3"), Action: "leaf", TotalDependencies: intAddr(2)}, + + {Id: ddl.TaskId("b0"), Action: "root", ToSignal: []ddl.TaskId{"b1"}}, + {Id: ddl.TaskId("b1"), Action: "root-keep", ToSignal: []ddl.TaskId{"b2"}}, + {Id: ddl.TaskId("b2"), Action: "leaf", TotalDependencies: intAddr(2)}, + + {Id: ddl.TaskId("c0"), Action: "root", ToSignal: []ddl.TaskId{"c1", "c2"}}, + {Id: ddl.TaskId("c1"), Action: "dep", ToSignal: []ddl.TaskId{"c3", "c4"}, TotalDependencies: intAddr(1)}, + {Id: ddl.TaskId("c2"), Action: "dep", ToSignal: []ddl.TaskId{"c4", "c5"}, TotalDependencies: intAddr(1)}, + {Id: ddl.TaskId("c3"), Action: "dep", ToSignal: []ddl.TaskId{"c5", "c6"}, TotalDependencies: intAddr(1)}, + {Id: ddl.TaskId("c4"), Action: "leaf", TotalDependencies: intAddr(2)}, + {Id: ddl.TaskId("c5"), Action: "leaf", TotalDependencies: intAddr(2)}, + {Id: ddl.TaskId("c6"), Action: "leaf", TotalDependencies: intAddr(1)}, + }) + + type testCase struct { + title string + casePrefix string + toDelete []ddl.TaskId + expectedRemaining []ddl.TaskId + } + cases := []testCase{ + {title: "chain with extra link", casePrefix: "a", toDelete: []ddl.TaskId{"a0"}}, + {title: "delete only one dep", casePrefix: "b", toDelete: []ddl.TaskId{"b0"}, expectedRemaining: []ddl.TaskId{"b1", "b2"}}, + {title: "treelike", casePrefix: "c", toDelete: []ddl.TaskId{"c0"}}, + } + prefix := t.Name() + for _, c := range cases { + t.Run(c.title, func(t *testing.T) { + casePrefix := fmt.Sprint(prefix, ".", c.casePrefix) + if err := w.deleteTasks(c.toDelete); err != nil { + t.Errorf("DeleteTasks failed: %s", err) + } + + rows, err := w.db.Query(`SELECT id FROM tasks WHERE id LIKE format('%s%%', $1::text)`, casePrefix) + if err != nil { + t.Errorf("[I] select remaining IDs for prefix %s: %s", casePrefix, err) + } + + defer func() { + if err := rows.Close(); err != nil { + t.Fatalf("[I] remaining ids iterator close: %s", err) + } + }() + gotRemaining := make([]ddl.TaskId, 0, len(c.expectedRemaining)) + for rows.Next() { + var id ddl.TaskId + if err = rows.Scan(&id); err != nil { + t.Errorf("[I] scan ID value: %s", err) + } + gotRemaining = append(gotRemaining, id) + } + sort.Sort(taskIdSlice(gotRemaining)) + expectedRemaining := c.expectedRemaining + if expectedRemaining == nil { + expectedRemaining = []ddl.TaskId{} + } + for i, e := range expectedRemaining { + expectedRemaining[i] = w.prefixTask(e) + } + sort.Sort(taskIdSlice(expectedRemaining)) + if diffs := deep.Equal(expectedRemaining, gotRemaining); diffs != nil { + t.Errorf("left with other IDs than expected: %s", diffs) + } + }) + } +} + func TestNotification(t *testing.T) { ctx := context.Background() w := wrapper{t, db} @@ -572,6 +674,7 @@ func TestNotification(t *testing.T) { if err != nil { t.Fatalf("pgx.Connect: %s", err) } + defer conn.Close(ctx) type result struct { status string From b9c84e557cb559f3c22f9f4613fcebcef7710cc3 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Mon, 21 Sep 2020 09:57:23 +0300 Subject: [PATCH 010/158] Use deleteTasks to clean up every test Allows re-running the same tests on the same DB (when testing with `--postgres-url`, presumably). 
Tests that fail halfway through may leave an unclean DB; this may be considered a debugging feature. --- ddl/ddl_test.go | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index df7ca806889..33cbd6de29e 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -152,7 +152,7 @@ func (w wrapper) stripActor(actor ddl.TaskId) ddl.ActorId { return ddl.ActorId(w.strip(string(actor))) } -func (w wrapper) insertTasks(tasks []ddl.TaskData) { +func (w wrapper) insertTasks(tasks []ddl.TaskData) func() { w.t.Helper() ctx := context.Background() conn, err := pgx.Connect(ctx, databaseURI) @@ -181,6 +181,13 @@ func (w wrapper) insertTasks(tasks []ddl.TaskData) { if err != nil { w.t.Fatalf("InsertTasks: %s", err) } + + // Create cleanup callback. Compute the ids now, tasks may change later. + ids := make([]ddl.TaskId, 0, len(tasks)) + for _, task := range tasks { + ids = append(ids, task.Id) + } + return func() { w.deleteTasks(ids) } } func (w wrapper) deleteTasks(ids []ddl.TaskId) error { @@ -318,12 +325,13 @@ func TestTaskDataIterator_Values(t *testing.T) { func TestOwn(t *testing.T) { w := wrapper{t, db} - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: "000", Action: "never"}, {Id: "111", Action: "frob"}, {Id: "123", Action: "broz"}, {Id: "222", Action: "broz"}, }) + defer cleanup() tasks, err := w.ownTasks(ddl.ActorId("tester"), 2, []string{"frob", "broz"}, nil) if err != nil { t.Errorf("first own_tasks query: %s", err) @@ -357,10 +365,11 @@ func TestOwnBody(t *testing.T) { val := "\"the quick brown fox jumps over the lazy dog\"" - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: "body", Action: "yes", Body: &val}, {Id: "nobody", Action: "no"}, }) + defer cleanup() tasks, err := w.ownTasks(ddl.ActorId("somebody"), 2, []string{"yes", "no"}, nil) if err != nil { @@ -386,9 +395,11 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { second := 1 * time.Second w := wrapper{t, db} - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: "111", Action: "frob"}, }) + defer cleanup() + _, err := w.ownTasks(ddl.ActorId("tortoise"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to setup tortoise task ownership: %s", err) @@ -415,11 +426,12 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { func TestReturnTask_DirectlyAndRetry(t *testing.T) { w := wrapper{t, db} - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: "111", Action: "frob"}, {Id: "123", Action: "broz"}, {Id: "222", Action: "broz"}, }) + defer cleanup() tasks, err := w.ownTasks(ddl.ActorId("foo"), 4, []string{"frob", "broz"}, nil) if err != nil { @@ -458,9 +470,10 @@ func TestReturnTask_RetryMulti(t *testing.T) { maxTries := 7 lifetime := 250 * time.Millisecond - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: "111", Action: "frob", MaxTries: &maxTries}, }) + defer cleanup() for i := 0; i < maxTries; i++ { tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, &lifetime) @@ -532,7 +545,8 @@ func TestDependencies(t *testing.T) { TotalDependencies: &numDivisors, }) } - w.insertTasks(taskData) + cleanup := w.insertTasks(taskData) + defer cleanup() doneSet := make(map[int]struct{}, num) @@ -568,7 +582,7 @@ func TestDeleteTasks(t *testing.T) { // Delete tasks requires whitebox testing, to ensure tasks really are deleted. 
w := wrapper{t, db} - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: ddl.TaskId("a0"), Action: "root", ToSignal: []ddl.TaskId{"a1", "a3"}}, {Id: ddl.TaskId("a1"), Action: "dep", ToSignal: []ddl.TaskId{"a2"}, TotalDependencies: intAddr(1)}, {Id: ddl.TaskId("a2"), Action: "dep", ToSignal: []ddl.TaskId{"a3"}, TotalDependencies: intAddr(1)}, @@ -586,6 +600,7 @@ func TestDeleteTasks(t *testing.T) { {Id: ddl.TaskId("c5"), Action: "leaf", TotalDependencies: intAddr(2)}, {Id: ddl.TaskId("c6"), Action: "leaf", TotalDependencies: intAddr(1)}, }) + defer cleanup() type testCase struct { title string @@ -658,9 +673,10 @@ func TestNotification(t *testing.T) { for _, c := range cases { t.Run(c.title, func(t *testing.T) { - w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]ddl.TaskData{ {Id: c.id, Action: "frob"}, }) + defer cleanup() tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, nil) if err != nil { @@ -715,7 +731,9 @@ func BenchmarkFanIn(b *testing.B) { tasks = append(tasks, ddl.TaskData{Id: id(i), Action: "part", ToSignal: toSignal}) } tasks = append(tasks, ddl.TaskData{Id: "done", Action: "done"}) - w.insertTasks(tasks) + cleanup := w.insertTasks(tasks) + + defer cleanup() type result struct { count int From e8d76e81a28f0fd7b39905b992d4f882caca4e8a Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Wed, 23 Sep 2020 10:17:40 +0300 Subject: [PATCH 011/158] Rename ddl/ddl* -> parade/ddl* (Rewriting git history is hard...) --- go.mod | 1 + go.sum | 2 + {ddl => parade}/ddl.go | 2 +- {ddl => parade}/ddl.sql | 0 {ddl => parade}/ddl_test.go | 210 ++++++++++++++++++------------------ 5 files changed, 109 insertions(+), 106 deletions(-) rename {ddl => parade}/ddl.go (99%) rename {ddl => parade}/ddl.sql (100%) rename {ddl => parade}/ddl_test.go (70%) diff --git a/go.mod b/go.mod index b3ad898f4f1..e0fe58e55a0 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( github.com/lunixbochs/vtclean v1.0.0 // indirect github.com/mailru/easyjson v0.7.2 // indirect github.com/manifoldco/promptui v0.7.0 + github.com/matoous/go-nanoid v1.4.1 github.com/mattn/go-sqlite3 v1.14.0 // indirect github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.3.3 // indirect diff --git a/go.sum b/go.sum index 63c90368014..3512c81d01e 100644 --- a/go.sum +++ b/go.sum @@ -749,6 +749,8 @@ github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpAp github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/matoous/go-nanoid v1.4.1 h1:Yag04X+qPMDtYbyJsMDhoe8rP5kRl293b2QK8KRp2SE= +github.com/matoous/go-nanoid v1.4.1/go.mod h1:fvGBnhcQ+zcrB3qJIG32PAN11J/y1IYkGX2/VeHzuH0= github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE= github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= diff --git a/ddl/ddl.go b/parade/ddl.go similarity index 99% rename from ddl/ddl.go rename to parade/ddl.go index 4aa267b4eb4..fc5f1b94b00 100644 --- a/ddl/ddl.go +++ b/parade/ddl.go @@ -1,4 +1,4 @@ -package ddl +package parade import ( "context" diff --git a/ddl/ddl.sql b/parade/ddl.sql similarity index 
100% rename from ddl/ddl.sql rename to parade/ddl.sql diff --git a/ddl/ddl_test.go b/parade/ddl_test.go similarity index 70% rename from ddl/ddl_test.go rename to parade/ddl_test.go index 33cbd6de29e..aa100ae74c6 100644 --- a/ddl/ddl_test.go +++ b/parade/ddl_test.go @@ -1,4 +1,4 @@ -package ddl_test +package parade_test import ( "context" @@ -17,7 +17,7 @@ import ( "github.com/go-test/deep" "github.com/jackc/pgx/v4" _ "github.com/jackc/pgx/v4/stdlib" - "github.com/treeverse/parade/ddl" + "github.com/treeverse/lakefs/parade" "github.com/jmoiron/sqlx" "github.com/ory/dockertest/v3" @@ -40,7 +40,7 @@ var ( ) // taskIdSlice attaches the methods of sort.Interface to []TaskId. -type taskIdSlice []ddl.TaskId +type taskIdSlice []parade.TaskId func (p taskIdSlice) Len() int { return len(p) } func (p taskIdSlice) Less(i, j int) bool { return p[i] < p[j] } @@ -136,23 +136,23 @@ func (w wrapper) strip(s string) string { return strings.TrimPrefix(s, w.t.Name()+".") } -func (w wrapper) prefixTask(id ddl.TaskId) ddl.TaskId { - return ddl.TaskId(w.prefix(string(id))) +func (w wrapper) prefixTask(id parade.TaskId) parade.TaskId { + return parade.TaskId(w.prefix(string(id))) } -func (w wrapper) stripTask(id ddl.TaskId) ddl.TaskId { - return ddl.TaskId(w.strip(string(id))) +func (w wrapper) stripTask(id parade.TaskId) parade.TaskId { + return parade.TaskId(w.strip(string(id))) } -func (w wrapper) prefixActor(actor ddl.ActorId) ddl.ActorId { - return ddl.ActorId(w.prefix(string(actor))) +func (w wrapper) prefixActor(actor parade.ActorId) parade.ActorId { + return parade.ActorId(w.prefix(string(actor))) } -func (w wrapper) stripActor(actor ddl.TaskId) ddl.ActorId { - return ddl.ActorId(w.strip(string(actor))) +func (w wrapper) stripActor(actor parade.TaskId) parade.ActorId { + return parade.ActorId(w.strip(string(actor))) } -func (w wrapper) insertTasks(tasks []ddl.TaskData) func() { +func (w wrapper) insertTasks(tasks []parade.TaskData) func() { w.t.Helper() ctx := context.Background() conn, err := pgx.Connect(ctx, databaseURI) @@ -161,7 +161,7 @@ func (w wrapper) insertTasks(tasks []ddl.TaskData) func() { } defer conn.Close(ctx) - prefixedTasks := make([]ddl.TaskData, len(tasks)) + prefixedTasks := make([]parade.TaskData, len(tasks)) for i := 0; i < len(tasks); i++ { copy := tasks[i] copy.Id = w.prefixTask(copy.Id) @@ -170,28 +170,28 @@ func (w wrapper) insertTasks(tasks []ddl.TaskData) func() { if copy.StatusCode == "" { copy.StatusCode = "pending" } - toSignal := make([]ddl.TaskId, len(copy.ToSignal)) + toSignal := make([]parade.TaskId, len(copy.ToSignal)) for j := 0; j < len(toSignal); j++ { toSignal[j] = w.prefixTask(copy.ToSignal[j]) } copy.ToSignal = toSignal prefixedTasks[i] = copy } - err = ddl.InsertTasks(ctx, conn, &ddl.TaskDataIterator{Data: prefixedTasks}) + err = parade.InsertTasks(ctx, conn, ¶de.TaskDataIterator{Data: prefixedTasks}) if err != nil { w.t.Fatalf("InsertTasks: %s", err) } // Create cleanup callback. Compute the ids now, tasks may change later. 
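 	// (Capturing the ids eagerly makes the returned cleanup closure immune to
 	// the caller later appending to or reusing the tasks slice.)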
- ids := make([]ddl.TaskId, 0, len(tasks)) + ids := make([]parade.TaskId, 0, len(tasks)) for _, task := range tasks { ids = append(ids, task.Id) } return func() { w.deleteTasks(ids) } } -func (w wrapper) deleteTasks(ids []ddl.TaskId) error { - prefixedIds := make([]ddl.TaskId, len(ids)) +func (w wrapper) deleteTasks(ids []parade.TaskId) error { + prefixedIds := make([]parade.TaskId, len(ids)) for i := 0; i < len(ids); i++ { prefixedIds[i] = w.prefixTask(ids[i]) } @@ -205,7 +205,7 @@ func (w wrapper) deleteTasks(ids []ddl.TaskId) error { } }() - if err = ddl.DeleteTasks(tx, prefixedIds); err != nil { + if err = parade.DeleteTasks(tx, prefixedIds); err != nil { return err } @@ -216,16 +216,16 @@ func (w wrapper) deleteTasks(ids []ddl.TaskId) error { return nil } -func (w wrapper) returnTask(taskId ddl.TaskId, token ddl.PerformanceToken, resultStatus string, resultStatusCode ddl.TaskStatusCodeValue) error { - return ddl.ReturnTask(w.db, w.prefixTask(taskId), token, resultStatus, resultStatusCode) +func (w wrapper) returnTask(taskId parade.TaskId, token parade.PerformanceToken, resultStatus string, resultStatusCode parade.TaskStatusCodeValue) error { + return parade.ReturnTask(w.db, w.prefixTask(taskId), token, resultStatus, resultStatusCode) } -func (w wrapper) ownTasks(actorId ddl.ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]ddl.OwnedTaskData, error) { +func (w wrapper) ownTasks(actorId parade.ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]parade.OwnedTaskData, error) { prefixedActions := make([]string, len(actions)) for i, action := range actions { prefixedActions[i] = w.prefix(action) } - tasks, err := ddl.OwnTasks(w.db, actorId, maxTasks, prefixedActions, maxDuration) + tasks, err := parade.OwnTasks(w.db, actorId, maxTasks, prefixedActions, maxDuration) if tasks != nil { for i := 0; i < len(tasks); i++ { task := &tasks[i] @@ -239,7 +239,7 @@ func stringAddr(s string) *string { return &s } -func performanceTokenAddr(p ddl.PerformanceToken) *ddl.PerformanceToken { +func performanceTokenAddr(p parade.PerformanceToken) *parade.PerformanceToken { return &p } @@ -248,7 +248,7 @@ func intAddr(i int) *int { } func TestTaskDataIterator_Empty(t *testing.T) { - it := ddl.TaskDataIterator{Data: []ddl.TaskData{}} + it := parade.TaskDataIterator{Data: []parade.TaskData{}} if it.Err() != nil { t.Errorf("expected empty new iterator to have no errors, got %s", it.Err()) } @@ -261,26 +261,26 @@ func TestTaskDataIterator_Empty(t *testing.T) { if it.Next() { t.Errorf("expected advanced empty new iterator %+v not to advance", it) } - if !errors.Is(it.Err(), ddl.NoMoreDataError) { + if !errors.Is(it.Err(), parade.NoMoreDataError) { t.Errorf("expected twice-advanced iterator to raise NoMoreDataError, got %s", it.Err()) } } func TestTaskDataIterator_Values(t *testing.T) { now := time.Now() - tasks := []ddl.TaskData{ + tasks := []parade.TaskData{ {Id: "000", Action: "zero", StatusCode: "enum values enforced on DB"}, {Id: "111", Action: "frob", Body: stringAddr("1"), Status: stringAddr("state"), StatusCode: "pending", NumTries: 11, MaxTries: intAddr(17), TotalDependencies: intAddr(9), - ToSignal: []ddl.TaskId{ddl.TaskId("foo"), ddl.TaskId("bar")}, - ActorId: ddl.ActorId("actor"), ActionDeadline: &now, - PerformanceToken: performanceTokenAddr(ddl.PerformanceToken{}), + ToSignal: []parade.TaskId{parade.TaskId("foo"), parade.TaskId("bar")}, + ActorId: parade.ActorId("actor"), ActionDeadline: &now, + PerformanceToken: performanceTokenAddr(parade.PerformanceToken{}), 
FinishChannelName: stringAddr("done"), }, } - it := ddl.TaskDataIterator{Data: tasks} + it := parade.TaskDataIterator{Data: tasks} for index, task := range tasks { if it.Err() != nil { @@ -311,10 +311,10 @@ func TestTaskDataIterator_Values(t *testing.T) { if it.Next() { t.Errorf("expected iterator %+v to end after tasks done", it) } - if _, err := it.Values(); !errors.Is(err, ddl.NoMoreDataError) { + if _, err := it.Values(); !errors.Is(err, parade.NoMoreDataError) { t.Errorf("expected NoMoreData after iterator done, got %s", err) } - if !errors.Is(it.Err(), ddl.NoMoreDataError) { + if !errors.Is(it.Err(), parade.NoMoreDataError) { t.Errorf("expected iterator Err() to repeat NoMoreDataError, got %s", it.Err()) } if it.Next() { @@ -325,14 +325,14 @@ func TestTaskDataIterator_Values(t *testing.T) { func TestOwn(t *testing.T) { w := wrapper{t, db} - cleanup := w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]parade.TaskData{ {Id: "000", Action: "never"}, {Id: "111", Action: "frob"}, {Id: "123", Action: "broz"}, {Id: "222", Action: "broz"}, }) defer cleanup() - tasks, err := w.ownTasks(ddl.ActorId("tester"), 2, []string{"frob", "broz"}, nil) + tasks, err := w.ownTasks(parade.ActorId("tester"), 2, []string{"frob", "broz"}, nil) if err != nil { t.Errorf("first own_tasks query: %s", err) } @@ -341,7 +341,7 @@ func TestOwn(t *testing.T) { } gotTasks := tasks - tasks, err = w.ownTasks(ddl.ActorId("tester-two"), 2, []string{"frob", "broz"}, nil) + tasks, err = w.ownTasks(parade.ActorId("tester-two"), 2, []string{"frob", "broz"}, nil) if err != nil { t.Errorf("second own_tasks query: %s", err) } @@ -350,12 +350,12 @@ func TestOwn(t *testing.T) { } gotTasks = append(gotTasks, tasks...) - gotIds := make([]ddl.TaskId, 0, len(gotTasks)) + gotIds := make([]parade.TaskId, 0, len(gotTasks)) for _, got := range gotTasks { gotIds = append(gotIds, got.Id) } sort.Sort(taskIdSlice(gotIds)) - if diffs := deep.Equal([]ddl.TaskId{"111", "123", "222"}, gotIds); diffs != nil { + if diffs := deep.Equal([]parade.TaskId{"111", "123", "222"}, gotIds); diffs != nil { t.Errorf("expected other task IDs: %s", diffs) } } @@ -365,13 +365,13 @@ func TestOwnBody(t *testing.T) { val := "\"the quick brown fox jumps over the lazy dog\"" - cleanup := w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]parade.TaskData{ {Id: "body", Action: "yes", Body: &val}, {Id: "nobody", Action: "no"}, }) defer cleanup() - tasks, err := w.ownTasks(ddl.ActorId("somebody"), 2, []string{"yes", "no"}, nil) + tasks, err := w.ownTasks(parade.ActorId("somebody"), 2, []string{"yes", "no"}, nil) if err != nil { t.Fatalf("own tasks: %s", err) } @@ -395,17 +395,17 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { second := 1 * time.Second w := wrapper{t, db} - cleanup := w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]parade.TaskData{ {Id: "111", Action: "frob"}, }) defer cleanup() - _, err := w.ownTasks(ddl.ActorId("tortoise"), 1, []string{"frob"}, &second) + _, err := w.ownTasks(parade.ActorId("tortoise"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to setup tortoise task ownership: %s", err) } - fastTasks, err := w.ownTasks(ddl.ActorId("hare"), 1, []string{"frob"}, &second) + fastTasks, err := w.ownTasks(parade.ActorId("hare"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to request fast task ownership: %s", err) } @@ -414,7 +414,7 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { } time.Sleep(2 * time.Second) - fastTasks, err = w.ownTasks(ddl.ActorId("hare"), 1, []string{"frob"}, 
&second) + fastTasks, err = w.ownTasks(parade.ActorId("hare"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to request fast task ownership after sleeping: %s", err) } @@ -426,40 +426,40 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { func TestReturnTask_DirectlyAndRetry(t *testing.T) { w := wrapper{t, db} - cleanup := w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]parade.TaskData{ {Id: "111", Action: "frob"}, {Id: "123", Action: "broz"}, {Id: "222", Action: "broz"}, }) defer cleanup() - tasks, err := w.ownTasks(ddl.ActorId("foo"), 4, []string{"frob", "broz"}, nil) + tasks, err := w.ownTasks(parade.ActorId("foo"), 4, []string{"frob", "broz"}, nil) if err != nil { t.Fatalf("acquire all tasks: %s", err) } - taskById := make(map[ddl.TaskId]*ddl.OwnedTaskData, len(tasks)) + taskById := make(map[parade.TaskId]*parade.OwnedTaskData, len(tasks)) for index := range tasks { taskById[tasks[index].Id] = &tasks[index] } - if err = w.returnTask(taskById[ddl.TaskId("111")].Id, taskById[ddl.TaskId("111")].Token, "done", ddl.TASK_COMPLETED); err != nil { + if err = w.returnTask(taskById[parade.TaskId("111")].Id, taskById[parade.TaskId("111")].Token, "done", parade.TASK_COMPLETED); err != nil { t.Errorf("return task 111: %s", err) } - if err = w.returnTask(taskById[ddl.TaskId("111")].Id, taskById[ddl.TaskId("111")].Token, "done", ddl.TASK_COMPLETED); !errors.Is(err, ddl.InvalidTokenError) { + if err = w.returnTask(taskById[parade.TaskId("111")].Id, taskById[parade.TaskId("111")].Token, "done", parade.TASK_COMPLETED); !errors.Is(err, parade.InvalidTokenError) { t.Errorf("expected second attempt to return task 111 to fail with InvalidTokenError, got %s", err) } // Now attempt to return a task to in-progress state. - if err = w.returnTask(taskById[ddl.TaskId("123")].Id, taskById[ddl.TaskId("123")].Token, "try-again", ddl.TASK_PENDING); err != nil { - t.Errorf("return task 123 (%+v) for another round: %s", taskById[ddl.TaskId("123")], err) + if err = w.returnTask(taskById[parade.TaskId("123")].Id, taskById[parade.TaskId("123")].Token, "try-again", parade.TASK_PENDING); err != nil { + t.Errorf("return task 123 (%+v) for another round: %s", taskById[parade.TaskId("123")], err) } - moreTasks, err := w.ownTasks(ddl.ActorId("foo"), 4, []string{"frob", "broz"}, nil) + moreTasks, err := w.ownTasks(parade.ActorId("foo"), 4, []string{"frob", "broz"}, nil) if err != nil { t.Fatalf("re-acquire task 123: %s", err) } - if len(moreTasks) != 1 || moreTasks[0].Id != ddl.TaskId("123") { + if len(moreTasks) != 1 || moreTasks[0].Id != parade.TaskId("123") { t.Errorf("expected to receive only task 123 but got tasks %+v", moreTasks) } } @@ -470,13 +470,13 @@ func TestReturnTask_RetryMulti(t *testing.T) { maxTries := 7 lifetime := 250 * time.Millisecond - cleanup := w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]parade.TaskData{ {Id: "111", Action: "frob", MaxTries: &maxTries}, }) defer cleanup() for i := 0; i < maxTries; i++ { - tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, &lifetime) + tasks, err := w.ownTasks(parade.ActorId("foo"), 1, []string{"frob"}, &lifetime) if err != nil { t.Errorf("acquire task after %d/%d tries: %s", i, maxTries, err) } @@ -486,13 +486,13 @@ func TestReturnTask_RetryMulti(t *testing.T) { if i%2 == 0 { time.Sleep(2 * lifetime) } else { - if err = w.returnTask(tasks[0].Id, tasks[0].Token, "retry", ddl.TASK_PENDING); err != nil { + if err = w.returnTask(tasks[0].Id, tasks[0].Token, "retry", parade.TASK_PENDING); err != nil { t.Fatalf("return 
task %+v after %d/%d tries: %s", tasks[0], i, maxTries, err) } } } - tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, &lifetime) + tasks, err := w.ownTasks(parade.ActorId("foo"), 1, []string{"frob"}, &lifetime) if err != nil { t.Fatalf("re-acquire task failed: %s", err) } @@ -504,8 +504,8 @@ func TestReturnTask_RetryMulti(t *testing.T) { func TestDependencies(t *testing.T) { w := wrapper{t, db} - id := func(n int) ddl.TaskId { - return ddl.TaskId(fmt.Sprintf("number-%d", n)) + id := func(n int) parade.TaskId { + return parade.TaskId(fmt.Sprintf("number-%d", n)) } makeBody := func(n int) *string { ret := fmt.Sprintf("%d", n) @@ -524,9 +524,9 @@ func TestDependencies(t *testing.T) { // tasks have been executed. This provides an interesting graph of dependencies that is // also easy to check. const num = 63 - taskData := make([]ddl.TaskData, 0, num) + taskData := make([]parade.TaskData, 0, num) for i := 1; i <= num; i++ { - toSignal := make([]ddl.TaskId, 0, i/20) + toSignal := make([]parade.TaskId, 0, i/20) for j := 2 * i; j <= num; j += i { toSignal = append(toSignal, id(j)) } @@ -537,7 +537,7 @@ func TestDependencies(t *testing.T) { } } - taskData = append(taskData, ddl.TaskData{ + taskData = append(taskData, parade.TaskData{ Id: id(i), Action: "div", Body: makeBody(i), @@ -551,7 +551,7 @@ func TestDependencies(t *testing.T) { doneSet := make(map[int]struct{}, num) for { - tasks, err := w.ownTasks(ddl.ActorId("foo"), 17, []string{"div"}, nil) + tasks, err := w.ownTasks(parade.ActorId("foo"), 17, []string{"div"}, nil) if err != nil { t.Fatalf("acquire tasks with done %+v: %s", doneSet, err) } @@ -568,7 +568,7 @@ func TestDependencies(t *testing.T) { } } } - if err = w.returnTask(task.Id, task.Token, "divided", ddl.TASK_COMPLETED); err != nil { + if err = w.returnTask(task.Id, task.Token, "divided", parade.TASK_COMPLETED); err != nil { t.Errorf("failed to complete task %+v: %s", task, err) } } @@ -582,36 +582,36 @@ func TestDeleteTasks(t *testing.T) { // Delete tasks requires whitebox testing, to ensure tasks really are deleted. 
w := wrapper{t, db} - cleanup := w.insertTasks([]ddl.TaskData{ - {Id: ddl.TaskId("a0"), Action: "root", ToSignal: []ddl.TaskId{"a1", "a3"}}, - {Id: ddl.TaskId("a1"), Action: "dep", ToSignal: []ddl.TaskId{"a2"}, TotalDependencies: intAddr(1)}, - {Id: ddl.TaskId("a2"), Action: "dep", ToSignal: []ddl.TaskId{"a3"}, TotalDependencies: intAddr(1)}, - {Id: ddl.TaskId("a3"), Action: "leaf", TotalDependencies: intAddr(2)}, - - {Id: ddl.TaskId("b0"), Action: "root", ToSignal: []ddl.TaskId{"b1"}}, - {Id: ddl.TaskId("b1"), Action: "root-keep", ToSignal: []ddl.TaskId{"b2"}}, - {Id: ddl.TaskId("b2"), Action: "leaf", TotalDependencies: intAddr(2)}, - - {Id: ddl.TaskId("c0"), Action: "root", ToSignal: []ddl.TaskId{"c1", "c2"}}, - {Id: ddl.TaskId("c1"), Action: "dep", ToSignal: []ddl.TaskId{"c3", "c4"}, TotalDependencies: intAddr(1)}, - {Id: ddl.TaskId("c2"), Action: "dep", ToSignal: []ddl.TaskId{"c4", "c5"}, TotalDependencies: intAddr(1)}, - {Id: ddl.TaskId("c3"), Action: "dep", ToSignal: []ddl.TaskId{"c5", "c6"}, TotalDependencies: intAddr(1)}, - {Id: ddl.TaskId("c4"), Action: "leaf", TotalDependencies: intAddr(2)}, - {Id: ddl.TaskId("c5"), Action: "leaf", TotalDependencies: intAddr(2)}, - {Id: ddl.TaskId("c6"), Action: "leaf", TotalDependencies: intAddr(1)}, + cleanup := w.insertTasks([]parade.TaskData{ + {Id: parade.TaskId("a0"), Action: "root", ToSignal: []parade.TaskId{"a1", "a3"}}, + {Id: parade.TaskId("a1"), Action: "dep", ToSignal: []parade.TaskId{"a2"}, TotalDependencies: intAddr(1)}, + {Id: parade.TaskId("a2"), Action: "dep", ToSignal: []parade.TaskId{"a3"}, TotalDependencies: intAddr(1)}, + {Id: parade.TaskId("a3"), Action: "leaf", TotalDependencies: intAddr(2)}, + + {Id: parade.TaskId("b0"), Action: "root", ToSignal: []parade.TaskId{"b1"}}, + {Id: parade.TaskId("b1"), Action: "root-keep", ToSignal: []parade.TaskId{"b2"}}, + {Id: parade.TaskId("b2"), Action: "leaf", TotalDependencies: intAddr(2)}, + + {Id: parade.TaskId("c0"), Action: "root", ToSignal: []parade.TaskId{"c1", "c2"}}, + {Id: parade.TaskId("c1"), Action: "dep", ToSignal: []parade.TaskId{"c3", "c4"}, TotalDependencies: intAddr(1)}, + {Id: parade.TaskId("c2"), Action: "dep", ToSignal: []parade.TaskId{"c4", "c5"}, TotalDependencies: intAddr(1)}, + {Id: parade.TaskId("c3"), Action: "dep", ToSignal: []parade.TaskId{"c5", "c6"}, TotalDependencies: intAddr(1)}, + {Id: parade.TaskId("c4"), Action: "leaf", TotalDependencies: intAddr(2)}, + {Id: parade.TaskId("c5"), Action: "leaf", TotalDependencies: intAddr(2)}, + {Id: parade.TaskId("c6"), Action: "leaf", TotalDependencies: intAddr(1)}, }) defer cleanup() type testCase struct { title string casePrefix string - toDelete []ddl.TaskId - expectedRemaining []ddl.TaskId + toDelete []parade.TaskId + expectedRemaining []parade.TaskId } cases := []testCase{ - {title: "chain with extra link", casePrefix: "a", toDelete: []ddl.TaskId{"a0"}}, - {title: "delete only one dep", casePrefix: "b", toDelete: []ddl.TaskId{"b0"}, expectedRemaining: []ddl.TaskId{"b1", "b2"}}, - {title: "treelike", casePrefix: "c", toDelete: []ddl.TaskId{"c0"}}, + {title: "chain with extra link", casePrefix: "a", toDelete: []parade.TaskId{"a0"}}, + {title: "delete only one dep", casePrefix: "b", toDelete: []parade.TaskId{"b0"}, expectedRemaining: []parade.TaskId{"b1", "b2"}}, + {title: "treelike", casePrefix: "c", toDelete: []parade.TaskId{"c0"}}, } prefix := t.Name() for _, c := range cases { @@ -631,9 +631,9 @@ func TestDeleteTasks(t *testing.T) { t.Fatalf("[I] remaining ids iterator close: %s", err) } }() - gotRemaining := 
make([]ddl.TaskId, 0, len(c.expectedRemaining)) + gotRemaining := make([]parade.TaskId, 0, len(c.expectedRemaining)) for rows.Next() { - var id ddl.TaskId + var id parade.TaskId if err = rows.Scan(&id); err != nil { t.Errorf("[I] scan ID value: %s", err) } @@ -642,7 +642,7 @@ func TestDeleteTasks(t *testing.T) { sort.Sort(taskIdSlice(gotRemaining)) expectedRemaining := c.expectedRemaining if expectedRemaining == nil { - expectedRemaining = []ddl.TaskId{} + expectedRemaining = []parade.TaskId{} } for i, e := range expectedRemaining { expectedRemaining[i] = w.prefixTask(e) @@ -661,24 +661,24 @@ func TestNotification(t *testing.T) { type testCase struct { title string - id ddl.TaskId + id parade.TaskId status string - statusCode ddl.TaskStatusCodeValue + statusCode parade.TaskStatusCodeValue } cases := []testCase{ - {"task aborted", ddl.TaskId("111"), "b0rked!", ddl.TASK_ABORTED}, - {"task succeeded", ddl.TaskId("222"), "yay!", ddl.TASK_COMPLETED}, + {"task aborted", parade.TaskId("111"), "b0rked!", parade.TASK_ABORTED}, + {"task succeeded", parade.TaskId("222"), "yay!", parade.TASK_COMPLETED}, } for _, c := range cases { t.Run(c.title, func(t *testing.T) { - cleanup := w.insertTasks([]ddl.TaskData{ + cleanup := w.insertTasks([]parade.TaskData{ {Id: c.id, Action: "frob"}, }) defer cleanup() - tasks, err := w.ownTasks(ddl.ActorId("foo"), 1, []string{"frob"}, nil) + tasks, err := w.ownTasks(parade.ActorId("foo"), 1, []string{"frob"}, nil) if err != nil { t.Fatalf("acquire task: %s", err) } @@ -694,12 +694,12 @@ func TestNotification(t *testing.T) { type result struct { status string - statusCode ddl.TaskStatusCodeValue + statusCode parade.TaskStatusCodeValue err error } ch := make(chan result) go func() { - status, statusCode, err := ddl.WaitForTask(ctx, conn, ddl.TaskId("111")) + status, statusCode, err := parade.WaitForTask(ctx, conn, parade.TaskId("111")) ch <- result{status, statusCode, err} }() @@ -721,16 +721,16 @@ func BenchmarkFanIn(b *testing.B) { w := wrapper{b, db} - id := func(n int) ddl.TaskId { - return ddl.TaskId(fmt.Sprintf("in:%08d", n)) + id := func(n int) parade.TaskId { + return parade.TaskId(fmt.Sprintf("in:%08d", n)) } - tasks := make([]ddl.TaskData, 0, numTasks+1) - toSignal := []ddl.TaskId{"done"} + tasks := make([]parade.TaskData, 0, numTasks+1) + toSignal := []parade.TaskId{"done"} for i := 0; i < numTasks; i++ { - tasks = append(tasks, ddl.TaskData{Id: id(i), Action: "part", ToSignal: toSignal}) + tasks = append(tasks, parade.TaskData{Id: id(i), Action: "part", ToSignal: toSignal}) } - tasks = append(tasks, ddl.TaskData{Id: "done", Action: "done"}) + tasks = append(tasks, parade.TaskData{Id: "done", Action: "done"}) cleanup := w.insertTasks(tasks) defer cleanup() @@ -753,7 +753,7 @@ func BenchmarkFanIn(b *testing.B) { receivedDone := false for { size := *bulk + int(rand.Int31n(int32(*bulk/10))) - *bulk/10 - tasks, err := w.ownTasks(ddl.ActorId(fmt.Sprintf("worker-%d", i)), size, []string{"part", "done"}, nil) + tasks, err := w.ownTasks(parade.ActorId(fmt.Sprintf("worker-%d", i)), size, []string{"part", "done"}, nil) if err != nil { resultCh[i] <- result{err: err} return @@ -767,7 +767,7 @@ func BenchmarkFanIn(b *testing.B) { } else { receivedDone = true } - w.returnTask(task.Id, task.Token, "ok", ddl.TASK_COMPLETED) + w.returnTask(task.Id, task.Token, "ok", parade.TASK_COMPLETED) } } resultCh[i] <- result{count: count, receivedDone: receivedDone} From a43b711c3eb80f81370a79f50f2841775b464752 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 24 Sep 
2020 11:23:15 +0300 Subject: [PATCH 012/158] Speed up DeleteTasks: load task IDs to delete with COPY FROM --- go.sum | 1 + parade/ddl.go | 56 +++++++++++++++++++++++++++++++++------------- parade/ddl.sql | 7 +++--- parade/ddl_test.go | 42 +++++++++++++++++++++++++--------- 4 files changed, 77 insertions(+), 29 deletions(-) diff --git a/go.sum b/go.sum index 3512c81d01e..0a7eac26858 100644 --- a/go.sum +++ b/go.sum @@ -1072,6 +1072,7 @@ github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa h1:RC4maTWLK github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= +github.com/treeverse/lakeFS v0.10.2 h1:tIHuECSHU2DTRxk8PYZ/UWYLW5u1xBmABwjEXlSo1Fk= github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= github.com/tsenart/vegeta/v12 v12.8.3 h1:UEsDkSrEJojMKW/xr7KUv4H/bYykX+V48KCsPZPqEfk= github.com/tsenart/vegeta/v12 v12.8.3/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho= diff --git a/parade/ddl.go b/parade/ddl.go index fc5f1b94b00..46399fa7a51 100644 --- a/parade/ddl.go +++ b/parade/ddl.go @@ -157,9 +157,10 @@ func InsertTasks(ctx context.Context, pgConn *pgx.Conn, source *TaskDataIterator // OwnedTaskData is a row returned from "SELECT * FROM own_tasks(...)". type OwnedTaskData struct { - Id TaskId `db:"task_id"` - Token PerformanceToken `db:"token"` - Body *string + Id TaskId `db:"task_id"` + Token PerformanceToken `db:"token"` + Action string `db:"action"` + Body *string } // OwnTasks owns for actor and returns up to maxTasks tasks for performing any of actions. @@ -239,27 +240,50 @@ func WaitForTask(ctx context.Context, conn *pgx.Conn, taskId TaskId) (resultStat return status, statusCode, err } +// taskWithNewIterator is a pgx.CopyFromSource iterator that passes each task along with a +// 'new' copy status. +type taskWithNewIterator struct { + tasks []TaskId + idx int +} + +func (it *taskWithNewIterator) Next() bool { + it.idx++ + return it.idx < len(it.tasks) +} + +func (it *taskWithNewIterator) Values() ([]interface{}, error) { + return []interface{}{it.tasks[it.idx], "new"}, nil +} + +func (it *taskWithNewIterator) Err() error { + return nil +} + +func makeTaskWithNewIterator(tasks []TaskId) *taskWithNewIterator { + return &taskWithNewIterator{tasks, -1} +} + // DeleteTasks deletes taskIds, removing dependencies and deleting (effectively recursively) any -// tasks that are left with no dependencies. The effect is easiest to analyze when all deleted -// tasks have been either completed or been aborted. -func DeleteTasks(tx *sqlx.Tx, taskIds []TaskId) error { +// tasks that are left with no dependencies. It creates a temporary table on tx, so ideally +// close the transaction shortly after. The effect is easiest to analyze when all deleted tasks +// have been either completed or been aborted. 
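+//
+// A minimal calling sketch (variable names are illustrative only):
+//
+//	tx, err := conn.Begin(ctx) // conn is a pgx connection
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback(ctx) // harmless after a successful Commit
+//	if err := DeleteTasks(ctx, tx, taskIds); err != nil {
+//		return err
+//	}
+//	return tx.Commit(ctx)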
+func DeleteTasks(ctx context.Context, tx pgx.Tx, taskIds []TaskId) error { uniqueId, err := nanoid.Nanoid() if err != nil { return fmt.Errorf("generate random component for table name: %w", err) } tableName := fmt.Sprintf("delete_tasks_%s", uniqueId) - if _, err = tx.Exec(fmt.Sprintf(`CREATE TABLE "%s" (id VARCHAR(64), mark tasks_recurse_value NOT NULL)`, tableName)); err != nil { + if _, err = tx.Exec( + ctx, + fmt.Sprintf(`CREATE TEMP TABLE "%s" (id VARCHAR(64), mark tasks_recurse_value NOT NULL) ON COMMIT DROP`, tableName), + ); err != nil { return fmt.Errorf("create temp work table %s: %w", tableName, err) } - insertStmt, err := tx.Prepare(fmt.Sprintf(`INSERT INTO "%s" VALUES($1, 'new')`, tableName)) - if err != nil { - return fmt.Errorf("prepare INSERT statement: %w", err) - } - for _, id := range taskIds { - if _, err = insertStmt.Exec(id); err != nil { - return fmt.Errorf("insert ID %s: %w", id, err) - } + + if _, err = tx.CopyFrom(ctx, pgx.Identifier{tableName}, []string{"id", "mark"}, makeTaskWithNewIterator(taskIds)); err != nil { + return fmt.Errorf("COPY: %w", err) } - _, err = tx.Exec(`SELECT delete_tasks($1)`, tableName) + _, err = tx.Exec(ctx, `SELECT delete_tasks($1)`, tableName) return err } diff --git a/parade/ddl.sql b/parade/ddl.sql index e513eca9cb0..381df9e511f 100644 --- a/parade/ddl.sql +++ b/parade/ddl.sql @@ -53,7 +53,7 @@ $$; CREATE OR REPLACE FUNCTION own_tasks( max_tasks INTEGER, actions VARCHAR(128) ARRAY, owner_id VARCHAR(64), max_duration INTERVAL ) -RETURNS TABLE(task_id VARCHAR(64), token UUID, body TEXT) +RETURNS TABLE(task_id VARCHAR(64), token UUID, action VARCHAR(128), body TEXT) LANGUAGE sql VOLATILE AS $$ UPDATE tasks SET actor_id = owner_id, @@ -69,9 +69,10 @@ LANGUAGE sql VOLATILE AS $$ (max_tries IS NULL OR num_tries < max_tries) -- maybe: AND not_before <= NOW() -- maybe: ORDER BY priority (eventually) - FOR NO KEY UPDATE SKIP LOCKED + ORDER BY random() + FOR UPDATE SKIP LOCKED LIMIT max_tasks) - RETURNING id, performance_token, body + RETURNING id, performance_token, action, body $$; -- Returns an owned task id that was locked with token. It is an error diff --git a/parade/ddl_test.go b/parade/ddl_test.go index aa100ae74c6..323df15fdd6 100644 --- a/parade/ddl_test.go +++ b/parade/ddl_test.go @@ -37,6 +37,7 @@ var ( parallelism = flag.Int("parallelism", 16, "Number of concurrent client worker goroutines.") bulk = flag.Int("bulk", 2_000, "Number of tasks to acquire at once in each client goroutine.") taskFactor = flag.Int("task-factor", 20_000, "Scale benchmark N by this many tasks") + numShards = flag.Int("num-shards", 400, "Number of intermediate fan-in shards") ) // taskIdSlice attaches the methods of sort.Interface to []TaskId. 
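 // (Tests sort both expected and actual ids with sort.Sort(taskIdSlice(...))
 // before comparing them with deep.Equal, so results are order-independent.)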
@@ -195,21 +196,26 @@ func (w wrapper) deleteTasks(ids []parade.TaskId) error { for i := 0; i < len(ids); i++ { prefixedIds[i] = w.prefixTask(ids[i]) } - tx, err := w.db.BeginTxx(context.Background(), nil) + ctx := context.Background() + conn, err := pgx.Connect(ctx, databaseURI) + if err != nil { + return fmt.Errorf("connect to DB: %w", err) + } + tx, err := conn.Begin(ctx) if err != nil { return fmt.Errorf("BEGIN: %w", err) } defer func() { if tx != nil { - tx.Rollback() + tx.Rollback(ctx) } }() - if err = parade.DeleteTasks(tx, prefixedIds); err != nil { + if err = parade.DeleteTasks(ctx, tx, prefixedIds); err != nil { return err } - if err = tx.Commit(); err != nil { + if err = tx.Commit(ctx); err != nil { return fmt.Errorf("COMMIT: %w", err) } tx = nil @@ -724,12 +730,20 @@ func BenchmarkFanIn(b *testing.B) { id := func(n int) parade.TaskId { return parade.TaskId(fmt.Sprintf("in:%08d", n)) } + shardId := func(n int) parade.TaskId { + return parade.TaskId(fmt.Sprintf("done:%05d", n)) + } - tasks := make([]parade.TaskData, 0, numTasks+1) - toSignal := []parade.TaskId{"done"} + tasks := make([]parade.TaskData, 0, numTasks+*numShards+1) for i := 0; i < numTasks; i++ { + toSignal := []parade.TaskId{shardId(i % *numShards)} tasks = append(tasks, parade.TaskData{Id: id(i), Action: "part", ToSignal: toSignal}) } + + toSignal := []parade.TaskId{"done"} + for i := 0; i < *numShards; i++ { + tasks = append(tasks, parade.TaskData{Id: shardId(i), Action: "spontaneous", ToSignal: toSignal}) + } tasks = append(tasks, parade.TaskData{Id: "done", Action: "done"}) cleanup := w.insertTasks(tasks) @@ -752,8 +766,11 @@ func BenchmarkFanIn(b *testing.B) { count := 0 receivedDone := false for { - size := *bulk + int(rand.Int31n(int32(*bulk/10))) - *bulk/10 - tasks, err := w.ownTasks(parade.ActorId(fmt.Sprintf("worker-%d", i)), size, []string{"part", "done"}, nil) + size := *bulk + int(rand.Int31n(int32(*bulk/5))) - *bulk/10 + tasks, err := w.ownTasks( + parade.ActorId(fmt.Sprintf("worker-%d", i)), + size, + []string{"part", "spontaneous", "done"}, nil) if err != nil { resultCh[i] <- result{err: err} return @@ -762,10 +779,15 @@ func BenchmarkFanIn(b *testing.B) { break } for _, task := range tasks { - if task.Id != "done" { + switch task.Action { + case w.prefix("part"): count++ - } else { + case w.prefix("spontaneous"): + // nothing, just reduce fan-in contention + case w.prefix("done"): receivedDone = true + default: + resultCh[i] <- result{err: fmt.Errorf("weird action %s", task.Action)} } w.returnTask(task.Id, task.Token, "ok", parade.TASK_COMPLETED) } From 751f1c7dae756ff82fc3710b43782bb710457c88 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 24 Sep 2020 15:10:52 +0300 Subject: [PATCH 013/158] fix lint errors --- go.mod | 1 - go.sum | 3 - parade/ddl.go | 91 ++++++++++--------- parade/ddl_test.go | 220 ++++++++++++++++++++++----------------------- 4 files changed, 161 insertions(+), 154 deletions(-) diff --git a/go.mod b/go.mod index e0fe58e55a0..04522cdda23 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/Masterminds/squirrel v1.4.0 github.com/apache/thrift v0.13.0 github.com/aws/aws-sdk-go v1.34.0 - github.com/benbjohnson/clock v1.0.3 github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/containerd/containerd v1.3.6 // indirect github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe // indirect diff --git a/go.sum b/go.sum index 0a7eac26858..d88ac6a7876 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,6 @@ github.com/aws/aws-sdk-go 
v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve github.com/aws/aws-sdk-go v1.34.0 h1:brux2dRrlwCF5JhTL7MUT3WUwo9zfDHZZp3+g3Mvlmo= github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -1072,7 +1070,6 @@ github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa h1:RC4maTWLK github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/treeverse/lakeFS v0.10.2 h1:tIHuECSHU2DTRxk8PYZ/UWYLW5u1xBmABwjEXlSo1Fk= github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= github.com/tsenart/vegeta/v12 v12.8.3 h1:UEsDkSrEJojMKW/xr7KUv4H/bYykX+V48KCsPZPqEfk= github.com/tsenart/vegeta/v12 v12.8.3/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho= diff --git a/parade/ddl.go b/parade/ddl.go index 46399fa7a51..61c65c2dff0 100644 --- a/parade/ddl.go +++ b/parade/ddl.go @@ -14,12 +14,13 @@ import ( nanoid "github.com/matoous/go-nanoid" ) -type TaskId string +type TaskID string -type ActorId string +type ActorID string type PerformanceToken pgtype.UUID +// nolint:stylecheck (change name from src below) func (dst *PerformanceToken) Scan(src interface{}) error { var scanned pgtype.UUID if err := scanned.Scan(src); err != nil { @@ -42,14 +43,19 @@ func (src PerformanceToken) String() string { } offset += n } + // nolint:gomnd addBytes(4) res.WriteString("-") + // nolint:gomnd addBytes(2) res.WriteString("-") + // nolint:gomnd addBytes(2) res.WriteString("-") + // nolint:gomnd addBytes(2) res.WriteString("-") + // nolint:gomnd addBytes(6) return res.String() } @@ -57,22 +63,22 @@ func (src PerformanceToken) String() string { type TaskStatusCodeValue string const ( - // TASK_PENDING indicates a task is waiting for an actor to perform it (new or being + // TaskPending indicates a task is waiting for an actor to perform it (new or being // retried) - TASK_PENDING TaskStatusCodeValue = "pending" + TaskPending TaskStatusCodeValue = "pending" // IN_PROGRESS indicates a task is being performed by an actor. 
- TASK_IN_PROGRESS TaskStatusCodeValue = "in-progress" + TaskInProgress TaskStatusCodeValue = "in-progress" // ABORTED indicates an actor has aborted this task with message, will not be reissued - TASK_ABORTED TaskStatusCodeValue = "aborted" - // TASK_COMPLETED indicates an actor has completed this task with message, will not reissued - TASK_COMPLETED TaskStatusCodeValue = "completed" - // TASK_INVALID is used by the API to report errors - TASK_INVALID TaskStatusCodeValue = "[invalid]" + TaskAborted TaskStatusCodeValue = "aborted" + // TaskCompleted indicates an actor has completed this task with message, will not reissued + TaskCompleted TaskStatusCodeValue = "completed" + // TaskInvalid is used by the API to report errors + TaskInvalid TaskStatusCodeValue = "[invalid]" ) // TaskData is a row in table "tasks". It describes a task to perform. type TaskData struct { - Id TaskId `db:"task_id"` + ID TaskID `db:"task_id"` Action string `db:"action"` // Body is JSON-formatted Body *string `db:"body"` @@ -81,8 +87,8 @@ type TaskData struct { NumTries int `db:"num_tries"` MaxTries *int `db:"max_tries"` TotalDependencies *int `db:"total_dependencies"` - ToSignal []TaskId `db:"to_signal"` - ActorId ActorId `db:"actor_id"` + ToSignal []TaskID `db:"to_signal"` + ActorID ActorID `db:"actor_id"` ActionDeadline *time.Time `db:"action_deadline"` PerformanceToken *PerformanceToken `db:"performance_token"` FinishChannelName *string `db:"finish_channel"` @@ -98,7 +104,7 @@ type TaskDataIterator struct { func (td *TaskDataIterator) Next() bool { if td.next > len(td.Data) { - td.err = NoMoreDataError + td.err = ErrNoMoreData return false } ret := td.next < len(td.Data) @@ -106,7 +112,7 @@ func (td *TaskDataIterator) Next() bool { return ret } -var NoMoreDataError = errors.New("no more data") +var ErrNoMoreData = errors.New("no more data") func (td *TaskDataIterator) Err() error { return td.err @@ -114,7 +120,7 @@ func (td *TaskDataIterator) Err() error { func (td *TaskDataIterator) Values() ([]interface{}, error) { if td.next > len(td.Data) { - td.err = NoMoreDataError + td.err = ErrNoMoreData return nil, td.err } value := td.Data[td.next-1] @@ -125,7 +131,7 @@ func (td *TaskDataIterator) Values() ([]interface{}, error) { toSignal[i] = string(value.ToSignal[i]) } return []interface{}{ - value.Id, + value.ID, value.Action, value.Body, value.Status, @@ -134,7 +140,7 @@ func (td *TaskDataIterator) Values() ([]interface{}, error) { value.MaxTries, value.TotalDependencies, toSignal, - value.ActorId, + value.ActorID, value.ActionDeadline, value.PerformanceToken, value.FinishChannelName, @@ -150,25 +156,25 @@ var TaskDataColumnNames = []string{ var tasksTable = pgx.Identifier{"tasks"} // InsertTasks adds multiple tasks efficiently. -func InsertTasks(ctx context.Context, pgConn *pgx.Conn, source *TaskDataIterator) error { +func InsertTasks(ctx context.Context, pgConn *pgx.Conn, source pgx.CopyFromSource) error { _, err := pgConn.CopyFrom(ctx, tasksTable, TaskDataColumnNames, source) return err } // OwnedTaskData is a row returned from "SELECT * FROM own_tasks(...)". type OwnedTaskData struct { - Id TaskId `db:"task_id"` + ID TaskID `db:"task_id"` Token PerformanceToken `db:"token"` Action string `db:"action"` Body *string } // OwnTasks owns for actor and returns up to maxTasks tasks for performing any of actions. 
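 // For instance (illustrative values only), a polling worker might call
 //
 //	tasks, err := OwnTasks(db, "worker-1", 10, []string{"frob"}, &lease)
 //
 // where lease bounds how long the returned tasks stay owned before they are
 // offered to other actors again.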
-func OwnTasks(conn *sqlx.DB, actor ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]OwnedTaskData, error) { +func OwnTasks(conn *sqlx.DB, actor ActorID, maxTasks int, actions []string, maxDuration *time.Duration) ([]OwnedTaskData, error) { // Use sqlx.In to expand slice actions query, args, err := sqlx.In(`SELECT * FROM own_tasks(?, ARRAY[?], ?, ?)`, maxTasks, actions, actor, maxDuration) if err != nil { - return nil, fmt.Errorf("expand own tasks query: %s", err) + return nil, fmt.Errorf("expand own tasks query: %w", err) } query = conn.Rebind(query) rows, err := conn.Queryx(query, args...) @@ -186,15 +192,18 @@ func OwnTasks(conn *sqlx.DB, actor ActorId, maxTasks int, actions []string, maxD return tasks, nil } -var InvalidTokenError = errors.New("performance token invalid (action may have exceeded deadline)") +var ErrInvalidToken = errors.New("performance token invalid (action may have exceeded deadline)") // ReturnTask returns taskId which was acquired using the specified performanceToken, giving it // resultStatus and resultStatusCode. It returns InvalidTokenError if the performanceToken is // invalid; this happens when ReturnTask is called after its deadline expires, or due to a logic // error. -func ReturnTask(conn *sqlx.DB, taskId TaskId, token PerformanceToken, resultStatus string, resultStatusCode TaskStatusCodeValue) error { +func ReturnTask(conn *sqlx.DB, taskID TaskID, token PerformanceToken, resultStatus string, resultStatusCode TaskStatusCodeValue) error { var res int - query, args, err := sqlx.In(`SELECT return_task(?, ?, ?, ?)`, taskId, token, resultStatus, resultStatusCode) + query, args, err := sqlx.In(`SELECT return_task(?, ?, ?, ?)`, taskID, token, resultStatus, resultStatusCode) + if err != nil { + return fmt.Errorf("create return_task query: %w", err) + } query = conn.Rebind(query) err = conn.Get(&res, query, args...) if err != nil { @@ -202,40 +211,42 @@ func ReturnTask(conn *sqlx.DB, taskId TaskId, token PerformanceToken, resultStat } if res != 1 { - return InvalidTokenError + return ErrInvalidToken } return nil } +var ErrBadStatus = errors.New("bad status for task") + // WaitForTask blocks until taskId ends, and returns its result status and status code. It // needs a pgx.Conn -- *not* a sqlx.Conn -- because it depends on PostgreSQL specific features. 
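 // (Concretely: it issues LISTEN on the task's finish channel and blocks in
 // pgx's WaitForNotification, neither of which database/sql exposes.)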
-func WaitForTask(ctx context.Context, conn *pgx.Conn, taskId TaskId) (resultStatus string, resultStatusCode TaskStatusCodeValue, err error) { - row := conn.QueryRow(ctx, `SELECT finish_channel, status_code FROM tasks WHERE id=$1`, taskId) +func WaitForTask(ctx context.Context, conn *pgx.Conn, taskID TaskID) (resultStatus string, resultStatusCode TaskStatusCodeValue, err error) { + row := conn.QueryRow(ctx, `SELECT finish_channel, status_code FROM tasks WHERE id=$1`, taskID) var ( finishChannel string statusCode TaskStatusCodeValue status string ) if err = row.Scan(&finishChannel, &statusCode); err != nil { - return "", TASK_INVALID, fmt.Errorf("check task %s to listen: %w", taskId, err) + return "", TaskInvalid, fmt.Errorf("check task %s to listen: %w", taskID, err) } - if statusCode != TASK_IN_PROGRESS && statusCode != TASK_PENDING { - return "", statusCode, fmt.Errorf("task %s already in status %s", taskId, statusCode) + if statusCode != TaskInProgress && statusCode != TaskPending { + return "", statusCode, fmt.Errorf("task %s already in status %s: %w", taskID, statusCode, ErrBadStatus) } if _, err = conn.Exec(ctx, "LISTEN "+pgx.Identifier{finishChannel}.Sanitize()); err != nil { - return "", TASK_INVALID, fmt.Errorf("listen for %s: %w", finishChannel, err) + return "", TaskInvalid, fmt.Errorf("listen for %s: %w", finishChannel, err) } _, err = conn.WaitForNotification(ctx) if err != nil { - return "", TASK_INVALID, fmt.Errorf("wait for notification %s: %w", finishChannel, err) + return "", TaskInvalid, fmt.Errorf("wait for notification %s: %w", finishChannel, err) } - row = conn.QueryRow(ctx, `SELECT status, status_code FROM tasks WHERE id=$1`, taskId) + row = conn.QueryRow(ctx, `SELECT status, status_code FROM tasks WHERE id=$1`, taskID) status = "" - statusCode = TASK_INVALID + statusCode = TaskInvalid err = row.Scan(&status, &statusCode) return status, statusCode, err } @@ -243,7 +254,7 @@ func WaitForTask(ctx context.Context, conn *pgx.Conn, taskId TaskId) (resultStat // taskWithNewIterator is a pgx.CopyFromSource iterator that passes each task along with a // 'new' copy status. type taskWithNewIterator struct { - tasks []TaskId + tasks []TaskID idx int } @@ -260,7 +271,7 @@ func (it *taskWithNewIterator) Err() error { return nil } -func makeTaskWithNewIterator(tasks []TaskId) *taskWithNewIterator { +func makeTaskWithNewIterator(tasks []TaskID) *taskWithNewIterator { return &taskWithNewIterator{tasks, -1} } @@ -268,12 +279,12 @@ func makeTaskWithNewIterator(tasks []TaskId) *taskWithNewIterator { // tasks that are left with no dependencies. It creates a temporary table on tx, so ideally // close the transaction shortly after. The effect is easiest to analyze when all deleted tasks // have been either completed or been aborted. 
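 // (Since the previous patch the ids are bulk-loaded into that temporary table
 // with pgx.CopyFrom via taskWithNewIterator above, rather than with
 // row-at-a-time INSERTs.)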
-func DeleteTasks(ctx context.Context, tx pgx.Tx, taskIds []TaskId) error { - uniqueId, err := nanoid.Nanoid() +func DeleteTasks(ctx context.Context, tx pgx.Tx, taskIds []TaskID) error { + uniqueID, err := nanoid.Nanoid() if err != nil { return fmt.Errorf("generate random component for table name: %w", err) } - tableName := fmt.Sprintf("delete_tasks_%s", uniqueId) + tableName := fmt.Sprintf("delete_tasks_%s", uniqueID) if _, err = tx.Exec( ctx, fmt.Sprintf(`CREATE TEMP TABLE "%s" (id VARCHAR(64), mark tasks_recurse_value NOT NULL) ON COMMIT DROP`, tableName), diff --git a/parade/ddl_test.go b/parade/ddl_test.go index 323df15fdd6..96564880dc7 100644 --- a/parade/ddl_test.go +++ b/parade/ddl_test.go @@ -41,7 +41,7 @@ var ( ) // taskIdSlice attaches the methods of sort.Interface to []TaskId. -type taskIdSlice []parade.TaskId +type taskIdSlice []parade.TaskID func (p taskIdSlice) Len() int { return len(p) } func (p taskIdSlice) Less(i, j int) bool { return p[i] < p[j] } @@ -137,20 +137,20 @@ func (w wrapper) strip(s string) string { return strings.TrimPrefix(s, w.t.Name()+".") } -func (w wrapper) prefixTask(id parade.TaskId) parade.TaskId { - return parade.TaskId(w.prefix(string(id))) +func (w wrapper) prefixTask(id parade.TaskID) parade.TaskID { + return parade.TaskID(w.prefix(string(id))) } -func (w wrapper) stripTask(id parade.TaskId) parade.TaskId { - return parade.TaskId(w.strip(string(id))) +func (w wrapper) stripTask(id parade.TaskID) parade.TaskID { + return parade.TaskID(w.strip(string(id))) } -func (w wrapper) prefixActor(actor parade.ActorId) parade.ActorId { - return parade.ActorId(w.prefix(string(actor))) +func (w wrapper) prefixActor(actor parade.ActorID) parade.ActorID { + return parade.ActorID(w.prefix(string(actor))) } -func (w wrapper) stripActor(actor parade.TaskId) parade.ActorId { - return parade.ActorId(w.strip(string(actor))) +func (w wrapper) stripActor(actor parade.TaskID) parade.ActorID { + return parade.ActorID(w.strip(string(actor))) } func (w wrapper) insertTasks(tasks []parade.TaskData) func() { @@ -165,13 +165,13 @@ func (w wrapper) insertTasks(tasks []parade.TaskData) func() { prefixedTasks := make([]parade.TaskData, len(tasks)) for i := 0; i < len(tasks); i++ { copy := tasks[i] - copy.Id = w.prefixTask(copy.Id) + copy.ID = w.prefixTask(copy.ID) copy.Action = w.prefix(copy.Action) - copy.ActorId = w.prefixActor(copy.ActorId) + copy.ActorID = w.prefixActor(copy.ActorID) if copy.StatusCode == "" { copy.StatusCode = "pending" } - toSignal := make([]parade.TaskId, len(copy.ToSignal)) + toSignal := make([]parade.TaskID, len(copy.ToSignal)) for j := 0; j < len(toSignal); j++ { toSignal[j] = w.prefixTask(copy.ToSignal[j]) } @@ -184,15 +184,15 @@ func (w wrapper) insertTasks(tasks []parade.TaskData) func() { } // Create cleanup callback. Compute the ids now, tasks may change later. 
- ids := make([]parade.TaskId, 0, len(tasks)) + ids := make([]parade.TaskID, 0, len(tasks)) for _, task := range tasks { - ids = append(ids, task.Id) + ids = append(ids, task.ID) } return func() { w.deleteTasks(ids) } } -func (w wrapper) deleteTasks(ids []parade.TaskId) error { - prefixedIds := make([]parade.TaskId, len(ids)) +func (w wrapper) deleteTasks(ids []parade.TaskID) error { + prefixedIds := make([]parade.TaskID, len(ids)) for i := 0; i < len(ids); i++ { prefixedIds[i] = w.prefixTask(ids[i]) } @@ -222,11 +222,11 @@ func (w wrapper) deleteTasks(ids []parade.TaskId) error { return nil } -func (w wrapper) returnTask(taskId parade.TaskId, token parade.PerformanceToken, resultStatus string, resultStatusCode parade.TaskStatusCodeValue) error { +func (w wrapper) returnTask(taskId parade.TaskID, token parade.PerformanceToken, resultStatus string, resultStatusCode parade.TaskStatusCodeValue) error { return parade.ReturnTask(w.db, w.prefixTask(taskId), token, resultStatus, resultStatusCode) } -func (w wrapper) ownTasks(actorId parade.ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]parade.OwnedTaskData, error) { +func (w wrapper) ownTasks(actorId parade.ActorID, maxTasks int, actions []string, maxDuration *time.Duration) ([]parade.OwnedTaskData, error) { prefixedActions := make([]string, len(actions)) for i, action := range actions { prefixedActions[i] = w.prefix(action) @@ -235,7 +235,7 @@ func (w wrapper) ownTasks(actorId parade.ActorId, maxTasks int, actions []string if tasks != nil { for i := 0; i < len(tasks); i++ { task := &tasks[i] - task.Id = w.stripTask(task.Id) + task.ID = w.stripTask(task.ID) } } return tasks, err @@ -267,7 +267,7 @@ func TestTaskDataIterator_Empty(t *testing.T) { if it.Next() { t.Errorf("expected advanced empty new iterator %+v not to advance", it) } - if !errors.Is(it.Err(), parade.NoMoreDataError) { + if !errors.Is(it.Err(), parade.ErrNoMoreData) { t.Errorf("expected twice-advanced iterator to raise NoMoreDataError, got %s", it.Err()) } } @@ -275,13 +275,13 @@ func TestTaskDataIterator_Empty(t *testing.T) { func TestTaskDataIterator_Values(t *testing.T) { now := time.Now() tasks := []parade.TaskData{ - {Id: "000", Action: "zero", StatusCode: "enum values enforced on DB"}, - {Id: "111", Action: "frob", Body: stringAddr("1"), Status: stringAddr("state"), + {ID: "000", Action: "zero", StatusCode: "enum values enforced on DB"}, + {ID: "111", Action: "frob", Body: stringAddr("1"), Status: stringAddr("state"), StatusCode: "pending", NumTries: 11, MaxTries: intAddr(17), TotalDependencies: intAddr(9), - ToSignal: []parade.TaskId{parade.TaskId("foo"), parade.TaskId("bar")}, - ActorId: parade.ActorId("actor"), ActionDeadline: &now, + ToSignal: []parade.TaskID{parade.TaskID("foo"), parade.TaskID("bar")}, + ActorID: parade.ActorID("actor"), ActionDeadline: &now, PerformanceToken: performanceTokenAddr(parade.PerformanceToken{}), FinishChannelName: stringAddr("done"), }, @@ -305,10 +305,10 @@ func TestTaskDataIterator_Values(t *testing.T) { } if diffs := deep.Equal( []interface{}{ - task.Id, task.Action, task.Body, task.Status, task.StatusCode, + task.ID, task.Action, task.Body, task.Status, task.StatusCode, task.NumTries, task.MaxTries, task.TotalDependencies, toSignal, - task.ActorId, task.ActionDeadline, + task.ActorID, task.ActionDeadline, task.PerformanceToken, task.FinishChannelName, }, values); diffs != nil { t.Errorf("got other values at index %d than expected: %s", index, diffs) @@ -317,10 +317,10 @@ func TestTaskDataIterator_Values(t 
*testing.T) { if it.Next() { t.Errorf("expected iterator %+v to end after tasks done", it) } - if _, err := it.Values(); !errors.Is(err, parade.NoMoreDataError) { + if _, err := it.Values(); !errors.Is(err, parade.ErrNoMoreData) { t.Errorf("expected NoMoreData after iterator done, got %s", err) } - if !errors.Is(it.Err(), parade.NoMoreDataError) { + if !errors.Is(it.Err(), parade.ErrNoMoreData) { t.Errorf("expected iterator Err() to repeat NoMoreDataError, got %s", it.Err()) } if it.Next() { @@ -332,13 +332,13 @@ func TestOwn(t *testing.T) { w := wrapper{t, db} cleanup := w.insertTasks([]parade.TaskData{ - {Id: "000", Action: "never"}, - {Id: "111", Action: "frob"}, - {Id: "123", Action: "broz"}, - {Id: "222", Action: "broz"}, + {ID: "000", Action: "never"}, + {ID: "111", Action: "frob"}, + {ID: "123", Action: "broz"}, + {ID: "222", Action: "broz"}, }) defer cleanup() - tasks, err := w.ownTasks(parade.ActorId("tester"), 2, []string{"frob", "broz"}, nil) + tasks, err := w.ownTasks(parade.ActorID("tester"), 2, []string{"frob", "broz"}, nil) if err != nil { t.Errorf("first own_tasks query: %s", err) } @@ -347,7 +347,7 @@ func TestOwn(t *testing.T) { } gotTasks := tasks - tasks, err = w.ownTasks(parade.ActorId("tester-two"), 2, []string{"frob", "broz"}, nil) + tasks, err = w.ownTasks(parade.ActorID("tester-two"), 2, []string{"frob", "broz"}, nil) if err != nil { t.Errorf("second own_tasks query: %s", err) } @@ -356,12 +356,12 @@ func TestOwn(t *testing.T) { } gotTasks = append(gotTasks, tasks...) - gotIds := make([]parade.TaskId, 0, len(gotTasks)) + gotIds := make([]parade.TaskID, 0, len(gotTasks)) for _, got := range gotTasks { - gotIds = append(gotIds, got.Id) + gotIds = append(gotIds, got.ID) } sort.Sort(taskIdSlice(gotIds)) - if diffs := deep.Equal([]parade.TaskId{"111", "123", "222"}, gotIds); diffs != nil { + if diffs := deep.Equal([]parade.TaskID{"111", "123", "222"}, gotIds); diffs != nil { t.Errorf("expected other task IDs: %s", diffs) } } @@ -372,12 +372,12 @@ func TestOwnBody(t *testing.T) { val := "\"the quick brown fox jumps over the lazy dog\"" cleanup := w.insertTasks([]parade.TaskData{ - {Id: "body", Action: "yes", Body: &val}, - {Id: "nobody", Action: "no"}, + {ID: "body", Action: "yes", Body: &val}, + {ID: "nobody", Action: "no"}, }) defer cleanup() - tasks, err := w.ownTasks(parade.ActorId("somebody"), 2, []string{"yes", "no"}, nil) + tasks, err := w.ownTasks(parade.ActorID("somebody"), 2, []string{"yes", "no"}, nil) if err != nil { t.Fatalf("own tasks: %s", err) } @@ -385,7 +385,7 @@ func TestOwnBody(t *testing.T) { t.Fatalf("expected to own 2 tasks but got %+v", tasks) } body, nobody := tasks[0], tasks[1] - if body.Id != "body" { + if body.ID != "body" { body, nobody = nobody, body } @@ -402,16 +402,16 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { w := wrapper{t, db} cleanup := w.insertTasks([]parade.TaskData{ - {Id: "111", Action: "frob"}, + {ID: "111", Action: "frob"}, }) defer cleanup() - _, err := w.ownTasks(parade.ActorId("tortoise"), 1, []string{"frob"}, &second) + _, err := w.ownTasks(parade.ActorID("tortoise"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to setup tortoise task ownership: %s", err) } - fastTasks, err := w.ownTasks(parade.ActorId("hare"), 1, []string{"frob"}, &second) + fastTasks, err := w.ownTasks(parade.ActorID("hare"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to request fast task ownership: %s", err) } @@ -420,11 +420,11 @@ func TestOwnAfterDeadlineElapsed(t *testing.T) { } time.Sleep(2 * time.Second) 
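 	// By now the ownership deadline passed to the earlier calls has expired, so the
 	// task should be available to new owners again.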
- fastTasks, err = w.ownTasks(parade.ActorId("hare"), 1, []string{"frob"}, &second) + fastTasks, err = w.ownTasks(parade.ActorID("hare"), 1, []string{"frob"}, &second) if err != nil { t.Fatalf("failed to request fast task ownership after sleeping: %s", err) } - if len(fastTasks) != 1 || fastTasks[0].Id != "111" { + if len(fastTasks) != 1 || fastTasks[0].ID != "111" { t.Errorf("expected eventual hare task ownership to return task \"111\" but got tasks %+v", fastTasks) } } @@ -433,39 +433,39 @@ func TestReturnTask_DirectlyAndRetry(t *testing.T) { w := wrapper{t, db} cleanup := w.insertTasks([]parade.TaskData{ - {Id: "111", Action: "frob"}, - {Id: "123", Action: "broz"}, - {Id: "222", Action: "broz"}, + {ID: "111", Action: "frob"}, + {ID: "123", Action: "broz"}, + {ID: "222", Action: "broz"}, }) defer cleanup() - tasks, err := w.ownTasks(parade.ActorId("foo"), 4, []string{"frob", "broz"}, nil) + tasks, err := w.ownTasks(parade.ActorID("foo"), 4, []string{"frob", "broz"}, nil) if err != nil { t.Fatalf("acquire all tasks: %s", err) } - taskById := make(map[parade.TaskId]*parade.OwnedTaskData, len(tasks)) + taskById := make(map[parade.TaskID]*parade.OwnedTaskData, len(tasks)) for index := range tasks { - taskById[tasks[index].Id] = &tasks[index] + taskById[tasks[index].ID] = &tasks[index] } - if err = w.returnTask(taskById[parade.TaskId("111")].Id, taskById[parade.TaskId("111")].Token, "done", parade.TASK_COMPLETED); err != nil { + if err = w.returnTask(taskById[parade.TaskID("111")].ID, taskById[parade.TaskID("111")].Token, "done", parade.TaskCompleted); err != nil { t.Errorf("return task 111: %s", err) } - if err = w.returnTask(taskById[parade.TaskId("111")].Id, taskById[parade.TaskId("111")].Token, "done", parade.TASK_COMPLETED); !errors.Is(err, parade.InvalidTokenError) { + if err = w.returnTask(taskById[parade.TaskID("111")].ID, taskById[parade.TaskID("111")].Token, "done", parade.TaskCompleted); !errors.Is(err, parade.ErrInvalidToken) { t.Errorf("expected second attempt to return task 111 to fail with InvalidTokenError, got %s", err) } // Now attempt to return a task to in-progress state. 
- if err = w.returnTask(taskById[parade.TaskId("123")].Id, taskById[parade.TaskId("123")].Token, "try-again", parade.TASK_PENDING); err != nil { - t.Errorf("return task 123 (%+v) for another round: %s", taskById[parade.TaskId("123")], err) + if err = w.returnTask(taskById[parade.TaskID("123")].ID, taskById[parade.TaskID("123")].Token, "try-again", parade.TaskPending); err != nil { + t.Errorf("return task 123 (%+v) for another round: %s", taskById[parade.TaskID("123")], err) } - moreTasks, err := w.ownTasks(parade.ActorId("foo"), 4, []string{"frob", "broz"}, nil) + moreTasks, err := w.ownTasks(parade.ActorID("foo"), 4, []string{"frob", "broz"}, nil) if err != nil { t.Fatalf("re-acquire task 123: %s", err) } - if len(moreTasks) != 1 || moreTasks[0].Id != parade.TaskId("123") { + if len(moreTasks) != 1 || moreTasks[0].ID != parade.TaskID("123") { t.Errorf("expected to receive only task 123 but got tasks %+v", moreTasks) } } @@ -477,12 +477,12 @@ func TestReturnTask_RetryMulti(t *testing.T) { lifetime := 250 * time.Millisecond cleanup := w.insertTasks([]parade.TaskData{ - {Id: "111", Action: "frob", MaxTries: &maxTries}, + {ID: "111", Action: "frob", MaxTries: &maxTries}, }) defer cleanup() for i := 0; i < maxTries; i++ { - tasks, err := w.ownTasks(parade.ActorId("foo"), 1, []string{"frob"}, &lifetime) + tasks, err := w.ownTasks(parade.ActorID("foo"), 1, []string{"frob"}, &lifetime) if err != nil { t.Errorf("acquire task after %d/%d tries: %s", i, maxTries, err) } @@ -492,13 +492,13 @@ func TestReturnTask_RetryMulti(t *testing.T) { if i%2 == 0 { time.Sleep(2 * lifetime) } else { - if err = w.returnTask(tasks[0].Id, tasks[0].Token, "retry", parade.TASK_PENDING); err != nil { + if err = w.returnTask(tasks[0].ID, tasks[0].Token, "retry", parade.TaskPending); err != nil { t.Fatalf("return task %+v after %d/%d tries: %s", tasks[0], i, maxTries, err) } } } - tasks, err := w.ownTasks(parade.ActorId("foo"), 1, []string{"frob"}, &lifetime) + tasks, err := w.ownTasks(parade.ActorID("foo"), 1, []string{"frob"}, &lifetime) if err != nil { t.Fatalf("re-acquire task failed: %s", err) } @@ -510,8 +510,8 @@ func TestReturnTask_RetryMulti(t *testing.T) { func TestDependencies(t *testing.T) { w := wrapper{t, db} - id := func(n int) parade.TaskId { - return parade.TaskId(fmt.Sprintf("number-%d", n)) + id := func(n int) parade.TaskID { + return parade.TaskID(fmt.Sprintf("number-%d", n)) } makeBody := func(n int) *string { ret := fmt.Sprintf("%d", n) @@ -532,7 +532,7 @@ func TestDependencies(t *testing.T) { const num = 63 taskData := make([]parade.TaskData, 0, num) for i := 1; i <= num; i++ { - toSignal := make([]parade.TaskId, 0, i/20) + toSignal := make([]parade.TaskID, 0, i/20) for j := 2 * i; j <= num; j += i { toSignal = append(toSignal, id(j)) } @@ -544,7 +544,7 @@ func TestDependencies(t *testing.T) { } taskData = append(taskData, parade.TaskData{ - Id: id(i), + ID: id(i), Action: "div", Body: makeBody(i), ToSignal: toSignal, @@ -557,7 +557,7 @@ func TestDependencies(t *testing.T) { doneSet := make(map[int]struct{}, num) for { - tasks, err := w.ownTasks(parade.ActorId("foo"), 17, []string{"div"}, nil) + tasks, err := w.ownTasks(parade.ActorID("foo"), 17, []string{"div"}, nil) if err != nil { t.Fatalf("acquire tasks with done %+v: %s", doneSet, err) } @@ -574,7 +574,7 @@ func TestDependencies(t *testing.T) { } } } - if err = w.returnTask(task.Id, task.Token, "divided", parade.TASK_COMPLETED); err != nil { + if err = w.returnTask(task.ID, task.Token, "divided", parade.TaskCompleted); err != nil { 
t.Errorf("failed to complete task %+v: %s", task, err) } } @@ -589,35 +589,35 @@ func TestDeleteTasks(t *testing.T) { w := wrapper{t, db} cleanup := w.insertTasks([]parade.TaskData{ - {Id: parade.TaskId("a0"), Action: "root", ToSignal: []parade.TaskId{"a1", "a3"}}, - {Id: parade.TaskId("a1"), Action: "dep", ToSignal: []parade.TaskId{"a2"}, TotalDependencies: intAddr(1)}, - {Id: parade.TaskId("a2"), Action: "dep", ToSignal: []parade.TaskId{"a3"}, TotalDependencies: intAddr(1)}, - {Id: parade.TaskId("a3"), Action: "leaf", TotalDependencies: intAddr(2)}, - - {Id: parade.TaskId("b0"), Action: "root", ToSignal: []parade.TaskId{"b1"}}, - {Id: parade.TaskId("b1"), Action: "root-keep", ToSignal: []parade.TaskId{"b2"}}, - {Id: parade.TaskId("b2"), Action: "leaf", TotalDependencies: intAddr(2)}, - - {Id: parade.TaskId("c0"), Action: "root", ToSignal: []parade.TaskId{"c1", "c2"}}, - {Id: parade.TaskId("c1"), Action: "dep", ToSignal: []parade.TaskId{"c3", "c4"}, TotalDependencies: intAddr(1)}, - {Id: parade.TaskId("c2"), Action: "dep", ToSignal: []parade.TaskId{"c4", "c5"}, TotalDependencies: intAddr(1)}, - {Id: parade.TaskId("c3"), Action: "dep", ToSignal: []parade.TaskId{"c5", "c6"}, TotalDependencies: intAddr(1)}, - {Id: parade.TaskId("c4"), Action: "leaf", TotalDependencies: intAddr(2)}, - {Id: parade.TaskId("c5"), Action: "leaf", TotalDependencies: intAddr(2)}, - {Id: parade.TaskId("c6"), Action: "leaf", TotalDependencies: intAddr(1)}, + {ID: parade.TaskID("a0"), Action: "root", ToSignal: []parade.TaskID{"a1", "a3"}}, + {ID: parade.TaskID("a1"), Action: "dep", ToSignal: []parade.TaskID{"a2"}, TotalDependencies: intAddr(1)}, + {ID: parade.TaskID("a2"), Action: "dep", ToSignal: []parade.TaskID{"a3"}, TotalDependencies: intAddr(1)}, + {ID: parade.TaskID("a3"), Action: "leaf", TotalDependencies: intAddr(2)}, + + {ID: parade.TaskID("b0"), Action: "root", ToSignal: []parade.TaskID{"b1"}}, + {ID: parade.TaskID("b1"), Action: "root-keep", ToSignal: []parade.TaskID{"b2"}}, + {ID: parade.TaskID("b2"), Action: "leaf", TotalDependencies: intAddr(2)}, + + {ID: parade.TaskID("c0"), Action: "root", ToSignal: []parade.TaskID{"c1", "c2"}}, + {ID: parade.TaskID("c1"), Action: "dep", ToSignal: []parade.TaskID{"c3", "c4"}, TotalDependencies: intAddr(1)}, + {ID: parade.TaskID("c2"), Action: "dep", ToSignal: []parade.TaskID{"c4", "c5"}, TotalDependencies: intAddr(1)}, + {ID: parade.TaskID("c3"), Action: "dep", ToSignal: []parade.TaskID{"c5", "c6"}, TotalDependencies: intAddr(1)}, + {ID: parade.TaskID("c4"), Action: "leaf", TotalDependencies: intAddr(2)}, + {ID: parade.TaskID("c5"), Action: "leaf", TotalDependencies: intAddr(2)}, + {ID: parade.TaskID("c6"), Action: "leaf", TotalDependencies: intAddr(1)}, }) defer cleanup() type testCase struct { title string casePrefix string - toDelete []parade.TaskId - expectedRemaining []parade.TaskId + toDelete []parade.TaskID + expectedRemaining []parade.TaskID } cases := []testCase{ - {title: "chain with extra link", casePrefix: "a", toDelete: []parade.TaskId{"a0"}}, - {title: "delete only one dep", casePrefix: "b", toDelete: []parade.TaskId{"b0"}, expectedRemaining: []parade.TaskId{"b1", "b2"}}, - {title: "treelike", casePrefix: "c", toDelete: []parade.TaskId{"c0"}}, + {title: "chain with extra link", casePrefix: "a", toDelete: []parade.TaskID{"a0"}}, + {title: "delete only one dep", casePrefix: "b", toDelete: []parade.TaskID{"b0"}, expectedRemaining: []parade.TaskID{"b1", "b2"}}, + {title: "treelike", casePrefix: "c", toDelete: []parade.TaskID{"c0"}}, } prefix := t.Name() for 
_, c := range cases { @@ -637,9 +637,9 @@ func TestDeleteTasks(t *testing.T) { t.Fatalf("[I] remaining ids iterator close: %s", err) } }() - gotRemaining := make([]parade.TaskId, 0, len(c.expectedRemaining)) + gotRemaining := make([]parade.TaskID, 0, len(c.expectedRemaining)) for rows.Next() { - var id parade.TaskId + var id parade.TaskID if err = rows.Scan(&id); err != nil { t.Errorf("[I] scan ID value: %s", err) } @@ -648,7 +648,7 @@ func TestDeleteTasks(t *testing.T) { sort.Sort(taskIdSlice(gotRemaining)) expectedRemaining := c.expectedRemaining if expectedRemaining == nil { - expectedRemaining = []parade.TaskId{} + expectedRemaining = []parade.TaskID{} } for i, e := range expectedRemaining { expectedRemaining[i] = w.prefixTask(e) @@ -667,24 +667,24 @@ func TestNotification(t *testing.T) { type testCase struct { title string - id parade.TaskId + id parade.TaskID status string statusCode parade.TaskStatusCodeValue } cases := []testCase{ - {"task aborted", parade.TaskId("111"), "b0rked!", parade.TASK_ABORTED}, - {"task succeeded", parade.TaskId("222"), "yay!", parade.TASK_COMPLETED}, + {"task aborted", parade.TaskID("111"), "b0rked!", parade.TaskAborted}, + {"task succeeded", parade.TaskID("222"), "yay!", parade.TaskCompleted}, } for _, c := range cases { t.Run(c.title, func(t *testing.T) { cleanup := w.insertTasks([]parade.TaskData{ - {Id: c.id, Action: "frob"}, + {ID: c.id, Action: "frob"}, }) defer cleanup() - tasks, err := w.ownTasks(parade.ActorId("foo"), 1, []string{"frob"}, nil) + tasks, err := w.ownTasks(parade.ActorID("foo"), 1, []string{"frob"}, nil) if err != nil { t.Fatalf("acquire task: %s", err) } @@ -705,11 +705,11 @@ func TestNotification(t *testing.T) { } ch := make(chan result) go func() { - status, statusCode, err := parade.WaitForTask(ctx, conn, parade.TaskId("111")) + status, statusCode, err := parade.WaitForTask(ctx, conn, parade.TaskID("111")) ch <- result{status, statusCode, err} }() - if err = w.returnTask(tasks[0].Id, tasks[0].Token, c.status, c.statusCode); err != nil { + if err = w.returnTask(tasks[0].ID, tasks[0].Token, c.status, c.statusCode); err != nil { t.Fatalf("return task %+v: %s", tasks[0], err) } @@ -727,24 +727,24 @@ func BenchmarkFanIn(b *testing.B) { w := wrapper{b, db} - id := func(n int) parade.TaskId { - return parade.TaskId(fmt.Sprintf("in:%08d", n)) + id := func(n int) parade.TaskID { + return parade.TaskID(fmt.Sprintf("in:%08d", n)) } - shardId := func(n int) parade.TaskId { - return parade.TaskId(fmt.Sprintf("done:%05d", n)) + shardId := func(n int) parade.TaskID { + return parade.TaskID(fmt.Sprintf("done:%05d", n)) } tasks := make([]parade.TaskData, 0, numTasks+*numShards+1) for i := 0; i < numTasks; i++ { - toSignal := []parade.TaskId{shardId(i % *numShards)} - tasks = append(tasks, parade.TaskData{Id: id(i), Action: "part", ToSignal: toSignal}) + toSignal := []parade.TaskID{shardId(i % *numShards)} + tasks = append(tasks, parade.TaskData{ID: id(i), Action: "part", ToSignal: toSignal}) } - toSignal := []parade.TaskId{"done"} + toSignal := []parade.TaskID{"done"} for i := 0; i < *numShards; i++ { - tasks = append(tasks, parade.TaskData{Id: shardId(i), Action: "spontaneous", ToSignal: toSignal}) + tasks = append(tasks, parade.TaskData{ID: shardId(i), Action: "spontaneous", ToSignal: toSignal}) } - tasks = append(tasks, parade.TaskData{Id: "done", Action: "done"}) + tasks = append(tasks, parade.TaskData{ID: "done", Action: "done"}) cleanup := w.insertTasks(tasks) defer cleanup() @@ -768,7 +768,7 @@ func BenchmarkFanIn(b *testing.B) { for { 
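 				// Pick a batch size of roughly *bulk ±10%, so successive OwnTasks calls vary in size.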
size := *bulk + int(rand.Int31n(int32(*bulk/5))) - *bulk/10 tasks, err := w.ownTasks( - parade.ActorId(fmt.Sprintf("worker-%d", i)), + parade.ActorID(fmt.Sprintf("worker-%d", i)), size, []string{"part", "spontaneous", "done"}, nil) if err != nil { @@ -789,7 +789,7 @@ func BenchmarkFanIn(b *testing.B) { default: resultCh[i] <- result{err: fmt.Errorf("weird action %s", task.Action)} } - w.returnTask(task.Id, task.Token, "ok", parade.TASK_COMPLETED) + w.returnTask(task.ID, task.Token, "ok", parade.TaskCompleted) } } resultCh[i] <- result{count: count, receivedDone: receivedDone} From ed9cf6a98f10dab61673c5357e81ebde0d963279 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 24 Sep 2020 17:33:57 +0300 Subject: [PATCH 014/158] Add missing dependencies to benchmark Doesn't affect runtime, but benchmarks should reflect actual usage (i.e. correctness). --- parade/ddl_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/parade/ddl_test.go b/parade/ddl_test.go index 96564880dc7..275bc0fbd16 100644 --- a/parade/ddl_test.go +++ b/parade/ddl_test.go @@ -735,16 +735,19 @@ func BenchmarkFanIn(b *testing.B) { } tasks := make([]parade.TaskData, 0, numTasks+*numShards+1) + totalShardDependencies := make([]int, *numShards) for i := 0; i < numTasks; i++ { - toSignal := []parade.TaskID{shardId(i % *numShards)} + shard := (i / 100) % *numShards + toSignal := []parade.TaskID{shardId(shard)} tasks = append(tasks, parade.TaskData{ID: id(i), Action: "part", ToSignal: toSignal}) + totalShardDependencies[shard]++ } toSignal := []parade.TaskID{"done"} for i := 0; i < *numShards; i++ { - tasks = append(tasks, parade.TaskData{ID: shardId(i), Action: "spontaneous", ToSignal: toSignal}) + tasks = append(tasks, parade.TaskData{ID: shardId(i), Action: "spontaneous", ToSignal: toSignal, TotalDependencies: &totalShardDependencies[i]}) } - tasks = append(tasks, parade.TaskData{ID: "done", Action: "done"}) + tasks = append(tasks, parade.TaskData{ID: "done", Action: "done", TotalDependencies: numShards}) cleanup := w.insertTasks(tasks) defer cleanup() From a6d2d6e159424e415546f249f9161ac5f803962d Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 10 Sep 2020 14:10:27 +0300 Subject: [PATCH 015/158] Requirements doc for task (queue) management API It should support https://docs.google.com/document/d/1jOnCWLFDXLebp5qbF0G6lXbQ2znZDT9i1WJUxYo3YsY/edit#heading=h.upjufbik35xu --- design/task-management-api.md | 346 ++++++++++++++++++++++++++++++++++ 1 file changed, 346 insertions(+) create mode 100644 design/task-management-api.md diff --git a/design/task-management-api.md b/design/task-management-api.md new file mode 100644 index 00000000000..53b1ce83e30 --- /dev/null +++ b/design/task-management-api.md @@ -0,0 +1,346 @@ +# Task management API: requirements + +Multiple subsystems require reliable long-lived distributed operations. We shall support them +with a task queue subsystem. Concepts of the API are intended to parallel concepts in the +[Celery][celery-api] and/or [Machinery][machinery-api] APIs. We do not use either of them to +reduce the number of required dependencies. A future version may support external queues to +achieve better performance or robustness, at the price of increased ops or cost. These will +likely be queues such as Kafka or SQS rather than full Celery or Machinery. + +In practice this generally means providing APIs similar to those of Machinery (which is more +Go-like than Celery) for constructing task flows and for registering workers. 
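+
+As a taste of the intended flavor, here is a minimal, purely illustrative sketch of a worker
+loop written against the API defined below. The `handlers` map, the concrete numbers and the
+status values here are placeholders, not part of the proposed API:
+
+```go
+// workerLoop repeatedly owns up to 10 tasks for the actions it has handlers for,
+// dispatches each task to the handler matching its action, and returns it with a
+// final status. Error handling, back-off and shutdown are elided.
+func workerLoop(ctx context.Context, actor ActorId, handlers map[string]func(body *string) error) {
+	actions := make([]string, 0, len(handlers))
+	for action := range handlers {
+		actions = append(actions, action)
+	}
+	maxDuration := 5 * time.Minute
+	for {
+		tasks, err := OwnTasks(ctx, actor, 10, actions, &maxDuration)
+		if err != nil {
+			continue // (a real loop would log and back off here)
+		}
+		for _, task := range tasks {
+			status, code := "done", TaskStatusCodeValue("completed")
+			if err := handlers[task.Action](task.Body); err != nil {
+				status, code = err.Error(), TaskStatusCodeValue("aborted")
+			}
+			_ = ReturnTask(ctx, task.Id, task.Token, status, code)
+		}
+	}
+}
+```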
+
+In particular:
+1. We provide similar concepts for building task flows as do existing
+   task queues.
+1. We use similar terminology.
+1. We do *not* require the entire API of an existing task queue.
+1. We do *not* use the verbs or API calls of an existing task queue.
+
+This API definition comes with implementation sketches for how to use these APIs to implement
+the branch export and concurrent dedupe "user" stories.
+
+## API
+
+### Concepts
+
+#### Tasks
+
+A task is the basic atom of task management. It represents a single unit of work to
+perform, and can succeed or fail. Tasks may be retried on failure, so _executing a task
+must be idempotent_.
+
+Tasks include these attributes:
+- `Id`: a unique identifier for the task. Use a known-unique substring in the identifier
+  (e.g. a UUID or [nanoid][nanoid]) to avoid collisions, or a well-known identifier to ensure
+  only one task of a type can exist.
+- `Action`: the type of action to perform for this task. Workers pick tasks to perform and the
+  actions to perform on them according to this field.
+- `Body`: a description of parameters for this task. E.g. in a "copy file" task the body might
+  specify source key, ETag and destination key.
+- `StatusCode`: the internally-used state of the task in its lifecycle, see [life of a
+  task](#life-of-a-task) below.
+- `Status`: a textual description of the current status, generated by application code.
+- `ToSignal`: an array of task IDs that cannot start before this task ends, and will therefore
+  be signalled when it does.
+- `NumSignals`: number of tasks that must signal this task before it can be performed.
+  Initially equal to the number of tasks on which it appears in the `ToSignal` array.
+- `MaxTries`: the maximal number of times to try to execute the task if it keeps being returned
+  to state `pending`.
+- `ActorId`: the unique string identifier chosen by a worker which is currently performing the
+  task. Useful for monitoring.
+- `ActionDeadline`: a time by which the worker currently performing the task has committed to
+  finish it.
+
+Tasks provide these additional facilities (and include fields not listed here to support them):
+- **Retries**. A task placed back into state `pending` more than `MaxTries` times will not be
+  retried again.
+- **Dependencies**. Every task can only occur after some other tasks
+  are done.
+
+A task is performed by a single worker; if that worker does not finish processing it and an
+action deadline was set, it will be given to another worker.
+
+#### Life of a task
+
+```
+       |
+       | InsertTasks
+       |
+       |
+ +-----v-----+
+ |  pending  |<---+
+ +-----+-----+    |
+       |          |
+       | OwnTasks | ReturnTask
+       |          | (to pending)
+ +-----v-----+    |
+ |in-progress+----+
+ +-----+-----+
+       |
+       +------------+ ReturnTask
+       |            |
+  +----v---+   +----v----+
+  |aborted |   |completed|
+  +--------+   +---------+
+```
+
+A task arrives complete with its dependencies: a count of the number of preceding tasks that
+must "signal" it before it may be executed. When the task completes it signals all of its
+dependent tasks.
+
+Tasks are inserted in state `pending`. Multiple workers call `OwnTasks` to get tasks. A
+task may only be claimed by a call to `OwnTasks` if:
+* Its action is specified as acceptable to that call.
+* All dependencies of the task have been settled: all tasks specifying its task ID in their
+  `ToSignal` list have completed.
+* The task is not claimed by another worker.
Either: + - the task is in state `pending`, or + - the task is in state `in-progress`, but its `ActionDeadline` has elapsed (see "ownership + expiry", below). + +`OwnTasks` returns task IDs and a "performance token" for this performance of the task. +Both ID and token must be provided to _return_ the task from ownership. (The +performance token is used to resolve conflicts during "ownership expiry", below.) + +Once a worker owns a task, it performs it. It can decide to return the task to the task +queue and _complete_, _abort_ or _retry_ it by calling `ReturnTask`. Once completed, all +dependents of the task are signalled, causing any dependent that has received all its +required signals to be eligible for return by `OwnTasks`. + +#### Ownership expiry + +Processes can fail. To allow restarting a failed process calls to `OwnTasks` may specify a +deadline. The lease granted to an owning worker will expire after this deadline, allowing +another worker to own the task. Only the _last_ worker granted ownership may call +`ReturnTask` on the task. A delayed worker should still return the task, in case the task +has not yet been granted to another worker. + +#### Basic API + +This is a sample API. All details are fully subject to change, of course! Note that most +`func`s are probably going to be methods on some object, which we assume will carry DB +connection information etc. + +##### TaskData + +```go +type TaskId string + +type ActorId string + +type PerformanceToken pgtype.UUID // With added stringifiers + +// TaskData describes a task to perform. +type TaskData struct { + Id TaskId // Unique ID of task + Action string // Action to perform, used to fetch in OwnTasks + Body *string // Body containing details of action, used by clients only + Status *string // Human- and client-readable status + StatusCode TaskStatusCodeValue // Status code, used by task queue + NumTries int // Number of times this task has moved from started to in-progress + MaxTries *int // Maximal number of times to try this task + // Dependencies might be stored or handled differently, depending on what gives reasonable + // performance. + TotalDependencies *int // Number of tasks which must signal before this task can be owned + ToSignal []TaskId // Tasks to signal after this task is done + ActorId ActorId // ID of current actor performing this task (if in-progress) + ActionDeadline *time.Time // Deadline for current actor to finish performing this task (if in-progress) + PerformanceToken *PerformanceToken // Token to allow ReturnTask + PostResult bool // If set allow waiting for this task +} +``` + +##### InsertTasks + +```go +// InsertTasks atomically adds all tasks to the queue: if any task cannot be added (typically because +// it re-uses an existing key) then no tasks will be added. If PostResult was set on any tasks then +// they can be waited upon after InsertTasks returns. +func InsertTasks(ctx context.Context, source *taskDataIterator) error +``` + +A variant allows inserting a task _by force_ + +```go +// ReplaceTasks atomically adds all tasks to the queue. If a task not yet in-process with the same +// ID already exists then _replace it_ as though it were atomically aborted before this insert. If +// PostResult was set on any tasks then they can be waited upon after InsertTasks returns. 
+func ReplaceTasks(ctx context.Context, source *taskDataIterator) error
+```
+
+##### OwnTasks
+
+```go
+// OwnedTaskData is a task returned from OwnTasks
+type OwnedTaskData struct {
+	Id     TaskId           `db:"task_id"`
+	Token  PerformanceToken `db:"token"`
+	Action string
+	Body   *string
+}
+
+// OwnTasks acquires ownership for actor of up to maxTasks tasks for performing any of actions,
+// setting the lifetime of each returned owned task to maxDuration.
+func OwnTasks(ctx context.Context, actor ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]OwnedTaskData, error)
+```
+
+##### ReturnTask
+
+```go
+// ReturnTask returns taskId which was acquired using the specified performanceToken, giving it
+// resultStatus and resultStatusCode. It returns InvalidTokenError if the performanceToken is
+// invalid; this happens when ReturnTask is called after its deadline expires, or due to a logic
+// error. If resultStatusCode is ABORT, abort all succeeding tasks.
+func ReturnTask(ctx context.Context, taskId TaskId, token PerformanceToken, resultStatus string, resultStatusCode TaskStatusCodeValue) error
+```
+
+##### WaitForTask
+
+```go
+// WaitForTask waits for taskId (which must have been started with PostResult) to finish and
+// returns it. It returns immediately if the task has already finished.
+func WaitForTask(ctx context.Context, taskId TaskId) (TaskData, error)
+```
+
+##### AddDependencies
+
+```go
+// AddDependencies atomically adds dependencies: for every dependency, task Run must run after
+// task After.
+type TaskDependency struct {
+	After, Run TaskId
+}
+
+func AddDependencies(ctx context.Context, dependencies []TaskDependency) error
+```
+
+##### Monitoring
+
+We also require some routine as a basis for monitoring: it gives the number and status of each
+of a number of actions and task IDs, possibly with some filtering. The exact nature depends on
+the implementation chosen, however we _do_ require its availability.
+
+#### Differences from the Celery model
+
+This task management model is at a somewhat lower level than the Celery model:
+* **Workers explicitly loop to own and handle tasks.** Emulate the Celery model by writing an
+  explicit function that takes "handlers" for the different actions. We may well do this.
+
+  _Why change?_ Writing the loop is rarely an important factor. Flexibility in specifying the
+  action parameter of OwnTasks allows variable actions, for instance handling particular action
+  types only when a particular environmental condition is met (say, system load), or
+  incorporating side data in action names (and not only in task IDs). Flexibility in timing
+  allows per-process rate limiters for particular actions: filter out expensive actions when
+  their token bucket runs out. Flexibility in specifying _when_ OwnTasks is called allows
+  controlling load on the queuing component. Flexibility in specifying action dispatch allows
+  controlling _how many goroutines_ run particular actions concurrently. All this without
+  having to add configurable structures to the task manager.
+* **No explicit graph structures.** Emulate these using the section [Structures](#structures)
+  below.
+* **No implicit argument serialization.** Rather than flatten an "args" array we pass a stringy
+  "body". In practice "args" anyway require serialization; incorporating them into the queue
+  requires either configuring the queue with relevant serialization or allowing only primitives.
+  Celery selects the first, Machinery the second.
In both cases a Go client library must place + most of the serialization burden on application code -- simplest is to do so explicitly. + +#### Structures + +We can implement what Celery calls _Chains_, _Chords_ and _Groups_ using the basic API: these +are just ways to describe structured dependencies which form [parallel/serial +networks][parallel-series] networks. Drawings appear below. + +##### Chains + +``` + +----------+ + | task 1 | + +----------+ + | + | + +----v-----+ + | task 2 | + +----+-----+ + | + | + +----v-----+ + | task 3 | + +----------+ +``` + +##### Chords (and Groups) + +``` + +--------+ + +--->| task1 +-----+ + | +--------+ | + | | + | | + | +--------+ | + +--->| task2 +-----+ + | +--------+ | + +-------+ | | +-------------+ + | prev |-----+ +---->|(spontaneous)| + +-------+ | +--------+ | +-------------+ + +--->| task3 +-----+ + | +--------+ | + | | + | | + | +--------+ | + +--->| task4 +-----+ + +--------+ +``` + +## Implementing "user" stories with the API + +### Branch export + +1. Under the merge/commit lock for the branch: + 1. Insert a task with ID `start-export-{branch}` to start generating export tasks and a task + `done-export-{branch}` that depends on it. + 1. If insertion failed, _replace_ the task with ID `next-export-{branch}` with a task to + export _this_ comit ID. And add a dependency on `done-export-{branch}` (which may fail if + that task has completed). +1. To handle `start-export-{branch}`: + 1. Generate a task to copy or delete each file object (this is an opportunity to batch + multiple file objects if performance doesn't match. `done-export-{branch}` depends on + each of these tasks (and cannot have been deleted since `start-export-{branch}` has not + yet been returned). For every prefix for which such an object is configured, add a task + to generate its `.../_SUCCESS` object on S3, dependent on all the objects under that + prefix (or, to handle objects in sub-prefixes, on just the `_SUCCESS` of that sub-prefix). + 2. Add a task to generate manifests, dependent on `done-export-{branch}`. + 2. Return `start-export-{branch}` as completed. +1. To handle a copy or delete operation, perform it. +1. To handle `done-export-{branch}`: just return it, it can be spontaneous (if the task queue + supports that). +1. To handle `next-export-{branch}`: create `start-export-{branch}` (the previous one must have + ended), and return the task. + +`next-export-{branch}` is used to serialize branch exports, achieving the requirement for single +concurrent export per branch. Per-prefix `_SUCCESS` objects are generated on time due to their +dependencies. (As an option, we could set priorities and return tasks in priority order from +`OwnTasks`, to allow `_SUCCESS` objects to be created before copying other objects.) Retries +are handled by setting multiple per-copy attempts. + +### Concurrent dedupe batching + +1. For every created object, create a dedupe task. +2. To perform a dedupe task, acquire its checksum (from computation or ETag) and set a task to + write a dedupe record. +3. Acquire tasks _many_ dedupe records at a time, and process those in batches. + +## References + +### Well-known task queues +1. [Celery][celery-api] +2. [Machinery][machinery-api] + +### Modules +1. [nanoid][nanoid] + +### Graphs +1. 
[Parallel series][parallel-series]
+
+[celery-api]: https://docs.celeryproject.org/en/stable/userguide/index.html
+[machinery-api]: https://github.com/RichardKnop/machinery#readme
+[nanoid]: https://www.npmjs.com/package/nanoid
+[parallel-series]: https://www.cpp.edu/~elab/projects/project_05/index.html

From 156c271b55c26322d0fd10a5350918fda2883bb6 Mon Sep 17 00:00:00 2001
From: "Ariel Shaqed (Scolnicov)"
Date: Mon, 21 Sep 2020 18:35:11 +0300
Subject: [PATCH 016/158] [CR]: more readable docs

1. Explain action and body fields explicitly.
1. Explain performance token, PostResult.
1. Add ExtendTasksOwnership and consequent explanation for how to
   choose action lifetimes.
1. Clean up branch export explanations.
---
 design/task-management-api.md | 69 +++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 15 deletions(-)

diff --git a/design/task-management-api.md b/design/task-management-api.md
index 53b1ce83e30..ede628bbd92 100644
--- a/design/task-management-api.md
+++ b/design/task-management-api.md
@@ -30,6 +30,14 @@ A task is the basic atom of task management. It represents a single unit of wor
 perform, and can succeed or fail. Tasks may be retried on failure, so _executing a task
 must be idempotent_.
 
+Tasks connect to application code via an action and a body. The _action_ identifies the
+operation to complete the task. Examples of actions can include "copy a file", "delete a
+path", "report success". It is essentially the _name_ of a procedure to perform at some future
+time. The _body_ of a task gives information necessary to configure the specific task.
+Examples of bodies can include "source file path X, destination path Z" (for a copy task), "path
+Z" (for a delete task), or "date started, number of objects and a message" (for a report task).
+It essentially holds the _parameters_ the action uses to perform the task.
+
 Tasks include these attributes:
 - `Id`: a unique identifier for the task. Use a known-unique substring in the identifier
   (e.g. a UUID or [nanoid][nanoid]) to avoid collisions, or a well-known identifier to ensure
@@ -98,9 +106,14 @@ task may only be claimed by a call to `OwnTasks` if:
 * Its action is specified as acceptable to that call.
 * All dependencies of the task have been settled: all tasks specifying its task ID in their
   `ToSignal` list have completed.
 * The task is not claimed by another worker. Either:
   - the task is in state `pending`, or
   - the task is in state `in-progress`, but its `ActionDeadline` has elapsed (see "ownership
     expiry", below).
 
-`OwnTasks` returns task IDs and a "performance token" for this performance of the task.
-Both ID and token must be provided to _return_ the task from ownership. (The
-performance token is used to resolve conflicts during "ownership expiry", below.)
+`OwnTasks` returns task IDs and for each returned task a "performance token" for this
+performance of it. Both ID and token must be provided to _return_ the task from ownership.
+(The performance token is used to resolve conflicts during "ownership expiry", below.)
+
+A typical use is that a worker loop repeatedly calls `OwnTasks` on one or more actions, and
+dispatches each to a separate function. The application controls concurrency by setting the
+number of concurrent worker loops. For instance, it might set 20 worker loops to perform "copy"
+and "delete" tasks and a single worker loop to perform "report to DataDog".
 
 Once a worker owns a task, it performs it. It can decide to return the task to the task
 queue and _complete_, _abort_ or _retry_ it by calling `ReturnTask`.
Once completed, all @@ -146,7 +159,7 @@ type TaskData struct { ActorId ActorId // ID of current actor performing this task (if in-progress) ActionDeadline *time.Time // Deadline for current actor to finish performing this task (if in-progress) PerformanceToken *PerformanceToken // Token to allow ReturnTask - PostResult bool // If set allow waiting for this task + PostResult bool // If set allow waiting for this task using WaitForTask } ``` @@ -164,7 +177,8 @@ A variant allows inserting a task _by force_ ```go // ReplaceTasks atomically adds all tasks to the queue. If a task not yet in-process with the same // ID already exists then _replace it_ as though it were atomically aborted before this insert. If -// PostResult was set on any tasks then they can be waited upon after InsertTasks returns. +// PostResult was set on any tasks then they can be waited upon after InsertTasks returns. Tasks that +// are in process cannot be replaced. func ReplaceTasks(ctx context.Context, source *taskDataIterator) error ``` @@ -184,6 +198,20 @@ type OwnedTaskData struct { func OwnTasks(ctx context.Context, actor ActorId, maxTasks int, actions []string, maxDuration *time.Duration) ([]OwnedTaskData, error) ``` +`maxDuration` should be a time during which no other worker can access the task. It does not +have to be the time to _complete_ the task: workers can periodically call `ExtendTasksOwnership` +to extend the lifetime. + +##### ExtendTasksOwnership + +```go +// ExtendTasksOwnership extends the current action lifetime for each of task by another maxDuration, +// if that task is still owned by this actor with that performance token. It returns true for each +// task if it is still owned, or false if ownership extension failed because the task is no longer +// owned. +func ExtendTasksOwnership(ctx context.Context, actor ActorId, toExtend []OwnedTaskData, maxDuration time.Duration) ([]bool, error) +``` + ##### ReturnTask ```go @@ -294,12 +322,25 @@ networks][parallel-series] networks. Drawings appear below. ### Branch export -1. Under the merge/commit lock for the branch: - 1. Insert a task with ID `start-export-{branch}` to start generating export tasks and a task - `done-export-{branch}` that depends on it. - 1. If insertion failed, _replace_ the task with ID `next-export-{branch}` with a task to - export _this_ comit ID. And add a dependency on `done-export-{branch}` (which may fail if - that task has completed). +Each branch uses separate tasks arranged in a cycle. These names are task IDs with a matching +action name: e.g. the action name for `next-export-{branch}` is `next-export`. (The branch name +also appears in the body.) + +* `next-export-{branch}` is to start the next export if an export is already underway, by + creating a task `start-export-{branch}`. +* `start-export-{branch}` handles the actual logic of generating the copy tasks in a network, + leading eventually to the task `done-export-{branch}` (which is also generates) becoming + available. +* `done-export-{branch}` is there so that `next-export-{branch}` can depend on it -- and not + start before the current export operation terminates. (If it does not exist, + `next-export-{branch}` has no dependency blocking it and can run immediately.) + +The actual steps: +1. Under the merge/commit lock for the branch: _replace_ the task with ID `next-export-{branch}` + with a task to export _this_ commit ID. And add a dependency on `done-export-{branch}` + (which may fail if that task has completed; that is safe). +1. 
To handle `next-export-{branch}`: create `start-export-{branch}` (the previous one must have + ended), and return the task. 1. To handle `start-export-{branch}`: 1. Generate a task to copy or delete each file object (this is an opportunity to batch multiple file objects if performance doesn't match. `done-export-{branch}` depends on @@ -307,13 +348,11 @@ networks][parallel-series] networks. Drawings appear below. yet been returned). For every prefix for which such an object is configured, add a task to generate its `.../_SUCCESS` object on S3, dependent on all the objects under that prefix (or, to handle objects in sub-prefixes, on just the `_SUCCESS` of that sub-prefix). - 2. Add a task to generate manifests, dependent on `done-export-{branch}`. - 2. Return `start-export-{branch}` as completed. + 1. Add a task to generate manifests, dependent on `done-export-{branch}`. + 1. Return `start-export-{branch}` as completed. 1. To handle a copy or delete operation, perform it. 1. To handle `done-export-{branch}`: just return it, it can be spontaneous (if the task queue supports that). -1. To handle `next-export-{branch}`: create `start-export-{branch}` (the previous one must have - ended), and return the task. `next-export-{branch}` is used to serialize branch exports, achieving the requirement for single concurrent export per branch. Per-prefix `_SUCCESS` objects are generated on time due to their From 7715accf985d330f97d216074f4ea9c2ce2c7883 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Tue, 29 Sep 2020 17:11:53 +0300 Subject: [PATCH 017/158] [CR] Remove concurrent dedupe story Drivers for concurrent dedupe are somewhat unclear, we may do better by just removing it. --- design/task-management-api.md | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/design/task-management-api.md b/design/task-management-api.md index ede628bbd92..2d859f2a321 100644 --- a/design/task-management-api.md +++ b/design/task-management-api.md @@ -18,7 +18,8 @@ In particular: 1. We do *not* use the verbs or API calls of an existing task queue. This API definition comes with implementation sketches for how we to use these APIs to implement -the branch export and concurrent dedupe "user" stories. +the branch export story. We shall also (re-)implement retention expiry to use these APIs for +better ops; that story is considerably easier to imagine. ## API @@ -360,13 +361,6 @@ dependencies. (As an option, we could set priorities and return tasks in priori `OwnTasks`, to allow `_SUCCESS` objects to be created before copying other objects.) Retries are handled by setting multiple per-copy attempts. -### Concurrent dedupe batching - -1. For every created object, create a dedupe task. -2. To perform a dedupe task, acquire its checksum (from computation or ETag) and set a task to - write a dedupe record. -3. Acquire tasks _many_ dedupe records at a time, and process those in batches. 
- ## References ### Well-known task queues From 60762884e4da0bc5e714874d195f7ffbedd0ca98 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Sun, 4 Oct 2020 11:06:52 +0300 Subject: [PATCH 018/158] [CR] More "Id"->"ID"; fix "delete only one dep" test --- parade/ddl_test.go | 56 +++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/parade/ddl_test.go b/parade/ddl_test.go index 275bc0fbd16..a94076f3d7c 100644 --- a/parade/ddl_test.go +++ b/parade/ddl_test.go @@ -40,12 +40,12 @@ var ( numShards = flag.Int("num-shards", 400, "Number of intermediate fan-in shards") ) -// taskIdSlice attaches the methods of sort.Interface to []TaskId. -type taskIdSlice []parade.TaskID +// taskIDSlice attaches the methods of sort.Interface to []TaskID. +type taskIDSlice []parade.TaskID -func (p taskIdSlice) Len() int { return len(p) } -func (p taskIdSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p taskIdSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p taskIDSlice) Len() int { return len(p) } +func (p taskIDSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p taskIDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // runDBInstance starts a test Postgres server inside container pool, and returns a connection // URI and a closer function. @@ -192,9 +192,9 @@ func (w wrapper) insertTasks(tasks []parade.TaskData) func() { } func (w wrapper) deleteTasks(ids []parade.TaskID) error { - prefixedIds := make([]parade.TaskID, len(ids)) + prefixedIDs := make([]parade.TaskID, len(ids)) for i := 0; i < len(ids); i++ { - prefixedIds[i] = w.prefixTask(ids[i]) + prefixedIDs[i] = w.prefixTask(ids[i]) } ctx := context.Background() conn, err := pgx.Connect(ctx, databaseURI) @@ -211,7 +211,7 @@ func (w wrapper) deleteTasks(ids []parade.TaskID) error { } }() - if err = parade.DeleteTasks(ctx, tx, prefixedIds); err != nil { + if err = parade.DeleteTasks(ctx, tx, prefixedIDs); err != nil { return err } @@ -222,16 +222,16 @@ func (w wrapper) deleteTasks(ids []parade.TaskID) error { return nil } -func (w wrapper) returnTask(taskId parade.TaskID, token parade.PerformanceToken, resultStatus string, resultStatusCode parade.TaskStatusCodeValue) error { - return parade.ReturnTask(w.db, w.prefixTask(taskId), token, resultStatus, resultStatusCode) +func (w wrapper) returnTask(taskID parade.TaskID, token parade.PerformanceToken, resultStatus string, resultStatusCode parade.TaskStatusCodeValue) error { + return parade.ReturnTask(w.db, w.prefixTask(taskID), token, resultStatus, resultStatusCode) } -func (w wrapper) ownTasks(actorId parade.ActorID, maxTasks int, actions []string, maxDuration *time.Duration) ([]parade.OwnedTaskData, error) { +func (w wrapper) ownTasks(actorID parade.ActorID, maxTasks int, actions []string, maxDuration *time.Duration) ([]parade.OwnedTaskData, error) { prefixedActions := make([]string, len(actions)) for i, action := range actions { prefixedActions[i] = w.prefix(action) } - tasks, err := parade.OwnTasks(w.db, actorId, maxTasks, prefixedActions, maxDuration) + tasks, err := parade.OwnTasks(w.db, actorID, maxTasks, prefixedActions, maxDuration) if tasks != nil { for i := 0; i < len(tasks); i++ { task := &tasks[i] @@ -356,12 +356,12 @@ func TestOwn(t *testing.T) { } gotTasks = append(gotTasks, tasks...) 
- gotIds := make([]parade.TaskID, 0, len(gotTasks)) + gotIDs := make([]parade.TaskID, 0, len(gotTasks)) for _, got := range gotTasks { - gotIds = append(gotIds, got.ID) + gotIDs = append(gotIDs, got.ID) } - sort.Sort(taskIdSlice(gotIds)) - if diffs := deep.Equal([]parade.TaskID{"111", "123", "222"}, gotIds); diffs != nil { + sort.Sort(taskIDSlice(gotIDs)) + if diffs := deep.Equal([]parade.TaskID{"111", "123", "222"}, gotIDs); diffs != nil { t.Errorf("expected other task IDs: %s", diffs) } } @@ -444,22 +444,22 @@ func TestReturnTask_DirectlyAndRetry(t *testing.T) { t.Fatalf("acquire all tasks: %s", err) } - taskById := make(map[parade.TaskID]*parade.OwnedTaskData, len(tasks)) + taskByID := make(map[parade.TaskID]*parade.OwnedTaskData, len(tasks)) for index := range tasks { - taskById[tasks[index].ID] = &tasks[index] + taskByID[tasks[index].ID] = &tasks[index] } - if err = w.returnTask(taskById[parade.TaskID("111")].ID, taskById[parade.TaskID("111")].Token, "done", parade.TaskCompleted); err != nil { + if err = w.returnTask(taskByID[parade.TaskID("111")].ID, taskByID[parade.TaskID("111")].Token, "done", parade.TaskCompleted); err != nil { t.Errorf("return task 111: %s", err) } - if err = w.returnTask(taskById[parade.TaskID("111")].ID, taskById[parade.TaskID("111")].Token, "done", parade.TaskCompleted); !errors.Is(err, parade.ErrInvalidToken) { + if err = w.returnTask(taskByID[parade.TaskID("111")].ID, taskByID[parade.TaskID("111")].Token, "done", parade.TaskCompleted); !errors.Is(err, parade.ErrInvalidToken) { t.Errorf("expected second attempt to return task 111 to fail with InvalidTokenError, got %s", err) } // Now attempt to return a task to in-progress state. - if err = w.returnTask(taskById[parade.TaskID("123")].ID, taskById[parade.TaskID("123")].Token, "try-again", parade.TaskPending); err != nil { - t.Errorf("return task 123 (%+v) for another round: %s", taskById[parade.TaskID("123")], err) + if err = w.returnTask(taskByID[parade.TaskID("123")].ID, taskByID[parade.TaskID("123")].Token, "try-again", parade.TaskPending); err != nil { + t.Errorf("return task 123 (%+v) for another round: %s", taskByID[parade.TaskID("123")], err) } moreTasks, err := w.ownTasks(parade.ActorID("foo"), 4, []string{"frob", "broz"}, nil) if err != nil { @@ -594,7 +594,7 @@ func TestDeleteTasks(t *testing.T) { {ID: parade.TaskID("a2"), Action: "dep", ToSignal: []parade.TaskID{"a3"}, TotalDependencies: intAddr(1)}, {ID: parade.TaskID("a3"), Action: "leaf", TotalDependencies: intAddr(2)}, - {ID: parade.TaskID("b0"), Action: "root", ToSignal: []parade.TaskID{"b1"}}, + {ID: parade.TaskID("b0"), Action: "root", ToSignal: []parade.TaskID{"b2"}}, {ID: parade.TaskID("b1"), Action: "root-keep", ToSignal: []parade.TaskID{"b2"}}, {ID: parade.TaskID("b2"), Action: "leaf", TotalDependencies: intAddr(2)}, @@ -645,7 +645,7 @@ func TestDeleteTasks(t *testing.T) { } gotRemaining = append(gotRemaining, id) } - sort.Sort(taskIdSlice(gotRemaining)) + sort.Sort(taskIDSlice(gotRemaining)) expectedRemaining := c.expectedRemaining if expectedRemaining == nil { expectedRemaining = []parade.TaskID{} @@ -653,7 +653,7 @@ func TestDeleteTasks(t *testing.T) { for i, e := range expectedRemaining { expectedRemaining[i] = w.prefixTask(e) } - sort.Sort(taskIdSlice(expectedRemaining)) + sort.Sort(taskIDSlice(expectedRemaining)) if diffs := deep.Equal(expectedRemaining, gotRemaining); diffs != nil { t.Errorf("left with other IDs than expected: %s", diffs) } @@ -730,7 +730,7 @@ func BenchmarkFanIn(b *testing.B) { id := func(n int) parade.TaskID { 
return parade.TaskID(fmt.Sprintf("in:%08d", n)) } - shardId := func(n int) parade.TaskID { + shardID := func(n int) parade.TaskID { return parade.TaskID(fmt.Sprintf("done:%05d", n)) } @@ -738,14 +738,14 @@ func BenchmarkFanIn(b *testing.B) { totalShardDependencies := make([]int, *numShards) for i := 0; i < numTasks; i++ { shard := (i / 100) % *numShards - toSignal := []parade.TaskID{shardId(shard)} + toSignal := []parade.TaskID{shardID(shard)} tasks = append(tasks, parade.TaskData{ID: id(i), Action: "part", ToSignal: toSignal}) totalShardDependencies[shard]++ } toSignal := []parade.TaskID{"done"} for i := 0; i < *numShards; i++ { - tasks = append(tasks, parade.TaskData{ID: shardId(i), Action: "spontaneous", ToSignal: toSignal, TotalDependencies: &totalShardDependencies[i]}) + tasks = append(tasks, parade.TaskData{ID: shardID(i), Action: "spontaneous", ToSignal: toSignal, TotalDependencies: &totalShardDependencies[i]}) } tasks = append(tasks, parade.TaskData{ID: "done", Action: "done", TotalDependencies: numShards}) cleanup := w.insertTasks(tasks) From 589b4f0499a3af6922e31da2b283372b54033167 Mon Sep 17 00:00:00 2001 From: "Ariel Shaqed (Scolnicov)" Date: Thu, 1 Oct 2020 19:40:35 +0300 Subject: [PATCH 019/158] Use pgx/stdlib to access pgx Conns directly from the DB --- parade/ddl.go | 4 ++-- parade/ddl_test.go | 28 +++++++++++++++++++--------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/parade/ddl.go b/parade/ddl.go index 61c65c2dff0..03d3da4b528 100644 --- a/parade/ddl.go +++ b/parade/ddl.go @@ -156,8 +156,8 @@ var TaskDataColumnNames = []string{ var tasksTable = pgx.Identifier{"tasks"} // InsertTasks adds multiple tasks efficiently. -func InsertTasks(ctx context.Context, pgConn *pgx.Conn, source pgx.CopyFromSource) error { - _, err := pgConn.CopyFrom(ctx, tasksTable, TaskDataColumnNames, source) +func InsertTasks(ctx context.Context, conn *pgx.Conn, source pgx.CopyFromSource) error { + _, err := conn.CopyFrom(ctx, tasksTable, TaskDataColumnNames, source) return err } diff --git a/parade/ddl_test.go b/parade/ddl_test.go index a94076f3d7c..107fb15fdb5 100644 --- a/parade/ddl_test.go +++ b/parade/ddl_test.go @@ -15,7 +15,7 @@ import ( "time" "github.com/go-test/deep" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" _ "github.com/jackc/pgx/v4/stdlib" "github.com/treeverse/lakefs/parade" @@ -156,11 +156,17 @@ func (w wrapper) stripActor(actor parade.TaskID) parade.ActorID { func (w wrapper) insertTasks(tasks []parade.TaskData) func() { w.t.Helper() ctx := context.Background() - conn, err := pgx.Connect(ctx, databaseURI) + sqlConn, err := w.db.Conn(ctx) if err != nil { - w.t.Fatalf("pgx.Connect: %s", err) + w.t.Fatalf("sqlx.DB.Conn: %s", err) } - defer conn.Close(ctx) + defer sqlConn.Close() + + conn, err := stdlib.AcquireConn(w.db.DB) + if err != nil { + w.t.Fatalf("stdlib.AcquireConn: %s", err) + } + defer stdlib.ReleaseConn(w.db.DB, conn) prefixedTasks := make([]parade.TaskData, len(tasks)) for i := 0; i < len(tasks); i++ { @@ -185,9 +191,11 @@ func (w wrapper) insertTasks(tasks []parade.TaskData) func() { // Create cleanup callback. Compute the ids now, tasks may change later. 
---
 parade/ddl.go      |  4 ++--
 parade/ddl_test.go | 28 +++++++++++++++++++---------
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/parade/ddl.go b/parade/ddl.go
index 61c65c2dff0..03d3da4b528 100644
--- a/parade/ddl.go
+++ b/parade/ddl.go
@@ -156,8 +156,8 @@ var TaskDataColumnNames = []string{
 var tasksTable = pgx.Identifier{"tasks"}
 
 // InsertTasks adds multiple tasks efficiently.
-func InsertTasks(ctx context.Context, pgConn *pgx.Conn, source pgx.CopyFromSource) error {
-	_, err := pgConn.CopyFrom(ctx, tasksTable, TaskDataColumnNames, source)
+func InsertTasks(ctx context.Context, conn *pgx.Conn, source pgx.CopyFromSource) error {
+	_, err := conn.CopyFrom(ctx, tasksTable, TaskDataColumnNames, source)
 	return err
 }
 
diff --git a/parade/ddl_test.go b/parade/ddl_test.go
index a94076f3d7c..107fb15fdb5 100644
--- a/parade/ddl_test.go
+++ b/parade/ddl_test.go
@@ -15,7 +15,7 @@ import (
 	"time"
 
 	"github.com/go-test/deep"
-	"github.com/jackc/pgx/v4"
+	"github.com/jackc/pgx/v4/stdlib"
 	_ "github.com/jackc/pgx/v4/stdlib"
 
 	"github.com/treeverse/lakefs/parade"
@@ -156,11 +156,17 @@ func (w wrapper) stripActor(actor parade.TaskID) parade.ActorID {
 func (w wrapper) insertTasks(tasks []parade.TaskData) func() {
 	w.t.Helper()
 	ctx := context.Background()
-	conn, err := pgx.Connect(ctx, databaseURI)
+	sqlConn, err := w.db.Conn(ctx)
 	if err != nil {
-		w.t.Fatalf("pgx.Connect: %s", err)
+		w.t.Fatalf("sqlx.DB.Conn: %s", err)
 	}
-	defer conn.Close(ctx)
+	defer sqlConn.Close()
+
+	conn, err := stdlib.AcquireConn(w.db.DB)
+	if err != nil {
+		w.t.Fatalf("stdlib.AcquireConn: %s", err)
+	}
+	defer stdlib.ReleaseConn(w.db.DB, conn)
 
 	prefixedTasks := make([]parade.TaskData, len(tasks))
 	for i := 0; i < len(tasks); i++ {
@@ -185,9 +191,11 @@ func (w wrapper) insertTasks(tasks []parade.TaskData) func() {
 	// Create cleanup callback.  Compute the ids now, tasks may change later.
 	ids := make([]parade.TaskID, 0, len(tasks))
+
 	for _, task := range tasks {
 		ids = append(ids, task.ID)
 	}
+
 	return func() { w.deleteTasks(ids) }
 }
 
@@ -197,10 +205,12 @@ func (w wrapper) deleteTasks(ids []parade.TaskID) error {
 		prefixedIDs[i] = w.prefixTask(ids[i])
 	}
 	ctx := context.Background()
-	conn, err := pgx.Connect(ctx, databaseURI)
+	conn, err := stdlib.AcquireConn(w.db.DB)
 	if err != nil {
-		return fmt.Errorf("connect to DB: %w", err)
+		w.t.Fatalf("stdlib.AcquireConn: %s", err)
 	}
+	defer stdlib.ReleaseConn(w.db.DB, conn)
+
 	tx, err := conn.Begin(ctx)
 	if err != nil {
 		return fmt.Errorf("BEGIN: %w", err)
@@ -692,11 +702,11 @@ func TestNotification(t *testing.T) {
 		t.Fatalf("expected to own single task but got %+v", tasks)
 	}
 
-	conn, err := pgx.Connect(ctx, databaseURI)
+	conn, err := stdlib.AcquireConn(w.db.DB)
 	if err != nil {
-		t.Fatalf("pgx.Connect: %s", err)
+		w.t.Fatalf("stdlib.AcquireConn: %s", err)
 	}
-	defer conn.Close(ctx)
+	defer stdlib.ReleaseConn(w.db.DB, conn)
 
 	type result struct {
 		status     string

From 346057a3d40fbc3d0ce29c6c69bb9aa284c1d74c Mon Sep 17 00:00:00 2001
From: "Ariel Shaqed (Scolnicov)"
Date: Sun, 4 Oct 2020 15:40:00 +0300
Subject: [PATCH 020/158] [CR] Fix TestNotification to work, then fix
 WaitForTask

`TestNotification` was comparing structs with (just) internal fields by
using `deep.Equal`, which *ignores* them.  So it was passing.

Fixing it exposed that...

`WaitForTask` was never signalling because of a dumb PGSQL error: cannot
drop the return value from `SELECT`, need to use `PERFORM` instead.

Uggh.
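A minimal reproduction of the first gotcha (the struct here is illustrative, not the test's actual type): go-test/deep skips unexported struct fields by default, so two values that differ only in unexported fields produce no diffs, and a test asserting "no diffs" passes vacuously. Exporting the fields, as this patch does, makes the comparison real.

    package main

    import (
        "fmt"

        "github.com/go-test/deep"
    )

    // Illustrative type only: a struct with nothing but unexported fields.
    type result struct {
        status string // unexported: deep.Equal never inspects it
    }

    func main() {
        // Prints [] (no diffs) even though the values plainly differ.
        fmt.Println(deep.Equal(result{status: "done"}, result{status: "aborted"}))
    }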
---
 parade/ddl.go      | 21 ++++++++++++-------
 parade/ddl.sql     |  2 +-
 parade/ddl_test.go | 33 ++++++++++++++++++++-----------
 3 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/parade/ddl.go b/parade/ddl.go
index 03d3da4b528..cc4233b2a15 100644
--- a/parade/ddl.go
+++ b/parade/ddl.go
@@ -217,7 +217,10 @@ func ReturnTask(conn *sqlx.DB, taskID TaskID, token PerformanceToken, resultStat
 	return nil
 }
 
-var ErrBadStatus = errors.New("bad status for task")
+var (
+	ErrBadStatus       = errors.New("bad status for task")
+	ErrNoFinishChannel = errors.New("task has no finish channel")
+)
 
 // WaitForTask blocks until taskId ends, and returns its result status and status code.  It
 // needs a pgx.Conn -- *not* a sqlx.Conn -- because it depends on PostgreSQL specific features.
@@ -225,14 +228,17 @@ func WaitForTask(ctx context.Context, conn *pgx.Conn, taskID TaskID) (resultStat
 	row := conn.QueryRow(ctx, `SELECT finish_channel, status_code FROM tasks WHERE id=$1`, taskID)
 	var (
 		finishChannel string
-		statusCode    TaskStatusCodeValue
-		status        string
+		statusCode    TaskStatusCodeValue = TaskInvalid
+		status        string              = "invalid"
 	)
 	if err = row.Scan(&finishChannel, &statusCode); err != nil {
-		return "", TaskInvalid, fmt.Errorf("check task %s to listen: %w", taskID, err)
+		return status, statusCode, fmt.Errorf("check task %s to listen: %w", taskID, err)
 	}
 	if statusCode != TaskInProgress && statusCode != TaskPending {
-		return "", statusCode, fmt.Errorf("task %s already in status %s: %w", taskID, statusCode, ErrBadStatus)
+		return status, statusCode, fmt.Errorf("task %s already in status %s: %w", taskID, statusCode, ErrBadStatus)
+	}
+	if finishChannel == "" {
+		return status, statusCode, fmt.Errorf("cannot wait for task %s: %w", taskID, ErrNoFinishChannel)
 	}
 
 	if _, err = conn.Exec(ctx, "LISTEN "+pgx.Identifier{finishChannel}.Sanitize()); err != nil {
@@ -245,9 +251,10 @@ func WaitForTask(ctx context.Context, conn *pgx.Conn, taskID TaskID) (resultStat
 	}
 
 	row = conn.QueryRow(ctx, `SELECT status, status_code FROM tasks WHERE id=$1`, taskID)
-	status = ""
-	statusCode = TaskInvalid
 	err = row.Scan(&status, &statusCode)
+	if err != nil {
+		err = fmt.Errorf("query status for task %s: %w", taskID, err)
+	}
 	return status, statusCode, err
 }
 
diff --git a/parade/ddl.sql b/parade/ddl.sql
index 381df9e511f..c79a871340c 100644
--- a/parade/ddl.sql
+++ b/parade/ddl.sql
@@ -98,7 +98,7 @@ BEGIN
     GET DIAGNOSTICS num_updated := ROW_COUNT;
 
     IF channel IS NOT NULL THEN
-      SELECT pg_notify(channel, NULL);
+      PERFORM pg_notify(channel, NULL);
     END IF;
 
     UPDATE tasks
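The PL/pgSQL rule behind that one-line fix, demonstrated in isolation (a hypothetical snippet, not part of the patch): inside a PL/pgSQL block, a bare SELECT must deliver its result somewhere, whereas PERFORM evaluates the expression and discards the result.

    DO $$
    BEGIN
        -- SELECT pg_notify('chan', NULL);  -- ERROR: query has no destination for result data
        PERFORM pg_notify('chan', NULL);    -- OK: result is evaluated and discarded
    END;
    $$;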
diff --git a/parade/ddl_test.go b/parade/ddl_test.go
index 107fb15fdb5..e8f68e6ff61 100644
--- a/parade/ddl_test.go
+++ b/parade/ddl_test.go
@@ -11,6 +11,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"testing"
 	"time"
 
@@ -246,6 +247,7 @@ func (w wrapper) ownTasks(actorID parade.ActorID, maxTasks int, actions []string
 		for i := 0; i < len(tasks); i++ {
 			task := &tasks[i]
 			task.ID = w.stripTask(task.ID)
+			// TODO(ariels): Strip prefix from Action (so far unused in these tests)
 		}
 	}
 	return tasks, err
@@ -673,7 +675,6 @@ func TestDeleteTasks(t *testing.T) {
 
 func TestNotification(t *testing.T) {
 	ctx := context.Background()
-	w := wrapper{t, db}
 
 	type testCase struct {
 		title      string
 		id         parade.TaskID
 		status     string
 		statusCode parade.TaskStatusCodeValue
 	}
 
 	cases := []testCase{
-		{"task aborted", parade.TaskID("111"), "b0rked!", parade.TaskAborted},
-		{"task succeeded", parade.TaskID("222"), "yay!", parade.TaskCompleted},
+		{"task aborted", parade.TaskID("111"), "b0rked", parade.TaskAborted},
+		{"task succeeded", parade.TaskID("222"), "yay", parade.TaskCompleted},
 	}
 
 	for _, c := range cases {
 		t.Run(c.title, func(t *testing.T) {
+			w := wrapper{t, db}
+
 			cleanup := w.insertTasks([]parade.TaskData{
-				{ID: c.id, Action: "frob"},
+				{ID: c.id, Action: "frob", StatusCode: "pending", Body: stringAddr(""), FinishChannelName: stringAddr(w.prefix("done"))},
 			})
 			defer cleanup()
 
 			if len(tasks) != 1 {
 				t.Fatalf("expected to own single task but got %+v", tasks)
 			}
+			task := tasks[0]
 
 			conn, err := stdlib.AcquireConn(w.db.DB)
 			if err != nil {
-				w.t.Fatalf("stdlib.AcquireConn: %s", err)
+				t.Fatalf("stdlib.AcquireConn: %s", err)
 			}
 			defer stdlib.ReleaseConn(w.db.DB, conn)
 
 			type result struct {
-				status     string
-				statusCode parade.TaskStatusCodeValue
-				err        error
+				Status     string
+				StatusCode parade.TaskStatusCodeValue
+				Err        error
 			}
 			ch := make(chan result)
+			wg := sync.WaitGroup{}
 			go func() {
-				status, statusCode, err := parade.WaitForTask(ctx, conn, parade.TaskID("111"))
+				wg.Add(1)
+				status, statusCode, err := parade.WaitForTask(ctx, conn, w.prefixTask(c.id))
 				ch <- result{status, statusCode, err}
 			}()
+			wg.Wait()
 
-			if err = w.returnTask(tasks[0].ID, tasks[0].Token, c.status, c.statusCode); err != nil {
-				t.Fatalf("return task %+v: %s", tasks[0], err)
+			if err = w.returnTask(task.ID, task.Token, c.status, c.statusCode); err != nil {
+				t.Fatalf("return task %+v: %s", task, err)
 			}
 
 			got := <-ch
+			if got.Err != nil {
+				t.Fatalf("wait for task %s: %s", c.id, got.Err)
+			}
+
 			expected := result{c.status, c.statusCode, nil}
 			if diffs := deep.Equal(expected, got); diffs != nil {
 				t.Errorf("WaitForTask returned unexpected values: %s", diffs)

From b1751a8f6153a5490ef2b8451679945ec7442bb3 Mon Sep 17 00:00:00 2001
From: tzahij
Date: Sun, 4 Oct 2020 19:50:25 +0300
Subject: [PATCH 021/158] change to db_read_branch - making max commit fixing
 more understandable

---
 catalog/db_branch_reader.go | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/catalog/db_branch_reader.go b/catalog/db_branch_reader.go
index 7720ed096bf..379691444cb 100644
--- a/catalog/db_branch_reader.go
+++ b/catalog/db_branch_reader.go
@@ -8,21 +8,22 @@ import (
 )
 
 type DBBranchReader struct {
-	tx                           db.Tx
-	branchID                     int64
-	buf, initialBuf              []*DBReaderEntry
-	bufSize                      int
-	EOF                          bool
-	after                        string
-	commitID, maxRequestedCommit CommitID
+	tx                 db.Tx
+	branchID           int64
+	buf, initialBuf    []*DBReaderEntry
+	bufSize            int
+	EOF                bool
+	after              string
+	commitID           CommitID
+	maxCommitFixNeeded bool
 }
 
 func NewDBBranchReader(tx db.Tx, branchID int64, commitID CommitID, bufSize int, after string) *DBBranchReader {
-	var maxRequestedCommit CommitID
+	var maxCommitFixNeeded bool
 	if commitID == CommittedID || commitID == UncommittedID {
-		maxRequestedCommit = MaxCommitID
+		maxCommitFixNeeded = false
 	} else {
-		maxRequestedCommit = commitID
+		maxCommitFixNeeded = true
 	}
 	return &DBBranchReader{
 		tx:         tx,
@@ -31,7 +32,7 @@ func NewDBBranchReader(tx db.Tx, branchID int64, commitID CommitID, bufSize int,
 		initialBuf: make([]*DBReaderEntry, 0, bufSize),
 		after:      after,
 		commitID:   commitID,
-		maxRequestedCommit: maxRequestedCommit,
+		maxCommitFixNeeded: maxCommitFixNeeded,
 	}
 }
 
@@ -57,7 +58,7 @@ func (r *DBBranchReader) Next() (*DBReaderEntry, error) {
 	}
 	nextPk := r.buf[0]
 	// if entry was deleted after the max commit that can be read, it must be set to undeleted
-	if nextPk.MaxCommit >= r.maxRequestedCommit {
+	if r.maxCommitFixNeeded && nextPk.MaxCommit >= r.commitID {
 		nextPk.MaxCommit = MaxCommitID
 	}
 	r.buf = r.buf[1:]
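A worked example of the fixed condition (types simplified; MaxCommitID stands in for the catalog's "never deleted" sentinel, and its value here is an assumption): reading a branch as of commit 5, an entry deleted at commit 7 was deleted in the read's future, so the reader must surface it as still live.

    package main

    import "fmt"

    type CommitID int64

    // Assumed stand-in for the catalog's "not deleted" sentinel value.
    const MaxCommitID CommitID = 1<<62 - 1

    type entry struct{ MinCommit, MaxCommit CommitID }

    func main() {
        commitID := CommitID(5)                // reading the branch as of commit 5
        maxCommitFixNeeded := true             // a specific commit, not Committed/Uncommitted
        e := entry{MinCommit: 3, MaxCommit: 7} // created at commit 3, deleted at commit 7

        // The deletion happened at or after the commit being read, so for
        // this read the entry is still live: report it as undeleted.
        if maxCommitFixNeeded && e.MaxCommit >= commitID {
            e.MaxCommit = MaxCommitID
        }
        fmt.Println(e.MaxCommit == MaxCommitID) // true: the entry is visible at commit 5
    }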
From c8af210920032f5304af8ffb2f0b1fc691d48aff Mon Sep 17 00:00:00 2001
From: YaelRiv <67264175+YaelRiv@users.noreply.github.com>
Date: Sun, 4 Oct 2020 18:00:33 +0100
Subject: [PATCH 022/158] new branching model page

Added a new branching model page based on the new use cases defined.
This is in addition to the recommendation page we currently have and
might replace it completely.

We still need to add code snippets to the relevant sections.
---
 docs/assets/img/branching_1.png        | Bin 0 -> 41584 bytes
 docs/assets/img/branching_2.png        | Bin 0 -> 64572 bytes
 docs/assets/img/branching_3.png        | Bin 0 -> 65145 bytes
 docs/assets/img/branching_4.png        | Bin 0 -> 71323 bytes
 docs/assets/img/branching_5.png        | Bin 0 -> 70889 bytes
 docs/assets/img/branching_6.png        | Bin 0 -> 58430 bytes
 docs/assets/img/branching_7.png        | Bin 0 -> 92082 bytes
 docs/assets/img/branching_8.png        | Bin 0 -> 62218 bytes
 docs/branching/new_branching_models.md | 111 +++++++++++++++++++++++++
 9 files changed, 111 insertions(+)
 create mode 100644 docs/assets/img/branching_1.png
 create mode 100644 docs/assets/img/branching_2.png
 create mode 100644 docs/assets/img/branching_3.png
 create mode 100644 docs/assets/img/branching_4.png
 create mode 100644 docs/assets/img/branching_5.png
 create mode 100644 docs/assets/img/branching_6.png
 create mode 100644 docs/assets/img/branching_7.png
 create mode 100644 docs/assets/img/branching_8.png
 create mode 100644 docs/branching/new_branching_models.md

diff --git a/docs/assets/img/branching_1.png b/docs/assets/img/branching_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..66c5e225a4c6679bc7c4f1ac5fb8b4037c4f113c
GIT binary patch
[binary PNG data omitted]
diff --git a/docs/assets/img/branching_2.png b/docs/assets/img/branching_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5005241179d3cab5d918c56d3f503e863318d78
GIT binary patch
[binary PNG data omitted]