Browse Source

Merge tag '0.3.4' into develop

Deploy fix.

* tag '0.3.4':
  Use locally patched postgres gem to circumvent UUID error.
feature/region-slug-gone
Bèr Kessels 5 months ago
parent
commit
24592aebe6
44 changed files with 2887 additions and 7 deletions
  1. 1
    1
      Gemfile
  2. 10
    6
      Gemfile.lock
  3. 0
    0
      vendor/cache/event_sourcery-postgres/.bundlecache
  4. 14
    0
      vendor/cache/event_sourcery-postgres/.gitignore
  5. 1
    0
      vendor/cache/event_sourcery-postgres/.rspec
  6. 21
    0
      vendor/cache/event_sourcery-postgres/.travis.yml
  7. 92
    0
      vendor/cache/event_sourcery-postgres/CHANGELOG.md
  8. 74
    0
      vendor/cache/event_sourcery-postgres/CODE_OF_CONDUCT.md
  9. 7
    0
      vendor/cache/event_sourcery-postgres/Gemfile
  10. 21
    0
      vendor/cache/event_sourcery-postgres/LICENSE.txt
  11. 98
    0
      vendor/cache/event_sourcery-postgres/README.md
  12. 6
    0
      vendor/cache/event_sourcery-postgres/Rakefile
  13. 14
    0
      vendor/cache/event_sourcery-postgres/bin/console
  14. 15
    0
      vendor/cache/event_sourcery-postgres/bin/setup
  15. 38
    0
      vendor/cache/event_sourcery-postgres/event_sourcery-postgres.gemspec
  16. 27
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres.rb
  17. 69
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/config.rb
  18. 177
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/event_store.rb
  19. 77
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/optimised_event_poll_waiter.rb
  20. 49
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/projector.rb
  21. 33
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/queue_with_interval_callback.rb
  22. 81
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/reactor.rb
  23. 181
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/schema.rb
  24. 85
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/table_owner.rb
  25. 116
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/tracker.rb
  26. 5
    0
      vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/version.rb
  27. 61
    0
      vendor/cache/event_sourcery-postgres/script/bench_reading_events.rb
  28. 47
    0
      vendor/cache/event_sourcery-postgres/script/bench_writing_events.rb
  29. 179
    0
      vendor/cache/event_sourcery-postgres/script/demonstrate_event_sequence_id_gaps.rb
  30. 51
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/config_spec.rb
  31. 128
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/event_store_spec.rb
  32. 99
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/optimised_event_poll_waiter_spec.rb
  33. 202
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/projector_spec.rb
  34. 300
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/reactor_spec.rb
  35. 178
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/table_owner_spec.rb
  36. 163
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/tracker_spec.rb
  37. 5
    0
      vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres_spec.rb
  38. 101
    0
      vendor/cache/event_sourcery-postgres/spec/spec_helper.rb
  39. 41
    0
      vendor/cache/event_sourcery-postgres/spec/support/db_helpers.rb
  40. 15
    0
      vendor/cache/event_sourcery-postgres/spec/support/event_helpers.rb
  41. 2
    0
      vendor/cache/event_sourcery-postgres/spec/support/logging.rb
  42. 3
    0
      vendor/cache/event_sourcery-postgres/spec/support/test_events.rb
  43. BIN
      vendor/cache/pg-1.1.4.gem
  44. BIN
      vendor/cache/pg-1.2.2.gem

+ 1
- 1
Gemfile View File

@@ -8,7 +8,7 @@ gem 'babosa'
gem 'bootstrap', '~> 4.3'
gem 'erubis'
gem 'event_sourcery'
gem 'event_sourcery-postgres'
gem 'event_sourcery-postgres', path: '../libs/event_sourcery-postgres'
gem 'hashie'
gem 'json_expressions'
gem 'jsonapi-rb'

+ 10
- 6
Gemfile.lock View File

@@ -1,3 +1,11 @@
PATH
remote: ../libs/event_sourcery-postgres
specs:
event_sourcery-postgres (0.8.0)
event_sourcery (>= 0.14.0)
pg
sequel (>= 4.38)

PATH
remote: ../libs/offline_geocoder
specs:
@@ -66,10 +74,6 @@ GEM
erubi (1.8.0)
erubis (2.7.0)
event_sourcery (0.22.0)
event_sourcery-postgres (0.8.0)
event_sourcery (>= 0.14.0)
pg
sequel (>= 4.38)
execjs (2.7.0)
ffi (1.11.1)
ffi-compiler (1.0.1)
@@ -115,7 +119,7 @@ GEM
parallel (1.12.1)
parser (2.5.0.5)
ast (~> 2.4.0)
pg (1.1.4)
pg (1.2.2)
popper_js (1.14.5)
powerpack (0.1.2)
pry (0.12.2)
@@ -228,7 +232,7 @@ DEPENDENCIES
dotenv (~> 2.6)
erubis
event_sourcery
event_sourcery-postgres
event_sourcery-postgres!
foreman
hashie
json_expressions

+ 0
- 0
vendor/cache/event_sourcery-postgres/.bundlecache View File


+ 14
- 0
vendor/cache/event_sourcery-postgres/.gitignore View File

@@ -0,0 +1,14 @@
/.bundle/
/.ruby-version
/.yardoc
/Gemfile.lock
/_yardoc/
/coverage/
/doc/
/pkg/
/spec/reports/
/tmp/
/vendor/

# rspec failure tracking
.rspec_status

+ 1
- 0
vendor/cache/event_sourcery-postgres/.rspec View File

@@ -0,0 +1 @@
--require spec_helper

+ 21
- 0
vendor/cache/event_sourcery-postgres/.travis.yml View File

@@ -0,0 +1,21 @@
sudo: false
language: ruby
rvm:
- 2.3
- 2.4
- 2.5
- 2.6
before_install:
- gem update --system
- gem install bundler
before_script:
- psql -c 'create database event_sourcery_test;' -U postgres
addons:
postgresql: 9.4
notifications:
slack:
on_pull_requests: false
on_success: change
on_failure: always
rooms:
secure: MNBzPIAQOGxCxM+qV36yYQcrX3RBCKlOAmFP7FbyoavlGA0SHKZc3iuHS+Z23ci/6RD6vCqZNlHNcDosugETUmKRDJBhi3qN/W1HsDtThTT+UaTAYjqZAMYeQVnn/vlH5HGnFfPz75iQe87SkuaP/3y0EKnrJnoHMVR/kItHrv9JcxIsAryf8Lfu3c45TjYmwfRKmiblhoUzTfBpsxSM2s1OWiyQCG4vbjXZj3NIkkhUYC4Sd/Cd0u7BE734z1Uo8UKjuZTBCSyhynqUdXaZCzeDYWpnh27NkyWjl97r4gHNLYYPizpBonJUuX4F1E1Fr+kFGl8qFA1vR9hFLmATWmF3rodagarbAHPQ0/RcpgGW9m7+L6HCTtwLmsNr7ezueTdhYCiLgCO601lRF4fLI2OCGSeSxUYlHfexNZMiqd3/UC6nXGHbWUcTYzdgeTSKggeXiNwnmQYq90pxw7UIZ9r9gNiuBPKHeCOsF2//OynbYABO3bmvbktMsz5sZgl9zgeuXUQqCk6RVANxT+5uZUpPlg8WGsv17fxPmZTxjibk/yEgD6tT4gyIJw2Gn/y5W5WyzCpT8vHU/Dmhj9i/wIHONDLlaDIMO6N/S97UVk8KVhvKkcd//Isbw08HO29Rrv7yq4Hwsb7aIuXyayuEaXhxihz+7kPh1g6imHtXpOg=

+ 92
- 0
vendor/cache/event_sourcery-postgres/CHANGELOG.md View File

@@ -0,0 +1,92 @@
# Change Log

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

## [Unreleased]
### Added
- Add Ruby 2.6 to the CI test matrix.

### Removed
- Remove Ruby 2.2 from the CI test matrix.

## [0.8.0]
### Added
- Add an `on_events_recorded` config option, that defaults to a no-op proc, \
to handle any app-specific logic after the events are recorded on `EventStore#sink`

## [0.7.0] - 2018-05-23
### Added
- Add a `projector_transaction_size` config option to control how many events
are processed before the transaction is committed. The default value is 1 to
match the existing behaviour.

We suggest setting this to match the number of events returned from the event
store subscription. This is [now configurable](https://github.com/envato/event_sourcery/pull/197)
in event_sourcery by configuring `subscription_batch_size`.

### Removed
- Remove upper bound version restriction on `sequel` gem. Now accepts versions
5 and higher.

## [0.6.0] - 2018-01-02
### Changed

- Only send info log after processing a group of events

### Removed
- Remove `processes_events` and `projects_events` as these have been [removed
in event_sourcery](https://github.com/envato/event_sourcery/pull/161).

## [0.5.0] - 2017-07-27
- First Version of YARD documentation.
- Fix Sequel deprecation by globally loading pg extensions

## [0.4.0] - 2017-06-21
### Changed
- Reactors store the UUID of the event being processed in the `causation_id`
of any emitted events. This replaces the old behaviour of storing id of the
event being processed in a `_driven_by_event_id` attribute in the emitted
event's body.

### Added
- Reactors store the correlation id of the event being processed in the
`correlation_id` of any emitted events.
- Added index on the `events` table for `correlation_id` and `causation_id`
columns.

## [0.3.0] - 2017-06-16
### Changed
- The event store persists the event `correlation_id` and `causation_id`.
To facilitate this `correlation_id` and `causation_id` columns have been
added to the `events` table and the `write_events` function has been
altered. Event Sourcery apps will need to ensure these DB changes have
been applied to use this version of Event Sourcery.
- The emit_events method now accepts typed events instead of symbols
- Remove dynamic emit events methods from reactors (e.g. emit_item_added)

## [0.2.0] - 2017-06-01
### Changed
- Make `EventSourcery::Postgres::OptimisedEventPollWaiter#shutdown` private
- Updated `EventSourcery::Postgres::OptimisedEventPollWaiter#poll` to ensure that `#shutdown!` is run when an error is raised
or when the loop stops

### Added
- Configure projector tracker table name via `EventSourcery::Postgres.configure`

## 0.1.0 - 2017-05-26
### Changed
- Imported code from the [event_sourcery](https://github.com/envato/event_sourcery).
- Postgres related configuration is through `EventSourcery::Postgres.configure`
instead of `EventSourcery.configure`.

[Unreleased]: https://github.com/envato/event_sourcery-postgres/compare/v0.8.0...HEAD
[0.8.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.6.0...v0.7.0
[0.6.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.5.0...v0.6.0
[0.5.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.4.0...v0.5.0
[0.4.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.2.0...v0.3.0
[0.2.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.1.0...v0.2.0

+ 74
- 0
vendor/cache/event_sourcery-postgres/CODE_OF_CONDUCT.md View File

@@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at odindutton@gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

+ 7
- 0
vendor/cache/event_sourcery-postgres/Gemfile View File

@@ -0,0 +1,7 @@
# Gemfile for developing the event_sourcery-postgres gem itself.
source 'https://rubygems.org'

ruby '>= 2.2.0'

# Runtime and development dependencies come from the gemspec.
gemspec

# Use event_sourcery from git rather than the released gem.
gem 'event_sourcery', git: 'https://github.com/envato/event_sourcery.git'

+ 21
- 0
vendor/cache/event_sourcery-postgres/LICENSE.txt View File

@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2017 Envato

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

+ 98
- 0
vendor/cache/event_sourcery-postgres/README.md View File

@@ -0,0 +1,98 @@
# EventSourcery::Postgres

[![Build Status](https://travis-ci.org/envato/event_sourcery-postgres.svg?branch=master)](https://travis-ci.org/envato/event_sourcery-postgres)

## Development Status

EventSourcery is currently being used in production by multiple apps but we
haven't finalized the API yet and things are still moving rapidly. Until we
release a 1.0 things may change without first being deprecated.

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'event_sourcery-postgres'
```

## Configure

```ruby
EventSourcery::Postgres.configure do |config|
config.event_store_database = Sequel.connect(...)
config.projections_database = Sequel.connect(...)
config.write_events_function_name = 'writeEvents'
config.events_table_name = :events
config.aggregates_table_name = :aggregates
config.callback_interval_if_no_new_events = 60
end
```

## Usage


### Event Store

```ruby
ItemAdded = EventSourcery::Event

EventSourcery::Postgres.event_store.sink(ItemAdded.new(aggregate_id: uuid, body: { }}))
EventSourcery::Postgres.event_store.get_next_from(0).each do |event|
puts event.inspect
end
```

### Projectors & Reactors

```ruby
class ItemProjector
include EventSourcery::Postgres::Projector

table :items do
column :item_uuid, 'UUID NOT NULL'
column :title, 'VARCHAR(255) NOT NULL'
end

project ItemAdded do |event|
table(:items).insert(item_uuid: event.aggregate_id,
title: event.body.fetch('title'))
end
end

class UserEmailer
include EventSourcery::Postgres::Reactor

emits_events SignUpEmailSent

process UserSignedUp do |event|
emit_event SignUpEmailSent.new(user_id: event.aggregate_id) do
UserMailer.signed_up(...).deliver
end
end
end

EventSourcery::EventProcessing::ESPRunner.new(
event_processors: [item_projector, user_emailer],
event_store: EventSourcery::Postgres.config.event_store,
stop_on_failure: true,
).start!
```


## Development

After checking out the repo, run `bin/setup` to install dependencies. (This will install dependencies and recreate the test database.) Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.

To install this gem onto your local machine, run `bundle exec rake install`.

To release a new version:

1. Update the version number in `lib/event_sourcery/postgres/version.rb`
2. Get this change onto master via the normal PR process
3. Run `bundle exec rake release`, this will create a git tag for the
version, push tags up to GitHub, and upload the gem to rubygems.org.

## Contributing

Bug reports and pull requests are welcome on GitHub at https://github.com/envato/event_sourcery-postgres.

+ 6
- 0
vendor/cache/event_sourcery-postgres/Rakefile View File

@@ -0,0 +1,6 @@
# Rake entry point: Bundler's gem build/release tasks plus the RSpec task.
require 'bundler/gem_tasks'
require 'rspec/core/rake_task'

# `rake spec` runs the full RSpec suite.
RSpec::Core::RakeTask.new(:spec)

# A bare `rake` defaults to running the tests.
task default: :spec

+ 14
- 0
vendor/cache/event_sourcery-postgres/bin/console View File

@@ -0,0 +1,14 @@
#!/usr/bin/env ruby
# Interactive console for experimenting with the gem during development.

require "bundler/setup"
require "event_sourcery/postgres"

# You can add fixtures and/or initialization code here to make experimenting
# with your gem easier. You can also use a different console, if you like.

# (If you use this, don't forget to add pry to your Gemfile!)
# require "pry"
# Pry.start

require "irb"
IRB.start(__FILE__)

+ 15
- 0
vendor/cache/event_sourcery-postgres/bin/setup View File

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Install dependencies and recreate the test database.
set -euo pipefail  # fail fast on errors, unset variables and pipeline failures
IFS=$'\n\t'
set -vx  # echo each command as it runs

bundle install

echo
echo "--- Preparing test databases"
echo

# `|| echo 0` keeps the script going when the database doesn't exist yet.
dropdb event_sourcery_test || echo 0
createdb event_sourcery_test

# Do any other automated setup that you need to do here

+ 38
- 0
vendor/cache/event_sourcery-postgres/event_sourcery-postgres.gemspec View File

@@ -0,0 +1,38 @@
# coding: utf-8
# Gem specification for event_sourcery-postgres.
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'event_sourcery/postgres/version'

Gem::Specification.new do |spec|
  spec.name = 'event_sourcery-postgres'
  spec.version = EventSourcery::Postgres::VERSION

  spec.authors = ['Envato']
  spec.email = ['rubygems@envato.com']

  spec.summary = 'Postgres event store for use with EventSourcery'
  spec.homepage = 'https://github.com/envato/event_sourcery-postgres'
  spec.metadata = {
    'bug_tracker_uri' => 'https://github.com/envato/event_sourcery-postgres/issues',
    'changelog_uri' => 'https://github.com/envato/event_sourcery-postgres/blob/master/CHANGELOG.md',
    'source_code_uri' => 'https://github.com/envato/event_sourcery-postgres',
  }

  # Ship only library files: exclude dev/test/tooling paths from the gem.
  spec.files = `git ls-files -z`.split("\x0").reject do |f|
    f.match(%r{^(\.|bin/|Gemfile|Rakefile|script/|spec/)})
  end
  # Executables (if any) live under exe/; bin/ holds dev scripts only.
  spec.bindir = 'exe'
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ['lib']

  spec.required_ruby_version = '>= 2.2.0'

  spec.add_dependency 'sequel', '>= 4.38'
  spec.add_dependency 'pg'
  spec.add_dependency 'event_sourcery', '>= 0.14.0'
  spec.add_development_dependency 'bundler'
  spec.add_development_dependency 'rake', '~> 10.0'
  spec.add_development_dependency 'rspec', '~> 3.0'
  spec.add_development_dependency 'pry'
  spec.add_development_dependency 'benchmark-ips'
end

+ 27
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres.rb View File

@@ -0,0 +1,27 @@
require 'sequel'

Sequel.default_timezone = :utc

require 'event_sourcery'
require 'event_sourcery/postgres/version'
require 'event_sourcery/postgres/config'
require 'event_sourcery/postgres/queue_with_interval_callback'
require 'event_sourcery/postgres/schema'
require 'event_sourcery/postgres/optimised_event_poll_waiter'
require 'event_sourcery/postgres/event_store'
require 'event_sourcery/postgres/table_owner'
require 'event_sourcery/postgres/projector'
require 'event_sourcery/postgres/reactor'
require 'event_sourcery/postgres/tracker'

module EventSourcery
  # Namespace for the Postgres-backed pieces of EventSourcery.
  module Postgres
    class << self
      # Yields the memoised configuration object so callers can set options.
      def configure
        yield config
      end

      # Lazily builds and memoises the gem-wide configuration.
      def config
        @config ||= Config.new
      end
    end
  end
end

+ 69
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/config.rb View File

@@ -0,0 +1,69 @@
module EventSourcery
  module Postgres
    # Central configuration for the Postgres event store: table and function
    # names, database connections, and the lazily built store/source/sink.
    class Config
      attr_accessor :lock_table_to_guarantee_linear_sequence_id_growth,
                    :write_events_function_name,
                    :events_table_name,
                    :aggregates_table_name,
                    :tracker_table_name,
                    :callback_interval_if_no_new_events,
                    :auto_create_projector_tracker,
                    :event_tracker,
                    :projector_transaction_size,
                    :on_events_recorded

      attr_writer :event_store,
                  :event_source,
                  :event_sink

      attr_reader :event_store_database,
                  :projections_database

      def initialize
        @lock_table_to_guarantee_linear_sequence_id_growth = true
        @write_events_function_name = 'writeEvents'
        @events_table_name = :events
        @aggregates_table_name = :aggregates
        @tracker_table_name = :projector_tracker
        @callback_interval_if_no_new_events = 10
        @event_store_database = nil
        @auto_create_projector_tracker = true
        @projector_transaction_size = 1
        # No-op hook invoked after events have been recorded by the store.
        @on_events_recorded = ->(events) {}
      end

      # Event store bound to the event store database, built on first use.
      def event_store
        return @event_store if @event_store

        @event_store = EventStore.new(event_store_database)
      end

      # Read-side wrapper around the event store, built on first use.
      def event_source
        return @event_source if @event_source

        @event_source = ::EventSourcery::EventStore::EventSource.new(event_store)
      end

      # Write-side wrapper around the event store, built on first use.
      def event_sink
        return @event_sink if @event_sink

        @event_sink = ::EventSourcery::EventStore::EventSink.new(event_store)
      end

      def event_store_database=(db_connection)
        enable_pg_json(db_connection)

        @event_store_database = db_connection
      end

      # Setting the projections database also wires up the default tracker.
      def projections_database=(db_connection)
        enable_pg_json(db_connection)

        @projections_database = db_connection
        @event_tracker = Postgres::Tracker.new(db_connection)
      end

      private

      # Load Sequel's pg_json extension so json columns round-trip as hashes.
      def enable_pg_json(db_connection)
        db_connection&.extension(:pg_json)
      end
    end
  end
end

+ 177
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/event_store.rb View File

@@ -0,0 +1,177 @@
module EventSourcery
  module Postgres
    # Postgres implementation of an EventSourcery event store. Events are
    # written through a database function (so version checks happen
    # atomically in the database) and read back with Sequel datasets.
    class EventStore
      include EventSourcery::EventStore::EachByRange

      def initialize(db_connection,
                     events_table_name: EventSourcery::Postgres.config.events_table_name,
                     lock_table: EventSourcery::Postgres.config.lock_table_to_guarantee_linear_sequence_id_growth,
                     write_events_function_name: EventSourcery::Postgres.config.write_events_function_name,
                     event_builder: EventSourcery.config.event_builder,
                     on_events_recorded: EventSourcery::Postgres.config.on_events_recorded)
        @db_connection = db_connection
        @events_table_name = events_table_name
        @write_events_function_name = write_events_function_name
        @lock_table = lock_table
        @event_builder = event_builder
        @on_events_recorded = on_events_recorded
      end

      # Like water flowing into a sink eventually it will go down the drain
      # into the goodness of the plumbing system.
      # So too will the given events you put in this 'sink'. Except the
      # plumbing system is the database events table.
      # This can raise db connection errors.
      #
      # All events must belong to a single aggregate — the write function is
      # invoked once per call, so cross-aggregate atomic writes are rejected.
      #
      # @param event_or_events the event or events to save
      # @param expected_version the version to save with the event, default nil
      #
      # @raise [DatabaseError] if something goes wrong with the database
      # @raise [ConcurrencyError] if there was a concurrency conflict
      def sink(event_or_events, expected_version: nil)
        events = Array(event_or_events)
        aggregate_ids = events.map(&:aggregate_id).uniq
        raise AtomicWriteToMultipleAggregatesNotSupported unless aggregate_ids.count == 1
        sql = write_events_sql(aggregate_ids.first, events, expected_version)
        @db_connection.run(sql)
        log_events_saved(events)
        on_events_recorded.call(events)
        true
      rescue Sequel::DatabaseError => e
        # The write function signals version conflicts via this message text;
        # translate it into the library's ConcurrencyError.
        if e.message =~ /Concurrency conflict/
          raise ConcurrencyError, "expected version was not #{expected_version}. Error: #{e.message}"
        else
          raise
        end
      end

      # Get the next set of events from the given event id. You can
      # specify event types and a limit.
      # Default limit is 1000 and the default event types will be all.
      #
      # @param id the event id to get next events from (inclusive)
      # @param event_types the event types to filter, default nil = all
      # @param limit the limit to the results, default 1000
      #
      # @return [Array] array of found events
      def get_next_from(id, event_types: nil, limit: 1000)
        query = events_table.
          order(:id).
          where(Sequel.lit('id >= ?', id)).
          limit(limit)
        query = query.where(type: event_types) if event_types
        query.map { |event_row| build_event(event_row) }
      end

      # Get the last event id for the given event types.
      #
      # @param event_types the type of event(s) to filter
      #
      # @return the latest event id, or 0 when no matching events exist
      def latest_event_id(event_types: nil)
        latest_event = events_table
        latest_event = latest_event.where(type: event_types) if event_types
        latest_event = latest_event.order(:id).last
        if latest_event
          latest_event[:id]
        else
          0
        end
      end

      # Get the events for a given aggregate id, ordered by version.
      #
      # @param aggregate_id the aggregate id to filter for
      #
      # @return [Array] of found events
      def get_events_for_aggregate_id(aggregate_id)
        # to_str: accepts any string-like id (e.g. a UUID value object).
        events_table.where(aggregate_id: aggregate_id.to_str).order(:version).map do |event_hash|
          build_event(event_hash)
        end
      end

      # Subscribe to events using Postgres listen/notify to avoid polling.
      #
      # @param from_id subscribe from a starting event id. default will be from the start.
      # @param event_types the event_types to subscribe to, default all.
      # @param after_listen the after listen call back block. default nil.
      # @param subscription_master the subscription master block
      def subscribe(from_id:, event_types: nil, after_listen: nil, subscription_master:, &block)
        poll_waiter = OptimisedEventPollWaiter.new(db_connection: @db_connection, after_listen: after_listen)
        args = {
          poll_waiter: poll_waiter,
          event_store: self,
          from_event_id: from_id,
          event_types: event_types,
          events_table_name: @events_table_name,
          subscription_master: subscription_master,
          on_new_events: block
        }
        EventSourcery::EventStore::Subscription.new(args).tap(&:start)
      end

      private

      attr_reader :on_events_recorded

      # Sequel dataset for the configured events table.
      def events_table
        @db_connection[@events_table_name]
      end

      # Turn a row hash into an event object via the configured builder.
      def build_event(data)
        @event_builder.build(data)
      end

      # Build the `select writeEvents(...)` call. All values go through
      # @db_connection.literal (see to_sql_literal), so they are escaped.
      def write_events_sql(aggregate_id, events, expected_version)
        bodies = sql_literal_array(events, 'json', &:body)
        types = sql_literal_array(events, 'varchar', &:type)
        created_ats = sql_literal_array(events, 'timestamp without time zone', &:created_at)
        event_uuids = sql_literal_array(events, 'uuid', &:uuid)
        correlation_ids = sql_literal_array(events, 'uuid', &:correlation_id)
        causation_ids = sql_literal_array(events, 'uuid', &:causation_id)
        <<-SQL
          select #{@write_events_function_name}(
            #{sql_literal(aggregate_id.to_str, 'uuid')},
            #{types},
            #{sql_literal(expected_version, 'int')},
            #{bodies},
            #{created_ats},
            #{event_uuids},
            #{correlation_ids},
            #{causation_ids},
            #{sql_literal(@lock_table, 'boolean')}
          );
        SQL
      end

      # One attribute per event, rendered as a typed SQL array literal.
      def sql_literal_array(events, type, &block)
        sql_array = events.map do |event|
          to_sql_literal(block.call(event))
        end.join(', ')
        "array[#{sql_array}]::#{type}[]"
      end

      def sql_literal(value, type)
        "#{to_sql_literal(value)}::#{type}"
      end

      # NOTE(review): any falsey value (nil AND false) serialises as SQL
      # null here — so `@lock_table = false` becomes null, not false.
      # Confirm the write function treats null as "don't lock".
      def to_sql_literal(value)
        return 'null' unless value
        wrapped_value = if Time === value
          value.iso8601(6)
        elsif Hash === value
          Sequel.pg_json(value)
        else
          value
        end
        @db_connection.literal(wrapped_value)
      end

      def log_events_saved(events)
        events.each do |event|
          EventSourcery.logger.debug { "Saved event: #{event.inspect}" }
        end
      end
    end
  end
end

+ 77
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/optimised_event_poll_waiter.rb View File

@@ -0,0 +1,77 @@
module EventSourcery
  module Postgres
    # Optimise poll interval with Postgres listen/notify: a background
    # thread LISTENs on the "new_event" channel and signals the main poll
    # loop through a queue, instead of blind fixed-interval polling.
    class OptimisedEventPollWaiter
      # Raised when the background LISTEN thread has unexpectedly died.
      ListenThreadDied = Class.new(StandardError)

      def initialize(db_connection:, timeout: 30, after_listen: proc {})
        @db_connection = db_connection
        @timeout = timeout
        @events_queue = QueueWithIntervalCallback.new
        @after_listen = after_listen
      end

      # Runs `block` once up-front, then again every time a new-event signal
      # arrives (or when the queue's idle-interval callback fires).
      # Throw :stop inside the block to end the loop cleanly.
      def poll(after_listen: proc { }, &block)
        @events_queue.callback = proc do
          ensure_listen_thread_alive!
          block.call
        end
        start_async(after_listen: after_listen)
        catch(:stop) do
          block.call
          loop do
            ensure_listen_thread_alive!
            wait_for_new_event_to_appear
            clear_new_event_queue
            block.call
          end
        end
      ensure
        # Always stop the LISTEN thread — on :stop and on errors alike.
        shutdown!
      end

      private

      def shutdown!
        @listen_thread.kill if @listen_thread.alive?
      end

      def ensure_listen_thread_alive!
        raise ListenThreadDied unless @listen_thread.alive?
      end

      # Blocks until the listen thread pushes a marker into the queue.
      def wait_for_new_event_to_appear
        @events_queue.pop
      end

      def clear_new_event_queue
        @events_queue.clear
      end

      # Spawn the background LISTEN thread, composing the per-poll hook
      # with the constructor-supplied one.
      def start_async(after_listen: nil)
        after_listen_callback = if after_listen
                                  proc do
                                    after_listen.call
                                    @after_listen.call if @after_listen
                                  end
                                else
                                  @after_listen
                                end
        @listen_thread = Thread.new do
          listen_for_new_events(loop: true,
                                after_listen: after_listen_callback,
                                timeout: @timeout)
        end
      end

      def listen_for_new_events(loop: true, after_listen: nil, timeout: 30)
        @db_connection.listen('new_event',
                              loop: loop,
                              after_listen: after_listen,
                              timeout: timeout) do |_channel, _pid, _payload|
          # The queue is a signal, not a buffer: only enqueue a marker when
          # none is already pending, so bursts collapse into one wake-up.
          @events_queue.push(:new_event_arrived) if @events_queue.empty?
        end
      end
    end
  end
end

+ 49
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/projector.rb View File

@@ -0,0 +1,49 @@
module EventSourcery
  module Postgres
    # Mix-in that turns a class into a Postgres-backed projector: events are
    # applied inside database transactions and the processed position is
    # recorded via the tracker in the same transaction.
    module Projector
      def self.included(base)
        base.include(EventProcessing::EventStreamProcessor)
        base.prepend(TableOwner)
        base.include(InstanceMethods)
        base.class_eval do
          # Projector-flavoured aliases for the generic processor API.
          alias_method :project, :process

          class << self
            alias_method :project, :process
            alias_method :projector_name, :processor_name
          end
        end
      end

      module InstanceMethods
        def initialize(tracker: EventSourcery::Postgres.config.event_tracker,
                       db_connection: EventSourcery::Postgres.config.projections_database,
                       transaction_size: EventSourcery::Postgres.config.projector_transaction_size)
          @tracker = tracker
          @db_connection = db_connection
          @transaction_size = transaction_size
        end

        private

        attr_reader :transaction_size

        # Process events in slices of `transaction_size`; each slice runs in
        # one DB transaction so the projection rows and the tracker position
        # commit atomically.
        # NOTE(review): the `tracker`, `db_connection` and `processor_name`
        # readers appear to come from TableOwner / EventStreamProcessor
        # (not visible here) — confirm.
        def process_events(events, subscription_master)
          events.each_slice(transaction_size) do |slice_of_events|
            # Allow a graceful stop between transactions, never inside one.
            subscription_master.shutdown_if_requested

            db_connection.transaction do
              slice_of_events.each do |event|
                process(event)
                EventSourcery.logger.debug { "[#{processor_name}] Processed event: #{event.inspect}" }
              end
              tracker.processed_event(processor_name, slice_of_events.last.id)
            end
          end

          EventSourcery.logger.info { "[#{processor_name}] Processed up to event id: #{events.last.id}" }
        end
      end
    end
  end
end

+ 33
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/queue_with_interval_callback.rb View File

@@ -0,0 +1,33 @@
module EventSourcery
  module Postgres
    # A Queue whose blocking #pop periodically invokes a callback while the
    # queue stays empty, so callers can do housekeeping during quiet spells.
    class QueueWithIntervalCallback < ::Queue
      attr_accessor :callback

      def initialize(callback: proc {}, callback_interval: EventSourcery::Postgres.config.callback_interval_if_no_new_events, poll_interval: 0.1)
        @callback = callback
        @callback_interval = callback_interval
        @poll_interval = poll_interval
        super()
      end

      # Pop the next item. With a truthy argument this delegates straight to
      # Queue#pop(non_block) and never runs the idle callback.
      def pop(non_block_without_callback = false)
        non_block_without_callback ? super : pop_with_interval_callback
      end

      private

      # Wait (sleeping in short poll intervals) until an item arrives; each
      # time a full callback interval passes with nothing queued, fire the
      # callback and restart the interval clock.
      def pop_with_interval_callback
        deadline = @callback_interval && Time.now + @callback_interval
        loop do
          break pop(true) unless empty?

          if deadline && Time.now > deadline
            @callback.call
            deadline = Time.now + @callback_interval
          end
          sleep @poll_interval
        end
      end
    end
  end
end

+ 81
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/reactor.rb View File

@@ -0,0 +1,81 @@
module EventSourcery
  module Postgres
    # Mixin that turns the including class into a reactor: an event stream
    # processor that may emit new events in response to the events it
    # processes.
    module Reactor
      # Raised when a reactor emits an event type it has not declared via
      # .emits_events.
      UndeclaredEventEmissionError = Class.new(StandardError)

      def self.included(base)
        base.include(EventProcessing::EventStreamProcessor)
        base.extend(ClassMethods)
        base.prepend(TableOwner)
        base.include(InstanceMethods)
      end

      module ClassMethods
        # Assign the types of events this reactor can emit.
        #
        # @param event_types the types of events this reactor can emit
        def emits_events(*event_types)
          @emits_event_types = event_types
        end

        # @return [Array] an array of the types of events this reactor can emit
        def emit_events
          @emits_event_types ||= []
        end

        # This will tell you if this reactor emits any type of event.
        #
        # @return [true, false] true if this emits events, false if not
        def emits_events?
          !emit_events.empty?
        end

        # Will check if this reactor emits the given type of event.
        #
        # @param event_type the event type to check
        # @return [true, false] true if it does emit the given event, false if not
        def emits_event?(event_type)
          emit_events.include?(event_type)
        end
      end

      module InstanceMethods
        # @param tracker tracker used to persist processed event ids
        # @param db_connection Sequel connection for this reactor's tables
        # @param event_source source events are read from
        # @param event_sink sink emitted events are written to
        # @raise [ArgumentError] if this reactor declares emitted events but
        #   no sink or source was supplied
        def initialize(tracker: EventSourcery::Postgres.config.event_tracker,
                       db_connection: EventSourcery::Postgres.config.projections_database,
                       event_source: EventSourcery::Postgres.config.event_source,
                       event_sink: EventSourcery::Postgres.config.event_sink)
          @tracker = tracker
          @event_source = event_source
          @event_sink = event_sink
          @db_connection = db_connection
          if self.class.emits_events?
            if event_sink.nil? || event_source.nil?
              raise ArgumentError, 'An event sink and source is required for processors that emit events'
            end
          end
        end
      end

      private

      # NOTE: the methods below are defined directly on the Reactor module,
      # so they become (private) instance methods of the including class —
      # they are not part of InstanceMethods.
      attr_reader :event_sink, :event_source

      # Build (if given a Hash) and emit an event, chaining causation and
      # correlation ids from the event currently being processed.
      #
      # @param event_or_hash an Event instance or a Hash of event attributes
      # @raise [UndeclaredEventEmissionError] if the event's type was not
      #   declared via .emits_events
      def emit_event(event_or_hash, &block)
        event = if Event === event_or_hash
                  event_or_hash
                else
                  Event.new(event_or_hash)
                end
        raise UndeclaredEventEmissionError unless self.class.emits_event?(event.class)
        # NOTE(review): `_event` is presumably the event currently being
        # processed, supplied by EventStreamProcessor — confirm against that
        # module.
        event = event.with(causation_id: _event.uuid, correlation_id: _event.correlation_id)
        invoke_action_and_emit_event(event, block)
        EventSourcery.logger.debug { "[#{self.processor_name}] Emitted event: #{event.inspect}" }
      end

      # Run the caller-supplied action (if any) against the event body,
      # then write the event to the sink.
      def invoke_action_and_emit_event(event, action)
        action.call(event.body) if action
        event_sink.sink(event)
      end
    end
  end
end

+ 181
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/schema.rb View File

@@ -0,0 +1,181 @@
module EventSourcery
  module Postgres
    # Schema helpers for creating the event store tables and functions in a
    # Postgres database via Sequel.
    module Schema
      module_function

      # This will create the event store tables and functions
      # (event, aggregates, tracker and create or update functions)
      # for the given Postgres database.
      # The default will be the one specified in the config.
      #
      # @param db the Postgres database to use
      # @param events_table_name the name of the events table
      # @param aggregates_table_name the name of the aggregates table
      # @param write_events_function_name the name of the write-events function
      def create_event_store(db: EventSourcery::Postgres.config.event_store_database,
                             events_table_name: EventSourcery::Postgres.config.events_table_name,
                             aggregates_table_name: EventSourcery::Postgres.config.aggregates_table_name,
                             write_events_function_name: EventSourcery::Postgres.config.write_events_function_name)
        create_events(db: db, table_name: events_table_name)
        create_aggregates(db: db, table_name: aggregates_table_name)
        create_or_update_functions(db: db, events_table_name: events_table_name, function_name: write_events_function_name, aggregates_table_name: aggregates_table_name)
      end

      # Create the events table. Needs the database and the table name.
      # The defaults will be what's specified in config.
      #
      # @param db the Postgres database to use
      # @param table_name the name of the events table
      def create_events(db: EventSourcery::Postgres.config.event_store_database,
                        table_name: EventSourcery::Postgres.config.events_table_name)
        # uuid-ossp provides uuid_generate_v4() used for the uuid column default.
        db.run 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"'
        db.create_table(table_name) do
          primary_key :id, type: :Bignum
          column :uuid, :uuid, null: false, default: Sequel.lit('uuid_generate_v4()')
          column :aggregate_id, :uuid, null: false
          column :type, :varchar, null: false, size: 255
          column :body, :json, null: false
          column :version, :bigint, null: false
          column :correlation_id, :uuid
          column :causation_id, :uuid
          column :created_at, :'timestamp without time zone', null: false, default: Sequel.lit("(now() at time zone 'utc')")
          # One row per aggregate version; also enforces optimistic concurrency.
          index [:aggregate_id, :version], unique: true
          index :uuid, unique: true
          index :type
          index :correlation_id
          index :causation_id
          index :created_at
        end
      end

      # Create the aggregates table. Needs the database and the table name.
      # The defaults will be what's specified in config.
      #
      # @param db the Postgres database to use
      # @param table_name the name of the aggregates table
      def create_aggregates(db: EventSourcery::Postgres.config.event_store_database,
                            table_name: EventSourcery::Postgres.config.aggregates_table_name)
        db.create_table(table_name) do
          uuid :aggregate_id, primary_key: true
          column :version, :bigint, default: 1
        end
      end

      # Create the 'create or update' functions.
      # Needs the database, table name, function name and aggregates table name.
      # The defaults will be what's specified in config.
      #
      # Defines a plpgsql function that appends a batch of events for one
      # aggregate, enforcing the expected version, optionally serialising
      # writers with an advisory lock, and NOTIFYing 'new_event' listeners.
      #
      # @param db the Postgres database to use
      # @param function_name the name of the write events function
      # @param events_table_name the name of the events table
      # @param aggregates_table_name the name of the aggregates table
      def create_or_update_functions(db: EventSourcery::Postgres.config.event_store_database,
                                     function_name: EventSourcery::Postgres.config.write_events_function_name,
                                     events_table_name: EventSourcery::Postgres.config.events_table_name,
                                     aggregates_table_name: EventSourcery::Postgres.config.aggregates_table_name)
        db.run <<-SQL
          create or replace function #{function_name}(_aggregateId uuid,
            _eventTypes varchar[],
            _expectedVersion int,
            _bodies json[],
            _createdAtTimes timestamp without time zone[],
            _eventUUIDs uuid[],
            _correlationIds uuid[],
            _causationIds uuid[],
            _lockTable boolean) returns void as $$
          declare
            currentVersion int;
            body json;
            eventVersion int;
            eventId text;
            index int;
            newVersion int;
            numEvents int;
            createdAt timestamp without time zone;
          begin
            numEvents := array_length(_bodies, 1);
            select version into currentVersion from #{aggregates_table_name} where aggregate_id = _aggregateId;
            if not found then
              -- when we have no existing version for this aggregate
              if _expectedVersion = 0 or _expectedVersion is null then
                -- set the version to 1 if expected version is null or 0
                insert into #{aggregates_table_name}(aggregate_id, version) values(_aggregateId, numEvents);
                currentVersion := 0;
              else
                raise 'Concurrency conflict. Current version: 0, expected version: %', _expectedVersion;
              end if;
            else
              if _expectedVersion is null then
                -- automatically increment the version
                update #{aggregates_table_name} set version = version + numEvents where aggregate_id = _aggregateId returning version into newVersion;
                currentVersion := newVersion - numEvents;
              else
                -- increment the version if it's at our expected version
                update #{aggregates_table_name} set version = version + numEvents where aggregate_id = _aggregateId and version = _expectedVersion;
                if not found then
                  -- version was not at expected_version, raise an error.
                  -- currentVersion may not equal what it did in the database when the
                  -- above update statement is executed (it may have been incremented by another
                  -- process)
                  raise 'Concurrency conflict. Last known current version: %, expected version: %', currentVersion, _expectedVersion;
                end if;
              end if;
            end if;
            index := 1;
            eventVersion := currentVersion + 1;
            if _lockTable then
              -- Ensure this transaction is the only one writing events to guarantee
              -- linear growth of sequence IDs.
              -- Any value that won't conflict with other advisory locks will work.
              -- The Postgres tracker currently obtains an advisory lock using it's
              -- integer row ID, so values 1 to the number of ESP's in the system would
              -- be taken if the tracker is running in the same database as your
              -- projections.
              perform pg_advisory_xact_lock(-1);
            end if;
            foreach body IN ARRAY(_bodies)
            loop
              if _createdAtTimes[index] is not null then
                createdAt := _createdAtTimes[index];
              else
                createdAt := now() at time zone 'utc';
              end if;

              insert into #{events_table_name}
                (uuid, aggregate_id, type, body, version, correlation_id, causation_id, created_at)
              values
                (
                  _eventUUIDs[index],
                  _aggregateId,
                  _eventTypes[index],
                  body,
                  eventVersion,
                  _correlationIds[index],
                  _causationIds[index],
                  createdAt
                )
                returning id into eventId;

              eventVersion := eventVersion + 1;
              index := index + 1;
            end loop;
            perform pg_notify('new_event', eventId);
          end;
          $$ language plpgsql;
        SQL
      end

      # Create the projector tracker table. Needs the database and the table name.
      # The defaults will be what's specified in config.
      #
      # @param db the Postgres database to use
      # @param table_name the name of the projector tracker table
      def create_projector_tracker(db: EventSourcery::Postgres.config.projections_database,
                                   table_name: EventSourcery::Postgres.config.tracker_table_name)
        db.create_table(table_name) do
          primary_key :id, type: :Bignum
          column :name, 'varchar(255) not null'
          column :last_processed_event_id, 'bigint not null default 0'
          index :name, unique: true
        end
      end
    end
  end
end

+ 85
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/table_owner.rb View File

@@ -0,0 +1,85 @@
module EventSourcery
  module Postgres
    # Mixin (via prepend) for processors that own database tables. Lets a
    # class declare its tables with blocks of Sequel schema DSL, and provides
    # setup/reset/truncate lifecycle helpers plus a private accessor for the
    # (optionally prefixed) table datasets.
    module TableOwner
      # Raised when #table is called without a name and the owner does not
      # declare exactly one table.
      DefaultTableError = Class.new(StandardError)
      # Raised when #table is called with an undeclared table name.
      NoSuchTableError = Class.new(StandardError)

      def self.prepended(base)
        base.extend(ClassMethods)
      end

      module ClassMethods
        # Hash of the tables and their corresponding blocks.
        #
        # @return [Hash] hash keyed by table names and block values
        def tables
          @tables ||= {}
        end

        # For the given table name assign the given block as the value.
        #
        # @param name the name of the table
        # @param block the block of code (Sequel schema DSL) to assign for the table
        def table(name, &block)
          tables[name] = block
        end
      end

      # Create each declared table (if it does not already exist).
      def setup
        self.class.tables.each do |table_name, schema_block|
          prefixed_name = table_name_prefixed(table_name)
          @db_connection.create_table?(prefixed_name, &schema_block)
        end
        super if defined?(super)
      end

      # Reset by dropping each table, then re-running setup.
      def reset
        self.class.tables.keys.each do |table_name|
          prefixed_name = table_name_prefixed(table_name)
          if @db_connection.table_exists?(prefixed_name)
            @db_connection.drop_table(prefixed_name, cascade: true)
          end
        end
        super if defined?(super)
        setup
      end

      # Truncate each table and reset the tracker entry back to 0.
      # Each table is truncated in its own transaction together with the
      # tracker reset.
      def truncate
        self.class.tables.each do |table_name, _|
          @db_connection.transaction do
            prefixed_name = table_name_prefixed(table_name)
            @db_connection[prefixed_name].truncate
            tracker.reset_last_processed_event_id(self.class.processor_name)
          end
        end
      end

      private

      attr_reader :db_connection
      attr_accessor :table_prefix

      # Dataset for a declared table.
      #
      # @param name the table name; may be omitted when exactly one table is declared
      # @raise [DefaultTableError] when no name is given and 0 or multiple tables are declared
      # @raise [NoSuchTableError] when the named table was never declared
      def table(name = nil)
        if name.nil? && self.class.tables.length != 1
          # BUGFIX: message previously read "when when 0 or multiple tables".
          raise DefaultTableError, 'You must specify table name when 0 or multiple tables are defined'
        end

        name ||= self.class.tables.keys.first

        unless self.class.tables[name.to_sym]
          raise NoSuchTableError, "There is no table with the name '#{name}' defined"
        end

        db_connection[table_name_prefixed(name)]
      end

      # Join the optional prefix and table name with an underscore,
      # returning a symbol (e.g. prefix "foo" + :bar => :foo_bar).
      def table_name_prefixed(name)
        [table_prefix, name].compact.join('_').to_sym
      end
    end
  end
end

+ 116
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/tracker.rb View File

@@ -0,0 +1,116 @@
module EventSourcery
  module Postgres
    # This will set up a persisted event id tracker for processors.
    class Tracker

      # @param db_connection Sequel connection the tracker table lives in
      # @param table_name name of the tracker table
      # @param obtain_processor_lock whether #setup should take a global
      #   advisory lock for the processor (prevents concurrent processing)
      def initialize(db_connection = EventSourcery::Postgres.config.projections_database,
                     table_name: EventSourcery::Postgres.config.tracker_table_name,
                     obtain_processor_lock: true)
        @db_connection = db_connection
        @table_name = table_name.to_sym
        @obtain_processor_lock = obtain_processor_lock
      end

      # Set up the given processor.
      # This will create the projector tracker table if it does not exist.
      # If given a processor_name it will then attempt to get a lock on the db.
      #
      # @param processor_name the name of the processor
      # @raise [UnableToLockProcessorError] if the tracker table does not
      #   exist (and auto-creation is disabled), or if the advisory lock
      #   cannot be obtained
      def setup(processor_name = nil)
        create_table_if_not_exists if EventSourcery::Postgres.config.auto_create_projector_tracker

        unless tracker_table_exists?
          raise UnableToLockProcessorError, 'Projector tracker table does not exist'
        end

        if processor_name
          create_track_entry_if_not_exists(processor_name)
          if @obtain_processor_lock
            obtain_global_lock_on_processor(processor_name)
          end
        end
      end

      # This will update the tracker table to the given event id value
      # for the given processor name.
      #
      # @param processor_name the name of the processor to update
      # @param event_id the event id number to update to
      def processed_event(processor_name, event_id)
        table.
          where(name: processor_name.to_s).
          update(last_processed_event_id: event_id)
        true
      end

      # This allows you to process an event and update the tracker table in
      # a single transaction. Will yield the given block first then update the
      # tracker table to the given event id for the given processor name.
      #
      # @param processor_name the name of the processor to update
      # @param event_id the event id number to update to
      def processing_event(processor_name, event_id)
        @db_connection.transaction do
          yield
          processed_event(processor_name, event_id)
        end
      end

      # This will reset the tracker to the start (0) for the given processor name.
      #
      # @param processor_name the name of the processor to reset to 0
      def reset_last_processed_event_id(processor_name)
        table.where(name: processor_name.to_s).update(last_processed_event_id: 0)
      end

      # This will return the last processed event id for the given processor name.
      #
      # @param processor_name the name of the processor you want to look up
      # @return [Int, nil] the value of the last event_id processed
      def last_processed_event_id(processor_name)
        track_entry = table.where(name: processor_name.to_s).first
        track_entry[:last_processed_event_id] if track_entry
      end

      # Will return an array of all known tracked processors.
      #
      # @return [Array] array of all known tracked processors
      def tracked_processors
        table.select_map(:name)
      end

      private

      # Take a session-level advisory lock keyed by this processor's tracker
      # row id, so only one instance processes this stream at a time.
      def obtain_global_lock_on_processor(processor_name)
        lock_obtained = @db_connection.fetch("select pg_try_advisory_lock(#{@track_entry_id})").to_a.first[:pg_try_advisory_lock]
        if lock_obtained == false
          raise UnableToLockProcessorError, "Unable to get a lock on #{processor_name} #{@track_entry_id}"
        end
      end

      # Create the tracker table via Schema helpers if it is missing.
      def create_table_if_not_exists
        unless tracker_table_exists?
          EventSourcery.logger.info { "Projector tracker missing - attempting to create 'projector_tracker' table" }
          EventSourcery::Postgres::Schema.create_projector_tracker(db: @db_connection, table_name: @table_name)
        end
      end

      # Ensure a tracker row exists for the processor and remember its row id
      # (used as the advisory lock key). Sequel#insert returns the new row id.
      def create_track_entry_if_not_exists(processor_name)
        track_entry = table.where(name: processor_name.to_s).first
        @track_entry_id = if track_entry
                            track_entry[:id]
                          else
                            table.insert(name: processor_name.to_s, last_processed_event_id: 0)
                          end
      end

      # Dataset for the tracker table.
      def table
        @db_connection[@table_name]
      end

      def tracker_table_exists?
        @db_connection.table_exists?(@table_name)
      end
    end
  end
end

+ 5
- 0
vendor/cache/event_sourcery-postgres/lib/event_sourcery/postgres/version.rb View File

@@ -0,0 +1,5 @@
module EventSourcery
  module Postgres
    # Gem version string.
    VERSION = '0.8.0'.freeze
  end
end

+ 61
- 0
vendor/cache/event_sourcery-postgres/script/bench_reading_events.rb View File

@@ -0,0 +1,61 @@
# Benchmark: time sinking and then reading back a batch of events.
#
# Usage:
#
# ❯ bundle exec ruby script/bench_reading_events.rb
# Creating 10000 events
# Took 42.35533199999918 to create events
# Took 4.9821800000027 to read all events
# ^ results from running on a 2016 MacBook

require 'benchmark'
require 'securerandom'
require 'sequel'
require 'event_sourcery/postgres'

# Connect to the local test database (BOXEN_POSTGRESQL_URL overrides the host).
pg_uri = ENV.fetch('BOXEN_POSTGRESQL_URL') { 'postgres://127.0.0.1:5432/' }.dup
pg_uri << 'event_sourcery_test'
pg_connection = Sequel.connect(pg_uri)

EventSourcery.configure do |config|
  config.postgres.event_store_database = pg_connection
  config.postgres.projections_database = pg_connection
  config.logger.level = :fatal
end

# Drop and recreate the event store tables for a clean run.
def create_events_schema(pg_connection)
  pg_connection.execute 'drop table if exists events'
  pg_connection.execute 'drop table if exists aggregates'
  EventSourcery::Postgres::Schema.create_event_store(db: pg_connection)
end

event_store = EventSourcery::Postgres.config.event_store

EVENT_TYPES = %i[
  item_added
  item_removed
  item_starred
].freeze

# Build an event of a random type for the given aggregate.
def new_event(uuid)
  EventSourcery::Event.new(type: EVENT_TYPES.sample,
                           aggregate_id: uuid,
                           body: { 'something' => 'simple' })
end

create_events_schema(pg_connection)

NUM_EVENTS = 10_000
puts "Creating #{NUM_EVENTS} events"
time = Benchmark.realtime do
  uuid = SecureRandom.uuid
  NUM_EVENTS.times { event_store.sink(new_event(uuid)) }
end
puts "Took #{time} to create events"

# Read everything back via the subscription API, stopping once all
# events have been seen.
seen_events_count = 0
time = Benchmark.realtime do
  event_store.subscribe(from_id: 0, subscription_master: EventSourcery::EventStore::SignalHandlingSubscriptionMaster.new) do |events|
    seen_events_count += events.count
    throw :stop if seen_events_count >= NUM_EVENTS
  end
end
puts "Took #{time} to read all events"

+ 47
- 0
vendor/cache/event_sourcery-postgres/script/bench_writing_events.rb View File

@@ -0,0 +1,47 @@
# Benchmark: iterations-per-second of event_store.sink.
#
# Usage:
#
# ❯ bundle exec ruby script/bench_writing_events.rb
# Warming up --------------------------------------
#     event_store.sink
#                         70.000 i/100ms
# Calculating -------------------------------------
#     event_store.sink
#                        522.007 (±10.9%) i/s - 2.590k in 5.021909s
#
# ^ results from running on a 2016 MacBook

require 'benchmark/ips'
require 'securerandom'
require 'sequel'
require 'event_sourcery/postgres'

# Connect to the local test database (BOXEN_POSTGRESQL_URL overrides the host).
pg_uri = ENV.fetch('BOXEN_POSTGRESQL_URL') { 'postgres://127.0.0.1:5432/' }.dup
pg_uri << 'event_sourcery_test'
pg_connection = Sequel.connect(pg_uri)

EventSourcery.configure do |config|
  config.postgres.event_store_database = pg_connection
  config.postgres.projections_database = pg_connection
  config.logger.level = :fatal
end

# Drop and recreate the event store tables for a clean run.
def create_schema(pg_connection)
  pg_connection.execute 'drop table if exists events'
  pg_connection.execute 'drop table if exists aggregates'
  EventSourcery::Postgres::Schema.create_event_store(db: pg_connection)
end

create_schema(pg_connection)
event_store = EventSourcery::Postgres::EventStore.new(pg_connection)

# One event per iteration, each for a fresh aggregate.
def new_event
  EventSourcery::Event.new(type: :item_added,
                           aggregate_id: SecureRandom.uuid,
                           body: { 'something' => 'simple' })
end

Benchmark.ips do |b|
  b.report('event_store.sink') do
    event_store.sink(new_event)
  end
end

+ 179
- 0
vendor/cache/event_sourcery-postgres/script/demonstrate_event_sequence_id_gaps.rb View File

@@ -0,0 +1,179 @@
# Demonstrates that sequence IDs may not be inserted linearly with concurrent
# writers.
#
# This script writes events in parallel from a number of forked processes,
# writing events in a continious loop until the program is interrupted.
# The parent process detects gaps in sequence IDs by selecting the last 2
# events based on sequence ID. A gap is detected when the 2 IDs returned from
# that query aren't sequential. The script will proceed to execute 2 subsequent
# queries to see if they show up in the time it takes to complete those before
# moving on.
#
# An easier way to demonstrate this is by using 2 psql consoles:
#
# - Simulate a transaction taking a long time to commit:
# ```
# begin;
# insert into events (..) values (..);
# ```
# - Then, in another console:
# ```
# insert into events (..) values (..);
# select * from events;
# ```
#
# The result is that event sequence ID 2 is visible, but only when the first
# transaction commits is event sequence ID 1 visible.
#
# Why does this happen?
#
# Sequences in Postgres (and most other DBs) are not transactional, changes
# to the sequence are visible to other transactions immediately. Also, inserts
# from the forked writers may be executed in parallel by postgres.
#
# The process of inserting into a table that has a sequence or serial column is
# to first get the next sequence ID (changing global state), then perform the
# insert statement and later commit. In between these 2 steps the sequence ID
# is taken but not visible in the table until the insert statement is
# committed. Gaps in sequence IDs occur when a process takes a sequence ID and
# commits it while another process is in between those 2 steps.
#
# This means another transaction could have taken the next sequence
# ID and committed before that one commits, resulting in a gap in sequence ID's
# when reading.
#
# Why is this a problem?
#
# Event stream processors use the sequence ID to keep track of where they're up to
# in the events table. If a projector processes an event with sequence ID n, it
# assumes that the next event it needs to process will have a sequence ID > n.
# This approach isn't reliable when sequence IDs appear non-linearly, making it
# possible for event stream processors to skip over events.
#
# How does EventSourcery deal with this?
#
# EventSourcery uses n transaction level advisory lock to synchronise inserts
# to the events table within the writeEvents function. Alternatives:
#
# - Write events from 1 process only (serialize at the application level)
# - Detect gaps when reading events and allow time for in-flight transactions
# (the gaps) to commit.
# - Built in eventual consistency. Selects would be restricted to events older
# than 500ms-1s or the transaction timeout to give enough time for in-flight
# transactions to commit.
# - Only query events when catching up, after that rely on events to be
# delivered through the pub/sub mechanism. Given events would be received out
# of order under concurrent writes there's potential for processors to process
# a given event twice if they shutdown after processing a sequence that was
# part of a gap.
#
# Usage:
#
# ❯ bundle exec ruby script/demonstrate_event_sequence_id_gaps.rb
# 89847: starting to write events89846: starting to write events

# 89848: starting to write events
# 89849: starting to write events
# 89850: starting to write events
# GAP: 1 missing sequence IDs. 78 != 76 + 1. Missing events showed up after 1 subsequent query. IDs: [77]
# GAP: 1 missing sequence IDs. 168 != 166 + 1. Missing events showed up after 1 subsequent query. IDs: [167]
# GAP: 1 missing sequence IDs. 274 != 272 + 1. Missing events showed up after 1 subsequent query. IDs: [273]
# GAP: 1 missing sequence IDs. 341 != 339 + 1. Missing events showed up after 1 subsequent query. IDs: [340]
# GAP: 1 missing sequence IDs. 461 != 459 + 1. Missing events showed up after 1 subsequent query. IDs: [460]
# GAP: 1 missing sequence IDs. 493 != 491 + 1. Missing events showed up after 1 subsequent query. IDs: [492]
# GAP: 2 missing sequence IDs. 621 != 618 + 1. Missing events showed up after 1 subsequent query. IDs: [619, 620]

require 'sequel'
require 'securerandom'
require 'event_sourcery/postgres'

def connect
pg_uri = ENV.fetch('BOXEN_POSTGRESQL_URL') { 'postgres://127.0.0.1:5432/' }.dup
pg_uri << 'event_sourcery_test'
Sequel.connect(pg_uri)
end

EventSourcery.logger.level = :info

def new_event
EventSourcery::Event.new(type: :item_added,
aggregate_id: SecureRandom.uuid,
body: { 'something' => 'simple' })
end

def create_events_schema(db)
db.execute 'drop table if exists events'
db.execute 'drop table if exists aggregates'
EventSourcery::Postgres::Schema.create_event_store(db: db)
end

db = connect
create_events_schema(db)
db.disconnect
sleep 0.3

NUM_WRITER_PROCESSES = 5
NUM_WRITER_PROCESSES.times do
fork do |pid|
stop = false
Signal.trap(:INT) { stop = true }
db = connect
# when lock_table is set to true an advisory lock is used to synchronise
# inserts and no gaps are detected
event_store = EventSourcery::Postgres::EventStore.new(db, lock_table: false)
puts "#{Process.pid}: starting to write events"
event_store.sink(new_event) until stop
end
end

stop = false
Signal.trap(:INT) { stop = true }

def wait_for_missing_ids(db, first_sequence, last_sequence, attempt: 1)
missing_ids = db[:events].where(Sequel.lit('id > ? AND id < ?', first_sequence, last_sequence)).order(:id).map {|e| e[:id] }
expected_missing_ids = (first_sequence+1)..(last_sequence-1)
if missing_ids == expected_missing_ids.to_a
print "Missing events showed up after #{attempt} subsequent query. IDs: #{missing_ids}"
else
if attempt < 2
wait_for_missing_ids(db, first_sequence, last_sequence, attempt: attempt + 1)
else
print "Missing events didn't show up after #{attempt} subsequent queries"
end
end
end

until stop

# query for the last 2 sequences in the events table
first_sequence, last_sequence = *db[:events].
order(Sequel.desc(:id)).
select(:id).
limit(2).
map { |e| e[:id] }.
reverse

next if first_sequence.nil? || last_sequence.nil?

if last_sequence != first_sequence + 1
num_missing = last_sequence - first_sequence - 1
print "GAP: #{num_missing} missing sequence IDs. #{last_sequence} != #{first_sequence} + 1. "
wait_for_missing_ids(db, first_sequence, last_sequence)
puts
end
end

Process.waitall

puts
puts 'Looking for gaps in sequence IDs in events table:'
ids = db[:events].select(:id).order(:id).all.map { |e| e[:id] }
expected_ids = (ids.min..ids.max).to_a
missing_ids = (expected_ids - ids)
if missing_ids.empty?
puts 'No remaining gaps'
else
missing_ids.each do |id|
puts "Unable to find row with sequence ID #{id}"
end
end

+ 51
- 0
vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/config_spec.rb View File

@@ -0,0 +1,51 @@
# Specs for EventSourcery::Postgres::Config: event_store resolution and the
# side effects of assigning databases.
#
# NOTE(review): `db_connection` is not defined in this file — presumably a
# helper/shared context from spec_helper; verify there.
RSpec.describe EventSourcery::Postgres::Config do
  subject(:config) { described_class.new }

  context 'when reading the event_store' do
    context 'and an event_store_database is set' do
      before do
        allow(db_connection).to receive(:extension).with(:pg_json)
        config.event_store_database = db_connection
      end

      it 'returns a EventSourcery::Postgres::EventStore' do
        expect(config.event_store).to be_instance_of(EventSourcery::Postgres::EventStore)
      end

      it 'loads pg_json extension on database' do
        expect(db_connection).to have_received(:extension).with(:pg_json)
      end
    end

    context 'and an event_store is set' do
      let(:event_store) { double(:event_store) }
      before do
        config.event_store = event_store
        config.event_store_database = nil
      end

      it 'returns the event_store' do
        expect(config.event_store).to eq(event_store)
      end
    end
  end

  context 'setting the projections database' do
    before do
      allow(db_connection).to receive(:extension).with(:pg_json)
      config.projections_database = db_connection
    end

    it 'sets the projections_database' do
      expect(config.projections_database).to eq db_connection
    end

    # Assigning the projections database also builds a tracker for it.
    it 'sets the event_tracker' do
      expect(config.event_tracker).to be_instance_of(EventSourcery::Postgres::Tracker)
    end

    it 'loads pg_json extension on database' do
      expect(db_connection).to have_received(:extension).with(:pg_json)
    end
  end
end

+ 128
- 0
vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/event_store_spec.rb View File

@@ -0,0 +1,128 @@
require 'timeout'

# Specs for EventSourcery::Postgres::EventStore: NOTIFY on sink, aggregate
# lookup, subscriptions, and aggregate version bookkeeping.
#
# NOTE(review): `db_connection`, `aggregate_id`, `new_event`, `add_event` and
# the shared examples come from spec helpers not visible in this file.
RSpec.describe EventSourcery::Postgres::EventStore do
  let(:supports_versions) { true }
  subject(:event_store) { described_class.new(db_connection) }

  include_examples 'an event store'

  describe '#sink' do
    let(:on_events_recorded_proc) { double("OnEventsRecordedProc", call: nil) }

    before do
      allow(EventSourcery::Postgres.config)
        .to receive(:on_events_recorded)
        .and_return(on_events_recorded_proc)
    end

    # Sinking an event should NOTIFY 'new_event' with the event id as payload.
    it 'notifies about a new event' do
      event_id = nil
      Timeout.timeout(1) do
        db_connection.listen('new_event', loop: false, after_listen: proc { add_event }) do |channel, pid, payload|
          event_id = Integer(payload)
        end
      end

      expect(event_id).not_to be_nil
      expect(on_events_recorded_proc).to have_received(:call)
    end
  end

  describe '#get_events_for_aggregate_id' do
    context 'when aggregate_id is a string' do
      include_examples 'gets events for a specific aggregate id' do
        let(:uuid) { aggregate_id }
      end
    end

    context 'when aggregate_id is convertible to a string' do
      include_examples 'gets events for a specific aggregate id' do
        let(:uuid) { double(to_str: aggregate_id) }
      end
    end
  end

  describe '#subscribe' do
    let(:event) { new_event(aggregate_id: aggregate_id) }
    let(:subscription_master) { spy(EventSourcery::EventStore::SignalHandlingSubscriptionMaster) }

    it 'notifies of new events' do
      event_store.subscribe(from_id: 0,
                            after_listen: proc { event_store.sink(event) },
                            subscription_master: subscription_master) do |events|
        @events = events
        throw :stop
      end
      expect(@events.count).to eq 1
      expect(@events.first.aggregate_id).to eq aggregate_id
    end
  end

  context 'aggregates table version' do
    def save_event(expected_version: nil)
      event_store.sink(new_event(aggregate_id: aggregate_id,
                                 type: :billing_details_provided,
                                 body: { my_event: 'data' }),
                       expected_version: expected_version)
    end

    def add_event
      event_store.sink(new_event(aggregate_id: aggregate_id))
    end

    def last_event
      event_store.get_next_from(0).last
    end

    # Current version recorded in the aggregates table, or nil if absent.
    def aggregate_version
      result = db_connection[:aggregates].
        where(aggregate_id: aggregate_id).
        first
      result[:version] if result
    end

    context "when the aggregate doesn't exist" do
      context 'and the expected version is correct - 0' do
        it 'saves the event with and sets the aggregate version to version 1' do
          save_event(expected_version: 0)
          expect(aggregate_version).to eq 1
        end
      end

      context 'with no expected version' do
        it 'saves the event with and sets the aggregate version to version 1' do
          save_event
          expect(aggregate_version).to eq 1
        end
      end
    end

    context 'when the aggregate exists' do
      before { add_event }

      context 'with a correct expected version - 1' do
        it 'saves the event with and sets the aggregate version to version 2' do
          save_event
          expect(aggregate_version).to eq 2
        end
      end

      context 'with no aggregate version' do
        it 'automatically sets the version on the event and aggregate' do
          save_event
          expect(aggregate_version).to eq 2
        end
      end
    end

    context 'when a database error occurs that is not a concurrency error' do
      before do
        allow(db_connection).to receive(:run).and_raise(Sequel::DatabaseError)
      end

      it 'raises it' do
        expect { add_event }.to raise_error(Sequel::DatabaseError)
      end
    end
  end
end

+ 99
- 0
vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/optimised_event_poll_waiter_spec.rb View File

@@ -0,0 +1,99 @@
# Specs for EventSourcery::Postgres::OptimisedEventPollWaiter: polling with
# LISTEN/NOTIFY wake-ups and listener-thread failure handling.
#
# NOTE(review): `db_connection` comes from a spec helper not visible here.
RSpec.describe EventSourcery::Postgres::OptimisedEventPollWaiter do
  let(:after_listen) { proc {} }
  subject(:waiter) { described_class.new(db_connection: db_connection, after_listen: after_listen) }

  before do
    # Use a zero callback interval so the poll block runs immediately.
    allow(EventSourcery::Postgres::QueueWithIntervalCallback).to receive(:new)
      .and_return(EventSourcery::Postgres::QueueWithIntervalCallback.new(callback_interval: 0))
  end

  # NOTIFY 'new_event' once per given id.
  def notify_event_ids(*ids)
    ids.each do |id|
      db_connection.notify('new_event', payload: id)
    end
  end

  it 'does an initial call' do
    waiter.poll(after_listen: proc {}) do
      @called = true
      throw :stop
    end

    expect(@called).to eq true
  end

  it 'calls on new event' do
    waiter.poll(after_listen: proc { notify_event_ids(1) }) do
      @called = true
      throw :stop
    end

    expect(@called).to eq true
  end

  it 'calls once when multiple events are in the queue' do
    waiter.poll(after_listen: proc { notify_event_ids(1, 2) }) do
      @called = true
      throw :stop
    end

    expect(@called).to eq true
  end

  context 'when the listening thread dies' do
    before do
      allow(db_connection).to receive(:listen).and_raise(StandardError)
    end

    it 'raise an error' do
      quiet_thread_report_on_exception do
        expect {
          waiter.poll {}
        }.to raise_error(described_class::ListenThreadDied)
      end
    end
  end

  context 'when an error is raised' do
    let(:thread) { double }

    before { allow(Thread).to receive(:new).and_return(thread) }

    context 'when the listening thread is alive' do
      it 'kills the listening thread' do
        allow(thread).to receive(:alive?).and_return(true)
        expect(thread).to receive(:kill)

        waiter.poll(after_listen: proc { notify_event_ids(1) }) do
          @called = true
          throw :stop
        end
      end
    end

    context 'when the listening thread is not alive' do
      it 'does not try to kill any listening threads' do
        allow(thread).to receive(:alive?).and_return(false)
        expect(thread).to_not receive(:kill)

        waiter.poll(after_listen: proc { notify_event_ids(1) }) do
          @called = true
          throw :stop
        end
      end
    end
  end

  # Temporarily silence Thread.report_on_exception (Ruby 2.4+) so expected
  # listener-thread failures don't spam the test output.
  def quiet_thread_report_on_exception(&block)
    if Thread.respond_to?(:report_on_exception)
      orig_report_on_exception = Thread.report_on_exception
      Thread.report_on_exception = false

      block.call

      Thread.report_on_exception = orig_report_on_exception
    else
      block.call
    end
  end
end

+ 202
- 0
vendor/cache/event_sourcery-postgres/spec/event_sourcery/postgres/projector_spec.rb View File

@@ -0,0 +1,202 @@
RSpec.describe EventSourcery::Postgres::Projector do
# A minimal projector that records TermsAccepted events into a `profiles`
# table. The class-level DSL (processor_name/table/process) comes from
# EventSourcery::Postgres::Projector.
let(:projector_class) do
Class.new do
include EventSourcery::Postgres::Projector
processor_name 'test_processor'

# Projection table schema owned by this projector.
table :profiles do
column :user_uuid, 'UUID NOT NULL'
column :terms_accepted, 'BOOLEAN DEFAULT FALSE'
end

# NOTE(review): TermsAccepted is presumably an event class defined in the
# spec support files — not visible here.
process TermsAccepted do |event|
@processed_event = event
table.insert(user_uuid: event.aggregate_id,
terms_accepted: true)
end

# Exposes the last handled event so examples can assert on it.
attr_reader :processed_event
end
end
# NOTE(review): projector_name appears unused by the visible examples.
let(:projector_name) { 'my_projector' }
let(:tracker) { EventSourcery::Postgres::Tracker.new(db_connection) }
let(:events) { [] }
# Builds and instantiates an anonymous projector class. Any block given is
# class_eval'd into the class body, letting individual examples register
# their own event handlers before the attr_reader is declared.
def new_projector(&block)
  projector_class = Class.new do
    include EventSourcery::Postgres::Projector
    processor_name 'test_processor'

    table :profiles do
      column :user_uuid, 'UUID NOT NULL'
      column :terms_accepted, 'BOOLEAN DEFAULT FALSE'
    end

    class_eval(&block) unless block.nil?

    attr_reader :processed_event
  end
  projector_class.new(tracker: tracker, db_connection: db_connection)
end

# Commit after every single event so each processed event is immediately
# visible to the example's assertions.
let(:projector_transaction_size) { 1 }
subject(:projector) do
projector_class.new(
tracker: tracker,
db_connection: db_connection,
transaction_size: projector_transaction_size,
)
end
let(:aggregate_id) { SecureRandom.uuid }

# NOTE(review): release_advisory_locks is presumably a spec helper that frees
# Postgres advisory locks taken during processing — confirm in spec_helper.
after { release_advisory_locks }

describe '.new' do
let(:projections_database) { double }
let(:event_tracker) { double }

before do
# With no tracker argument, the projector should build one from the
# globally configured projections database.
allow(EventSourcery::Postgres::Tracker).to receive(:new).with(projections_database).and_return(event_tracker)
allow(projections_database).to receive(:extension).with(:pg_json)

# NOTE(review): this mutates global configuration and is not reset
# afterwards — presumably other specs reconfigure it; confirm intended.
EventSourcery::Postgres.configure do |config|
config.projections_database = projections_database
end
end

# Constructed with no explicit arguments: defaults must come from config.
subject(:projector) { projector_class.new }

it 'uses the configured projections database by default' do
expect(projector.instance_variable_get('@db_connection')).to eq projections_database
end

it 'uses the inferred event tracker database by default' do
expect(projector.instance_variable_get('@tracker')).to eq event_tracker
end
end

describe '.projector_name' do
  # projector_name is simply an alias for the configured processor_name.
  it 'delegates to processor_name' do
    expect(projector_class.projector_name).to eq('test_processor')
  end
end

describe '#project' do
  let(:event) { ItemAdded.new }

  # Handlers registered via `project` (rather than `process`) must also
  # receive matching events.
  it 'processes events with custom classes' do
    custom_projector = new_projector do
      project ItemAdded do |evt|
        @processed_event = evt
      end
    end

    custom_projector.project(event)

    expect(custom_projector.processed_event).to eq(event)
  end
end

describe '#process' do
  let(:event) do
    EventSourcery::Event.new(body: {}, aggregate_id: aggregate_id, type: :terms_accepted, id: 1)
  end

  # Reset recreates the projection table / tracker state before each example.
  before do
    projector.reset
  end

  it "processes events it's interested in" do
    projector.process(event)
    expect(projector.processed_event).to eq(event)
  end
end

describe '#subscribe_to' do
let(:event_store) { double(:event_store) }
let(:events) { [new_event(id: 1), new_event(id: 2)] }
# Spy so we can count shutdown_if_requested calls without real signals.
let(:subscription_master) { spy(EventSourcery::EventStore::SignalHandlingSubscriptionMaster) }
# Overrides the outer projector_class: this variant can be told (via the
# accessors below) to raise mid-stream, so error handling in subscribe_to
# can be exercised.
let(:projector_class) do
Class.new do
include EventSourcery::Postgres::Projector
processor_name 'test_processor'

table :profiles do
column :user_uuid, 'UUID NOT NULL'
column :terms_accepted, 'BOOLEAN DEFAULT FALSE'
end

# Toggles set by examples to force a failure on any/one specific event.
attr_accessor :raise_error, :raise_error_on_event_id

# NOTE(review): `process` with no event types appears to subscribe to all
# events — confirm against the Projector DSL.
process do |event|
table.insert(user_uuid: event.aggregate_id,
terms_accepted: true)
raise 'boo' if raise_error || raise_error_on_event_id == event.id
end
end
end

before do
# Deliver both events to the subscriber block in one batch.
# NOTE(review): `.once` on an `allow` sets a receive count that rspec-mocks
# does not verify for stubs — confirm it is intentional.
allow(event_store).to receive(:subscribe).and_yield(events).once
projector.reset
end

# One shutdown checkpoint per processed event: two events => called twice.
it 'marks the safe shutdown points' do
projector.subscribe_to(event_store, subscription_master: subscription_master)
expect(subscription_master).to have_received(:shutdown_if_requested).twice
end