Merge pull request #17020 from cben/deserializable_keys
Compare decimal columns correctly in batch saver
agrare authored Feb 26, 2018
2 parents c57a11b + 500fb84 commit d3f7348
Showing 5 changed files with 99 additions and 7 deletions.
17 changes: 13 additions & 4 deletions app/models/manager_refresh/save_collection/saver/base.rb
@@ -37,21 +37,30 @@ def initialize(inventory_collection)
.try(:[], "sql_type")
end

- @serializable_keys = @model_class.attribute_names.each_with_object({}) do |key, obj|
+ @serializable_keys = {}
+ @deserializable_keys = {}
+ @model_class.attribute_names.each do |key|
attribute_type = @model_class.type_for_attribute(key.to_s)
pg_type = @pg_types[key.to_sym]

if inventory_collection.use_ar_object?
# When using an AR object, let's make sure we type.serialize(value) every value, so we have a slow but always
# working way, driven by configuration
- obj[key.to_sym] = attribute_type
+ @serializable_keys[key.to_sym] = attribute_type
+ @deserializable_keys[key.to_sym] = attribute_type
elsif attribute_type.respond_to?(:coder) ||
attribute_type.type == :int4range ||
pg_type == "text[]" ||
pg_type == "character varying[]"
# Identify columns that need to be encoded by type.serialize(value); it's a costly operation, so let's
# do it only for the columns that need it.
- obj[key.to_sym] = attribute_type
+ # TODO: should these set @deserializable_keys too?
+ @serializable_keys[key.to_sym] = attribute_type
+ elsif attribute_type.type == :decimal
+ # Postgres formats decimal columns with a fixed number of digits, e.g. '0.100'.
+ # We need to parse it and let Ruby re-format the value to get a comparable string.
+ @serializable_keys[key.to_sym] = attribute_type
+ @deserializable_keys[key.to_sym] = attribute_type
end
end
end
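The new decimal branch exists because Postgres returns numeric columns as fixed-scale strings (e.g. '0.100') while the Ruby-side values are BigDecimals, so a plain string comparison of unique-index keys never matches. A minimal sketch of the mismatch and of the deserialize round-trip that makes both sides comparable; ActiveModel::Type::Decimal is used here only as a stand-in for the type returned by type_for_attribute:

```ruby
require "bigdecimal"
require "active_model"

# What the DB hands back for a decimal column vs. the value on the Ruby side.
sql_value  = "0.100"            # fixed-scale string straight from Postgres
ruby_value = BigDecimal("0.1")

sql_value == ruby_value.to_s    # => false -- the raw SQL string never matches

# Deserializing through the column type yields a BigDecimal, so both sides
# normalize to the same value and to the same string form for index keys.
type = ActiveModel::Type::Decimal.new
type.deserialize(sql_value) == ruby_value             # => true
type.deserialize(sql_value).to_s == ruby_value.to_s   # => true
```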
@@ -85,7 +94,7 @@ def values_for_database!(all_attribute_keys, attributes)

attr_reader :unique_index_keys, :unique_index_keys_to_s, :select_keys, :unique_db_primary_keys, :unique_db_indexes,
:primary_key, :arel_primary_key, :record_key_method, :pure_sql_records_fetching, :select_keys_indexes,
- :batch_size, :batch_size_for_persisting, :model_class, :serializable_keys, :pg_types, :table_name
+ :batch_size, :batch_size_for_persisting, :model_class, :serializable_keys, :deserializable_keys, :pg_types, :table_name

def save!(association)
attributes_index = {}
@@ -130,15 +130,20 @@ def update_or_destroy_records!(records_batch_iterator, inventory_objects_index,

next unless assert_distinct_relation(primary_key_value)

+ # Incoming values are in SQL string form.
# TODO(lsmola) unify this behavior with object_index_with_keys method in InventoryCollection
# TODO(lsmola) maybe we can drop the whole pure sql fetching, since everything will be targeted refresh
# with streaming refresh? Maybe just metrics and events will not be, but those should be upsert only
index = unique_index_keys_to_s.map do |attribute|
+ value = record_key(record, attribute)
if attribute == "timestamp"
+ # TODO: can this be covered by @deserializable_keys?
type = model_class.type_for_attribute(attribute)
- type.cast(record_key(record, attribute)).utc.iso8601.to_s
+ type.cast(value).utc.iso8601.to_s
+ elsif (type = deserializable_keys[attribute.to_sym])
+ type.deserialize(value).to_s
else
- record_key(record, attribute).to_s
+ value.to_s
end
end.join("__")
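A sketch of what the new elsif branch buys when records are fetched in pure-SQL mode. The record and the deserializable_keys hash are hypothetical, record_key is replaced by a plain hash lookup, the timestamp branch is omitted, and ActiveModel::Type::Decimal again stands in for the column's attribute type:

```ruby
require "bigdecimal"
require "active_model"

# Without AR objects, every fetched value arrives as a SQL string.
record              = { "quota_desired" => "1.100", "name" => "cpu" }
deserializable_keys = { :quota_desired => ActiveModel::Type::Decimal.new }

index = %w[quota_desired name].map do |attribute|
  value = record[attribute]
  if (type = deserializable_keys[attribute.to_sym])
    type.deserialize(value).to_s  # BigDecimal form, not the DB's "1.100"
  else
    value.to_s
  end
end.join("__")

# The key built from the Ruby-side data now produces the same string:
index == [BigDecimal("1.1").to_s, "cpu"].join("__")  # => true
```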

@@ -303,7 +308,11 @@ def map_ids_to_inventory_objects(indexed_inventory_objects, all_attribute_keys,
end
elsif !supports_remote_data_timestamp?(all_attribute_keys) || result.count == batch_size_for_persisting
result.each do |inserted_record|
- key = unique_index_columns.map { |x| inserted_record[x.to_s] }
+ key = unique_index_columns.map do |x|
+ value = inserted_record[x.to_s]
+ type = deserializable_keys[x]
+ type ? type.deserialize(value) : value
+ end
inventory_object = indexed_inventory_objects[key]
inventory_object.id = inserted_record[primary_key] if inventory_object
end
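The same normalization matters when matching the rows returned by the batch INSERT back to their InventoryObjects, because indexed_inventory_objects is keyed by the Ruby-side (serialized) values. A simplified sketch with hypothetical data; a bare Struct stands in for the InventoryObject:

```ruby
require "bigdecimal"
require "active_model"

inventory_object          = Struct.new(:id).new
indexed_inventory_objects = { [BigDecimal("1.1")] => inventory_object }

# Row as returned by the database after the batch insert -- values are strings.
inserted_record      = { "id" => 42, "quota_desired" => "1.100" }
unique_index_columns = [:quota_desired]
deserializable_keys  = { :quota_desired => ActiveModel::Type::Decimal.new }
primary_key          = "id"

key = unique_index_columns.map do |x|
  value = inserted_record[x.to_s]
  type  = deserializable_keys[x]
  type ? type.deserialize(value) : value  # "1.100" -> BigDecimal, so the lookup hits
end

found    = indexed_inventory_objects[key]
found.id = inserted_record[primary_key] if found  # the .id is filled in (42)
```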
15 changes: 15 additions & 0 deletions spec/models/manager_refresh/helpers/spec_parsed_data.rb
@@ -1,3 +1,5 @@
require 'bigdecimal'

module SpecParsedData
def vm_data(i, data = {})
{
@@ -100,4 +102,17 @@ def network_port_data(i, data = {})
:mac_address => "network_port_mac_#{i}",
}.merge(data)
end

def container_quota_items_data(i, data = {})
{
:quota_desired => BigDecimal("#{i}.#{i}"),
}.merge(data)
end

def container_quota_items_attrs_data(i, data = {})
{
:name => "container_quota_items_attrs_#{i}",
:resource_type => "ContainerQuotaItem",
}.merge(data)
end
end
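For orientation, a small usage sketch of the new helper in a context that includes SpecParsedData; the override in the second call is hypothetical:

```ruby
# In a spec context that includes SpecParsedData:
container_quota_items_data(1)
# returns {:quota_desired => BigDecimal("1.1")}

# The data argument wins on merge, so callers can override any field:
container_quota_items_data(1, :quota_desired => BigDecimal("20.05"))
# returns {:quota_desired => BigDecimal("20.05")}
```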
19 changes: 19 additions & 0 deletions spec/models/manager_refresh/save_inventory/init_data_helper.rb
@@ -56,6 +56,25 @@ def network_ports_init_data(extra_attributes = {})
init_data(network.network_ports(extra_attributes))
end

# The following 2 collections are fictional; they aren't defined like this in practice.
def container_quota_items_init_data(extra_attributes = {})
init_data(extra_attributes).merge(
:model_class => ContainerQuotaItem,
:arel => ContainerQuotaItem.all,
:manager_ref => [:quota_desired], # a decimal column
)
end

# Quota items don't even have custom attrs; this just provides a dependent collection
# for the quota items, to test that their .id values are set correctly.
def container_quota_items_attrs_init_data(extra_attributes = {})
init_data(extra_attributes).merge(
:model_class => CustomAttribute,
:arel => CustomAttribute.where(:resource_type => 'ContainerQuotaItem'),
:manager_ref => [:name],
)
end

def cloud
ManagerRefresh::InventoryCollectionDefault::CloudManager
end
@@ -171,6 +171,12 @@
)
)
)
@data[:container_quota_items] = ::ManagerRefresh::InventoryCollection.new(
container_quota_items_init_data(inventory_collection_options(options))
)
@data[:container_quota_items_attrs] = ::ManagerRefresh::InventoryCollection.new(
container_quota_items_attrs_init_data(inventory_collection_options(options))
)

# Parse data for InventoryCollections
@network_port_data_1 = network_port_data(1).merge(
@@ -211,6 +217,16 @@
@image_data_2 = image_data(2).merge(:name => "image_changed_name_2")
@image_data_3 = image_data(3).merge(:name => "image_changed_name_3")

@container_quota_items_data_1 = container_quota_items_data(1)
@container_quota_items_data_2 = container_quota_items_data(2)

@container_quota_items_attrs_data_1 = container_quota_items_attrs_data(1).merge(
:resource => @data[:container_quota_items].lazy_find(container_quota_items_data(1)[:quota_desired])
)
@container_quota_items_attrs_data_2 = container_quota_items_attrs_data(2).merge(
:resource => @data[:container_quota_items].lazy_find(container_quota_items_data(2)[:quota_desired])
)

# Fill InventoryCollections with data
add_data_to_inventory_collection(@data[:network_ports],
@network_port_data_1,
@@ -227,6 +243,12 @@
add_data_to_inventory_collection(@data[:miq_templates],
@image_data_2,
@image_data_3)
add_data_to_inventory_collection(@data[:container_quota_items],
@container_quota_items_data_1,
@container_quota_items_data_2)
add_data_to_inventory_collection(@data[:container_quota_items_attrs],
@container_quota_items_attrs_data_1,
@container_quota_items_attrs_data_2)
# Assert data before save
expect(@network_port1.device).to eq @vm1
expect(@network_port1.name).to eq "network_port_name_1"
@@ -253,6 +275,11 @@

@image2 = MiqTemplate.find(@image2.id)

@quota_item_1 = ContainerQuotaItem.find_by(:quota_desired => container_quota_items_data(1)[:quota_desired])
@quota_item_2 = ContainerQuotaItem.find_by(:quota_desired => container_quota_items_data(2)[:quota_desired])
@quota_attr_1 = CustomAttribute.find_by(:name => container_quota_items_attrs_data(1)[:name])
@quota_attr_2 = CustomAttribute.find_by(:name => container_quota_items_attrs_data(2)[:name])

# Check ICs stats
expect(@data[:vms].created_records).to match_array(record_stats([@vm3, @vm31]))
expect(@data[:vms].deleted_records).to match_array(record_stats([@vm1, @vm12, @vm4]))
@@ -391,6 +418,19 @@
)

expect(::ManageIQ::Providers::CloudManager::AuthKeyPair.all).to eq([])

assert_all_records_match_hashes(
[CustomAttribute.where(:resource_type => 'ContainerQuotaItem')],
{
:id => @quota_attr_1.id,
:name => @quota_attr_1.name,
:resource_id => @quota_item_1.id,
}, {
:id => @quota_attr_2.id,
:name => @quota_attr_2.name,
:resource_id => @quota_item_2.id,
}
)
end
end
end
