Skip to content

Commit

Permalink
Group Data Provider: Added missing group mapping iterator. (#12613)
Browse files Browse the repository at this point in the history
* Group Data Provider: Added missing group mapping iterator.

* Group Data Provider: Code review changes.
  • Loading branch information
rcasallas-silabs authored Dec 6, 2021
1 parent 299393b commit d25a3ef
Show file tree
Hide file tree
Showing 4 changed files with 310 additions and 129 deletions.
22 changes: 13 additions & 9 deletions src/credentials/GroupDataProvider.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
*/
#pragma once

#include <app-common/zap-generated/cluster-objects.h>
#include <app/util/basic-types.h>
#include <crypto/CHIPCryptoPAL.h>
#include <lib/core/CHIPError.h>
Expand Down Expand Up @@ -94,29 +95,24 @@ class GroupDataProvider
{
GroupState() = default;
GroupState(chip::FabricIndex fabric, chip::GroupId group_id, uint16_t key_set) :
fabric_index(fabric), group(group_id), keyset_index(key_set)
fabric_index(fabric), group(group_id), keyset_id(key_set)
{}
// Fabric Index associated with the group state entry's fabric scoping
chip::FabricIndex fabric_index = kUndefinedFabricIndex;
// Identifies the group within the scope of the given fabric
chip::GroupId group = kUndefinedGroupId;
// References the set of group keys that generate operational group keys for use with the given group
uint16_t keyset_index = 0;
uint16_t keyset_id = 0;
bool operator==(const GroupState & other)
{
return this->fabric_index == other.fabric_index && this->group == other.group &&
this->keyset_index == other.keyset_index;
return this->fabric_index == other.fabric_index && this->group == other.group && this->keyset_id == other.keyset_id;
}
};

// An operational group key set, usable by many GroupState mappings
struct KeySet
{
enum class SecurityPolicy : uint8_t
{
kStandard = 0,
kLowLatency = 1
};
using SecurityPolicy = chip::app::Clusters::GroupKeyManagement::GroupKeySecurityPolicy;

KeySet() = default;
KeySet(uint16_t id) : keyset_id(id) {}
Expand Down Expand Up @@ -225,6 +221,14 @@ class GroupDataProvider
virtual CHIP_ERROR AddGroupMapping(chip::FabricIndex fabric_index, const GroupMapping & mapping) = 0;
virtual CHIP_ERROR RemoveGroupMapping(chip::FabricIndex fabric_index, const GroupMapping & mapping) = 0;
virtual CHIP_ERROR RemoveAllGroupMappings(chip::FabricIndex fabric_index, EndpointId endpoint) = 0;
/**
* Creates an iterator that may be used to obtain the groups associated with the given fabric.
* The number of concurrent instances of this iterator is limited. In order to release the allocated memory,
* the iterator's Release() method must be called after the iteration is finished.
* @retval An instance of GroupMappingIterator on success
* @retval nullptr if no iterator instances are available.
*/
virtual GroupMappingIterator * IterateGroupMappings(chip::FabricIndex fabric_index) = 0;
/**
* Creates an iterator that may be used to obtain the groups associated with the given fabric and endpoint.
* The number of concurrent instances of this iterator is limited. In order to release the allocated memory,
Expand Down
119 changes: 102 additions & 17 deletions src/credentials/GroupDataProviderImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ struct StateData : public GroupDataProvider::GroupState, PersistentData<kPersist
{
fabric_index = 0;
group = kUndefinedGroupId;
keyset_index = 0;
keyset_id = 0;
next = 0;
}

Expand All @@ -354,7 +354,7 @@ struct StateData : public GroupDataProvider::GroupState, PersistentData<kPersist

ReturnErrorOnFailure(writer.Put(kTagFabric, static_cast<uint8_t>(fabric_index)));
ReturnErrorOnFailure(writer.Put(kTagGroup, static_cast<uint16_t>(group)));
ReturnErrorOnFailure(writer.Put(kTagKeySet, static_cast<uint16_t>(keyset_index)));
ReturnErrorOnFailure(writer.Put(kTagKeySet, static_cast<uint16_t>(keyset_id)));
ReturnErrorOnFailure(writer.Put(kTagNext, static_cast<uint16_t>(next)));

return writer.EndContainer(container);
Expand All @@ -373,9 +373,9 @@ struct StateData : public GroupDataProvider::GroupState, PersistentData<kPersist
// group
ReturnErrorOnFailure(reader.Next(kTagGroup));
ReturnErrorOnFailure(reader.Get(group));
// keyset_index
// keyset_id
ReturnErrorOnFailure(reader.Next(kTagKeySet));
ReturnErrorOnFailure(reader.Get(keyset_index));
ReturnErrorOnFailure(reader.Get(keyset_id));
// next
ReturnErrorOnFailure(reader.Next(kTagNext));
ReturnErrorOnFailure(reader.Get(next));
Expand Down Expand Up @@ -799,11 +799,97 @@ CHIP_ERROR GroupDataProviderImpl::RemoveAllGroupMappings(chip::FabricIndex fabri
}
}

GroupDataProvider::GroupMappingIterator * GroupDataProviderImpl::IterateGroupMappings(chip::FabricIndex fabric_index)
{
    // Iterators come from a fixed-size pool: a nullptr result means either the
    // provider has not been initialized or no pool slot is free. The caller is
    // responsible for invoking Release() on the returned iterator when done.
    if (!mInitialized)
    {
        return nullptr;
    }
    return mAllGroupsIterators.CreateObject(*this, fabric_index);
}

GroupDataProviderImpl::AllGroupMappingsIteratorImpl::AllGroupMappingsIteratorImpl(GroupDataProviderImpl & provider,
                                                                                  chip::FabricIndex fabric_index) :
    mProvider(provider),
    mFabric(fabric_index)
{
    FabricData fabric(fabric_index);
    // If the fabric has no persisted data, bail out of the constructor body:
    // the remaining members keep their in-class defaults, which makes Next()
    // and Count() yield an empty iteration.
    ReturnOnFailure(fabric.Load(provider.mStorage));

    // Existing fabric: seed the cursor at the fabric's first endpoint.
    mFirstEndpoint = fabric.first_endpoint;
    mFirstGroup = true;
    mEndpointCount = fabric.endpoint_count;
    mEndpoint = fabric.first_endpoint;
    mEndpointIndex = 0;
}

// Tallies every (endpoint, group) mapping in the fabric. Always walks from the
// fabric's first endpoint (mFirstEndpoint), so the result is independent of any
// progress already made through Next().
size_t GroupDataProviderImpl::AllGroupMappingsIteratorImpl::Count()
{
    size_t count = 0;
    size_t endpoint_index = 0;
    EndpointData endpoint_data(mFabric, mFirstEndpoint);

    // Loop through the fabric's endpoints; a failed load terminates the walk.
    while ((endpoint_index < mEndpointCount) && (CHIP_NO_ERROR == endpoint_data.Load(mProvider.mStorage)))
    {
        GroupData group_data(mFabric, endpoint_data.endpoint_id, endpoint_data.first_group);
        // Loop through the endpoint's singly-linked group list; the list is
        // terminated by kUndefinedGroupId or a failed load.
        // NOTE(review): unlike GroupMappingIteratorImpl::Count(), there is no
        // prev-id cycle guard here; a corrupted 'next' chain could loop forever
        // — confirm the storage layer cannot produce cycles.
        while ((kUndefinedGroupId != group_data.group) && (CHIP_NO_ERROR == group_data.Load(mProvider.mStorage)))
        {
            group_data.group = group_data.next;
            count++;
        }
        endpoint_data.endpoint_id = endpoint_data.next;
        endpoint_index++;
    }

    return count;
}

// Advances the two-level cursor (endpoint, then that endpoint's linked list of
// groups) and copies the next mapping into item. Returns false once every
// endpoint of the fabric has been exhausted, or on an endpoint load failure.
bool GroupDataProviderImpl::AllGroupMappingsIteratorImpl::Next(GroupMapping & item)
{
    while (mEndpointIndex < mEndpointCount)
    {
        EndpointData endpoint_data(mFabric, mEndpoint);
        if (CHIP_NO_ERROR != endpoint_data.Load(mProvider.mStorage))
        {
            // Endpoint data missing or unreadable: terminate the iteration
            // permanently by exhausting the endpoint index.
            mEndpointIndex = mEndpointCount;
            return false;
        }

        if (mFirstGroup)
        {
            // Entering a new endpoint: seed the group cursor from its head group.
            mGroup = endpoint_data.first_group;
            mFirstGroup = false;
        }

        GroupData group_data(mFabric, mEndpoint, mGroup);
        if ((kUndefinedGroupId != mGroup) && CHIP_NO_ERROR == group_data.Load(mProvider.mStorage))
        {
            item.endpoint = mEndpoint;
            item.group = mGroup;
            // Copy at most kGroupNameMax characters and force NUL-termination.
            // Assumes item.name can hold kGroupNameMax + 1 bytes — TODO confirm.
            size_t size = strnlen(group_data.name, GroupData::kGroupNameMax);
            strncpy(item.name, group_data.name, size);
            item.name[size] = 0;
            // Advance the cursor along this endpoint's group list.
            mGroup = group_data.next;
            return true;
        }

        // Current endpoint exhausted (or its group failed to load): move on to
        // the next endpoint and re-seed the group cursor on the next pass.
        mEndpoint = endpoint_data.next;
        mEndpointIndex++;
        mFirstGroup = true;
    }
    return false;
}

// Returns this iterator to the provider's fixed-size pool. The object must not
// be used after this call.
void GroupDataProviderImpl::AllGroupMappingsIteratorImpl::Release()
{
    mProvider.mAllGroupsIterators.ReleaseObject(this);
}

GroupDataProvider::GroupMappingIterator * GroupDataProviderImpl::IterateGroupMappings(chip::FabricIndex fabric_index,
EndpointId endpoint_id)
{
VerifyOrReturnError(mInitialized, nullptr);
return mEndpointIterators.CreateObject(*this, fabric_index, endpoint_id);
return mEndpointGroupsIterators.CreateObject(*this, fabric_index, endpoint_id);
}

GroupDataProviderImpl::GroupMappingIteratorImpl::GroupMappingIteratorImpl(GroupDataProviderImpl & provider,
Expand All @@ -827,18 +913,20 @@ GroupDataProviderImpl::GroupMappingIteratorImpl::GroupMappingIteratorImpl(GroupD
if (endpoint.endpoint_id == endpoint_id)
{
// Target endpoint found
mGroup = endpoint.first_group;
mFirstGroup = endpoint.first_group;
break;
}
endpoint.endpoint_id = endpoint.next;
} while (++count < fabric.endpoint_count);

mGroup = mFirstGroup;
}

size_t GroupDataProviderImpl::GroupMappingIteratorImpl::Count()
{
size_t count = 0;

GroupData group(mFabric, mEndpoint, mGroup);
GroupData group(mFabric, mEndpoint, mFirstGroup);
chip::GroupId prev_gid = kUndefinedGroupId;

while ((kUndefinedGroupId != group.group) && (prev_gid != group.group))
Expand Down Expand Up @@ -873,7 +961,7 @@ bool GroupDataProviderImpl::GroupMappingIteratorImpl::Next(GroupMapping & item)

void GroupDataProviderImpl::GroupMappingIteratorImpl::Release()
{
mProvider.mEndpointIterators.ReleaseObject(this);
mProvider.mEndpointGroupsIterators.ReleaseObject(this);
}

//
Expand All @@ -893,8 +981,7 @@ CHIP_ERROR GroupDataProviderImpl::SetGroupState(size_t state_index, const GroupS
VerifyOrReturnError(0 == state_index, CHIP_ERROR_INVALID_ARGUMENT);
states.first = 0;
states.count = 1;
ReturnLogErrorOnFailure(
StateData(states.first, in_state.fabric_index, in_state.group, in_state.keyset_index).Save(mStorage));
ReturnLogErrorOnFailure(StateData(states.first, in_state.fabric_index, in_state.group, in_state.keyset_id).Save(mStorage));
return states.Save(mStorage);
}

Expand Down Expand Up @@ -936,7 +1023,7 @@ CHIP_ERROR GroupDataProviderImpl::SetGroupState(size_t state_index, const GroupS
VerifyOrReturnError(state.fabric_index == in_state.fabric_index, CHIP_ERROR_ACCESS_DENIED);
GroupState old_state = state;
state.group = in_state.group;
state.keyset_index = in_state.keyset_index;
state.keyset_id = in_state.keyset_id;
ReturnErrorOnFailure(state.Save(mStorage));
if (nullptr != mListener)
{
Expand All @@ -946,7 +1033,7 @@ CHIP_ERROR GroupDataProviderImpl::SetGroupState(size_t state_index, const GroupS
}

// New state
ReturnErrorOnFailure(StateData(new_id, in_state.fabric_index, in_state.group, in_state.keyset_index).Save(mStorage));
ReturnErrorOnFailure(StateData(new_id, in_state.fabric_index, in_state.group, in_state.keyset_id).Save(mStorage));

if (previous)
{
Expand All @@ -962,8 +1049,6 @@ CHIP_ERROR GroupDataProviderImpl::SetGroupState(size_t state_index, const GroupS
// Update main list
states.count = static_cast<uint16_t>(index + 1);
return states.Save(mStorage);

return CHIP_ERROR_INTERNAL;
}

CHIP_ERROR GroupDataProviderImpl::GetGroupState(size_t state_index, GroupState & out_state)
Expand All @@ -990,7 +1075,7 @@ CHIP_ERROR GroupDataProviderImpl::GetGroupState(size_t state_index, GroupState &
// Target index found
out_state.fabric_index = state.fabric_index;
out_state.group = state.group;
out_state.keyset_index = state.keyset_index;
out_state.keyset_id = state.keyset_id;
return CHIP_NO_ERROR;
}
state.id = state.next;
Expand Down Expand Up @@ -1097,7 +1182,7 @@ bool GroupDataProviderImpl::AllStatesIterator::Next(GroupState & item)
mIndex = state.next;
item.fabric_index = state.fabric_index;
item.group = state.group;
item.keyset_index = state.keyset_index;
item.keyset_id = state.keyset_id;
return true;
}

Expand Down Expand Up @@ -1156,7 +1241,7 @@ bool GroupDataProviderImpl::FabricStatesIterator::Next(GroupState & item)
{
item.fabric_index = state.fabric_index;
item.group = state.group;
item.keyset_index = state.keyset_index;
item.keyset_id = state.keyset_id;
mIndex = state.id;
return true;
}
Expand Down
32 changes: 27 additions & 5 deletions src/credentials/GroupDataProviderImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ class GroupDataProviderImpl : public GroupDataProvider
CHIP_ERROR AddGroupMapping(chip::FabricIndex fabric_index, const GroupMapping & mapping) override;
CHIP_ERROR RemoveGroupMapping(chip::FabricIndex fabric_index, const GroupMapping & mapping) override;
CHIP_ERROR RemoveAllGroupMappings(chip::FabricIndex fabric_index, EndpointId endpoint) override;
GroupMappingIterator * IterateGroupMappings(chip::FabricIndex fabric_index) override;
GroupMappingIterator * IterateGroupMappings(chip::FabricIndex fabric_index, EndpointId endpoint) override;

//
Expand Down Expand Up @@ -71,6 +72,25 @@ class GroupDataProviderImpl : public GroupDataProvider
CHIP_ERROR Decrypt(PacketHeader packetHeader, PayloadHeader & payloadHeader, System::PacketBufferHandle & msg) override;

private:
    // Iterator over every (endpoint, group) mapping of a single fabric.
    // Instances are pool-allocated by IterateGroupMappings(fabric_index) and
    // must be disposed of via Release().
    class AllGroupMappingsIteratorImpl : public GroupMappingIterator
    {
    public:
        AllGroupMappingsIteratorImpl(GroupDataProviderImpl & provider, chip::FabricIndex fabric);
        // Total number of mappings in the fabric; independent of Next() progress.
        size_t Count() override;
        // Copies the next mapping into item; returns false when exhausted.
        bool Next(GroupMapping & item) override;
        // Returns this object to the provider's iterator pool.
        void Release() override;

    private:
        GroupDataProviderImpl & mProvider;
        chip::FabricIndex mFabric = kUndefinedFabricIndex;    // fabric being iterated
        chip::EndpointId mEndpoint = kInvalidEndpointId;      // current endpoint cursor
        size_t mEndpointIndex = 0;                            // endpoints visited so far
        size_t mEndpointCount = 0;                            // total endpoints in the fabric
        chip::GroupId mGroup = kUndefinedGroupId;             // next group id to load
        chip::EndpointId mFirstEndpoint = kInvalidEndpointId; // fabric's head endpoint (used by Count)
        bool mFirstGroup = true;                              // true until mGroup is seeded from endpoint data
    };

class GroupMappingIteratorImpl : public GroupMappingIterator
{
public:
Expand All @@ -81,8 +101,9 @@ class GroupDataProviderImpl : public GroupDataProvider

private:
GroupDataProviderImpl & mProvider;
chip::FabricIndex mFabric = 0;
chip::EndpointId mEndpoint = 0;
chip::FabricIndex mFabric = kUndefinedFabricIndex;
chip::EndpointId mEndpoint = kInvalidEndpointId;
chip::GroupId mFirstGroup = kUndefinedGroupId;
chip::GroupId mGroup = kUndefinedGroupId;
};

Expand Down Expand Up @@ -111,7 +132,7 @@ class GroupDataProviderImpl : public GroupDataProvider

private:
GroupDataProviderImpl & mProvider;
chip::FabricIndex mFabric = 0;
chip::FabricIndex mFabric = kUndefinedFabricIndex;
uint16_t mIndex = 0;
size_t mCount = 0;
size_t mTotalCount = 0;
Expand All @@ -127,15 +148,16 @@ class GroupDataProviderImpl : public GroupDataProvider

private:
GroupDataProviderImpl & mProvider;
chip::FabricIndex mFabric = 0;
chip::FabricIndex mFabric = kUndefinedFabricIndex;
uint16_t mNextId = 0;
size_t mCount = 0;
size_t mIndex = 0;
};

chip::PersistentStorageDelegate & mStorage;
bool mInitialized = false;
BitMapObjectPool<GroupMappingIteratorImpl, kIteratorsMax> mEndpointIterators;
BitMapObjectPool<AllGroupMappingsIteratorImpl, kIteratorsMax> mAllGroupsIterators;
BitMapObjectPool<GroupMappingIteratorImpl, kIteratorsMax> mEndpointGroupsIterators;
BitMapObjectPool<AllStatesIterator, kIteratorsMax> mAllStatesIterators;
BitMapObjectPool<FabricStatesIterator, kIteratorsMax> mFabricStatesIterators;
BitMapObjectPool<KeySetIteratorImpl, kIteratorsMax, OnObjectPoolDestruction::IgnoreUnsafeDoNotUseInNewCode> mKeySetIterators;
Expand Down
Loading

0 comments on commit d25a3ef

Please sign in to comment.