Skip to content

Commit

Permalink
Added check for Size ahead of loop in handler SerializeAdd/Deserializ…
Browse files Browse the repository at this point in the history
…e (default) fixed TLV containers and Deserialize error condition for extension field sets and scene map
  • Loading branch information
lpbeliveau-silabs authored and pull[bot] committed Oct 10, 2023
1 parent 09c470b commit 2360312
Show file tree
Hide file tree
Showing 5 changed files with 183 additions and 72 deletions.
40 changes: 25 additions & 15 deletions src/app/clusters/scenes/ExtensionFieldSetsImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,26 +24,29 @@ namespace scenes {

CHIP_ERROR ExtensionFieldSetsImpl::Serialize(TLV::TLVWriter & writer, TLV::Tag structTag) const
{
TLV::TLVType container;
ReturnErrorOnFailure(writer.StartContainer(structTag, TLV::kTLVType_Structure, container));
ReturnErrorOnFailure(writer.StartContainer(TLV::ContextTag(TagEFS::kFieldSetArrayContainer), TLV::kTLVType_Array, container));
TLV::TLVType structureContainer;
ReturnErrorOnFailure(writer.StartContainer(structTag, TLV::kTLVType_Structure, structureContainer));
TLV::TLVType arrayContainer;
ReturnErrorOnFailure(
writer.StartContainer(TLV::ContextTag(TagEFS::kFieldSetArrayContainer), TLV::kTLVType_Array, arrayContainer));
for (uint8_t i = 0; i < mFieldSetsCount; i++)
{
ReturnErrorOnFailure(mFieldSets[i].Serialize(writer));
}

return writer.EndContainer(container);
return writer.EndContainer(container);
ReturnErrorOnFailure(writer.EndContainer(arrayContainer));
return writer.EndContainer(structureContainer);
}

CHIP_ERROR ExtensionFieldSetsImpl::Deserialize(TLV::TLVReader & reader, TLV::Tag structTag)
{
TLV::TLVType container;
TLV::TLVType structureContainer;
ReturnErrorOnFailure(reader.Next(TLV::kTLVType_Structure, structTag));
ReturnErrorOnFailure(reader.EnterContainer(container));
ReturnErrorOnFailure(reader.EnterContainer(structureContainer));

TLV::TLVType arrayContainer;
ReturnErrorOnFailure(reader.Next(TLV::kTLVType_Array, TLV::ContextTag(TagEFS::kFieldSetArrayContainer)));
ReturnErrorOnFailure(reader.EnterContainer(container));
ReturnErrorOnFailure(reader.EnterContainer(arrayContainer));

uint8_t i = 0;
CHIP_ERROR err;
Expand All @@ -54,19 +57,26 @@ CHIP_ERROR ExtensionFieldSetsImpl::Deserialize(TLV::TLVReader & reader, TLV::Tag
}
mFieldSetsCount = i;

VerifyOrReturnError(err == CHIP_END_OF_TLV, err);
return reader.ExitContainer(container);
if (err != CHIP_END_OF_TLV)
{
if (err == CHIP_NO_ERROR)
return CHIP_ERROR_BUFFER_TOO_SMALL;

return err;
}

ReturnErrorOnFailure(reader.ExitContainer(arrayContainer));
return reader.ExitContainer(structureContainer);
}

void ExtensionFieldSetsImpl::Clear()
{
if (!this->IsEmpty())

for (uint8_t i = 0; i < mFieldSetsCount; i++)
{
for (uint8_t i = 0; i < mFieldSetsCount; i++)
{
mFieldSets[i].Clear();
}
mFieldSets[i].Clear();
}

mFieldSetsCount = 0;
}

Expand Down
12 changes: 5 additions & 7 deletions src/app/clusters/scenes/SceneTable.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ class SceneHandler : public IntrusiveListNodeBase<>
/// @param clusterBuffer Buffer to hold the supported cluster IDs, cannot hold more than
/// CHIP_CONFIG_SCENES_MAX_CLUSTERS_PER_SCENE. The function shall use the reduce_size() method in the event it is supporting
/// less than CHIP_CONFIG_SCENES_MAX_CLUSTERS_PER_SCENE clusters.

virtual void GetSupportedClusters(EndpointId endpoint, Span<ClusterId> & clusterBuffer) = 0;

/// @brief Returns whether or not a cluster for scenes is supported on an endpoint
Expand All @@ -81,14 +80,13 @@ class SceneHandler : public IntrusiveListNodeBase<>
///
/// @param endpoint[in] Endpoint ID
/// @param extensionFieldSet[in] ExtensionFieldSets provided by the AddScene Command, pre initialized
/// @param cluster[out] Cluster in the Extension field set, filled by the function
/// @param serialisedBytes[out] Buffer to fill from the ExtensionFieldSet in command
/// @return CHIP_NO_ERROR if successful, CHIP_ERROR value otherwise
/// @note Only gets called after the scene-cluster has previously verified that the endpoint, cluster value pair is supported by
/// the handler. It is therefore the implementation's responsibility to also implement the SupportsCluster method.
virtual CHIP_ERROR SerializeAdd(EndpointId endpoint,
const app::Clusters::Scenes::Structs::ExtensionFieldSet::DecodableType & extensionFieldSet,
ClusterId & cluster, MutableByteSpan & serialisedBytes) = 0;
MutableByteSpan & serialisedBytes) = 0;

/// @brief Called when handling StoreScene, and only if the handler supports the given endpoint and cluster.
///
Expand All @@ -98,7 +96,7 @@ class SceneHandler : public IntrusiveListNodeBase<>
/// @param cluster[in] Target Cluster
/// @param serializedBytes[out] Output buffer, data needs to be written in there and size adjusted to the size of the data
/// written.

///
/// @return CHIP_NO_ERROR if successful, CHIP_ERROR value otherwise
virtual CHIP_ERROR SerializeSave(EndpointId endpoint, ClusterId cluster, MutableByteSpan & serializedBytes) = 0;

Expand All @@ -123,6 +121,7 @@ class SceneHandler : public IntrusiveListNodeBase<>
///
/// @param timeMs[in] Transition time in ms to apply the scene
/// @return CHIP_NO_ERROR if successful, CHIP_ERROR value otherwise
/// @note Only gets called for handlers for which SupportsCluster() is true for the given endpoint and cluster.
virtual CHIP_ERROR ApplyScene(EndpointId endpoint, ClusterId cluster, const ByteSpan & serializedBytes,
TransitionTimeMs timeMs) = 0;
};
Expand Down Expand Up @@ -219,9 +218,8 @@ class SceneTable

bool operator==(const SceneData & other)
{
return (mNameLength == other.mNameLength && !memcmp(mName, other.mName,mNameLength) &&
(mSceneTransitionTimeMs == other.mSceneTransitionTimeMs) &&
(mExtensionFieldSets == other.mExtensionFieldSets));
return (mNameLength == other.mNameLength && !memcmp(mName, other.mName, mNameLength) &&
(mSceneTransitionTimeMs == other.mSceneTransitionTimeMs) && (mExtensionFieldSets == other.mExtensionFieldSets));
}

void operator=(const SceneData & other)
Expand Down
35 changes: 20 additions & 15 deletions src/app/clusters/scenes/SceneTableImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -176,55 +176,60 @@ struct FabricSceneData : public PersistentData<kPersistentFabricBufferMax>

CHIP_ERROR Serialize(TLV::TLVWriter & writer) const override
{
TLV::TLVType container;
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Structure, container));
TLV::TLVType fabricSceneContainer;
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Structure, fabricSceneContainer));
ReturnErrorOnFailure(writer.Put(TLV::ContextTag(TagScene::kSceneCount), scene_count));
ReturnErrorOnFailure(writer.StartContainer(TLV::ContextTag(TagScene::kStorageIDArray), TLV::kTLVType_Array, container));
TLV::TLVType sceneMapContainer;
ReturnErrorOnFailure(
writer.StartContainer(TLV::ContextTag(TagScene::kStorageIDArray), TLV::kTLVType_Array, sceneMapContainer));

// Storing the scene map
for (uint8_t i = 0; i < kMaxScenesPerFabric; i++)
{
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Structure, container));
TLV::TLVType sceneIdContainer;
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Structure, sceneIdContainer));
ReturnErrorOnFailure(writer.Put(TLV::ContextTag(TagScene::kEndpointID), (scene_map[i].mEndpointId)));
ReturnErrorOnFailure(writer.Put(TLV::ContextTag(TagScene::kGroupID), (scene_map[i].mGroupId)));
ReturnErrorOnFailure(writer.Put(TLV::ContextTag(TagScene::kSceneID), (scene_map[i].mSceneId)));
ReturnErrorOnFailure(writer.EndContainer(container));
ReturnErrorOnFailure(writer.EndContainer(sceneIdContainer));
}
ReturnErrorOnFailure(writer.EndContainer(container));
return writer.EndContainer(container);
ReturnErrorOnFailure(writer.EndContainer(sceneMapContainer));
return writer.EndContainer(fabricSceneContainer);
}

CHIP_ERROR Deserialize(TLV::TLVReader & reader) override
{
ReturnErrorOnFailure(reader.Next(TLV::kTLVType_Structure, TLV::AnonymousTag()));

TLV::TLVType container;
ReturnErrorOnFailure(reader.EnterContainer(container));
TLV::TLVType fabricSceneContainer;
ReturnErrorOnFailure(reader.EnterContainer(fabricSceneContainer));

ReturnErrorOnFailure(reader.Next(TLV::ContextTag(TagScene::kSceneCount)));
ReturnErrorOnFailure(reader.Get(scene_count));
ReturnErrorOnFailure(reader.Next(TLV::kTLVType_Array, TLV::ContextTag(TagScene::kStorageIDArray)));
ReturnErrorOnFailure(reader.EnterContainer(container));
TLV::TLVType sceneMapContainer;
ReturnErrorOnFailure(reader.EnterContainer(sceneMapContainer));

uint8_t i = 0;
CHIP_ERROR err;
while ((err = reader.Next(TLV::AnonymousTag())) == CHIP_NO_ERROR && i < kMaxScenesPerFabric)
{
ReturnErrorOnFailure(reader.EnterContainer(container));
TLV::TLVType sceneIdContainer;
ReturnErrorOnFailure(reader.EnterContainer(sceneIdContainer));
ReturnErrorOnFailure(reader.Next(TLV::ContextTag(TagScene::kEndpointID)));
ReturnErrorOnFailure(reader.Get(scene_map[i].mEndpointId));
ReturnErrorOnFailure(reader.Next(TLV::ContextTag(TagScene::kGroupID)));
ReturnErrorOnFailure(reader.Get(scene_map[i].mGroupId));
ReturnErrorOnFailure(reader.Next(TLV::ContextTag(TagScene::kSceneID)));
ReturnErrorOnFailure(reader.Get(scene_map[i].mSceneId));
ReturnErrorOnFailure(reader.ExitContainer(container));
ReturnErrorOnFailure(reader.ExitContainer(sceneIdContainer));

i++;
}
VerifyOrReturnError(err == CHIP_END_OF_TLV, err);
VerifyOrReturnError(err == CHIP_END_OF_TLV || err == CHIP_NO_ERROR, err);

ReturnErrorOnFailure(reader.ExitContainer(container));
return reader.ExitContainer(container);
ReturnErrorOnFailure(reader.ExitContainer(sceneMapContainer));
return reader.ExitContainer(fabricSceneContainer);
}

/// @brief Finds the index where to insert current scene by going through the whole table and looking if the scene is already in
Expand Down
64 changes: 36 additions & 28 deletions src/app/clusters/scenes/SceneTableImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,42 +45,44 @@ class DefaultSceneHandlerImpl : public scenes::SceneHandler
/// @brief From command AddScene, allows handler to filter through clusters in command to serialize only the supported ones.
/// @param endpoint[in] Endpoint ID
/// @param extensionFieldSet[in] ExtensionFieldSets provided by the AddScene Command, pre initialized
/// @param cluster[out] Cluster in the Extension field set, filled by the function
/// @param serialisedBytes[out] Buffer to fill from the ExtensionFieldSet in command
/// @return CHIP_NO_ERROR if successful, CHIP_ERROR_INVALID_ARGUMENT if the cluster is not supported, CHIP_ERROR value otherwise
virtual CHIP_ERROR SerializeAdd(EndpointId endpoint,
const app::Clusters::Scenes::Structs::ExtensionFieldSet::DecodableType & extensionFieldSet,
ClusterId & cluster, MutableByteSpan & serializedBytes) override
MutableByteSpan & serializedBytes) override
{
app::Clusters::Scenes::Structs::AttributeValuePair::DecodableType aVPair;
TLV::TLVWriter writer;
TLV::TLVType outer;
size_t pairTotal = 0;
uint8_t pairCount = 0;

uint8_t pairCount = 0;
uint8_t valueBytes = 0;

VerifyOrReturnError(SupportsCluster(endpoint, extensionFieldSet.clusterID), CHIP_ERROR_INVALID_ARGUMENT);

cluster = extensionFieldSet.clusterID;
// Verify size of list
extensionFieldSet.attributeValueList.ComputeSize(&pairTotal);
VerifyOrReturnError(pairTotal <= kMaxAvPair, CHIP_ERROR_BUFFER_TOO_SMALL);

auto pair_iterator = extensionFieldSet.attributeValueList.begin();
while (pair_iterator.Next() && pairCount < kMaxAvPair)
{
aVPair = pair_iterator.GetValue();
mAVPairs[pairCount].attributeID = aVPair.attributeID;
auto value_iterator = aVPair.attributeValue.begin();
size_t valueBytesTotal = 0;
uint8_t valueBytesCount = 0;

valueBytes = 0;
while (value_iterator.Next() && valueBytes < kMaxValueSize)
aVPair.attributeValue.ComputeSize(&valueBytesTotal);
VerifyOrReturnError(valueBytesTotal <= kMaxValueSize, CHIP_ERROR_BUFFER_TOO_SMALL);

auto value_iterator = aVPair.attributeValue.begin();
while (value_iterator.Next())
{
mValueBuffer[pairCount][valueBytes] = value_iterator.GetValue();
valueBytes++;
mValueBuffer[pairCount][valueBytesCount] = value_iterator.GetValue();
valueBytesCount++;
}
// Check we could go through all bytes of the value
ReturnErrorOnFailure(value_iterator.GetStatus());

mAVPairs[pairCount].attributeValue = mValueBuffer[pairCount];
mAVPairs[pairCount].attributeValue.reduce_size(valueBytes);
mAVPairs[pairCount].attributeValue.reduce_size(valueBytesCount);
pairCount++;
}
// Check we could go through all pairs in incoming command
Expand All @@ -90,7 +92,7 @@ class DefaultSceneHandlerImpl : public scenes::SceneHandler
attributeValueList = mAVPairs;
attributeValueList.reduce_size(pairCount);

writer.Init(serialisedBytes);
writer.Init(serializedBytes);
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Structure, outer));
ReturnErrorOnFailure(app::DataModel::Encode(
writer, TLV::ContextTag(app::Clusters::Scenes::Structs::ExtensionFieldSet::Fields::kAttributeValueList),
Expand All @@ -114,38 +116,44 @@ class DefaultSceneHandlerImpl : public scenes::SceneHandler

TLV::TLVReader reader;
TLV::TLVType outer;
uint8_t pairCount = 0;
uint8_t valueBytes = 0;

VerifyOrReturnError(SupportsCluster(endpoint, cluster), CHIP_ERROR_INVALID_ARGUMENT);
size_t pairTotal = 0;
uint8_t pairCount = 0;

extensionFieldSet.clusterID = cluster;
reader.Init(serialisedBytes);
ReturnErrorOnFailure(reader.Next(TLV::kTLVType_Structure, TLV::AnonymousTag()));
ReturnErrorOnFailure(reader.EnterContainer(outer));
ReturnErrorOnFailure(reader.Next(
TLV::kTLVType_Array,
TLV::ContextTag(app::Clusters::Scenes::Structs::ExtensionFieldSet::Fields::kAttributeValueList)));
TLV::kTLVType_Array, TLV::ContextTag(app::Clusters::Scenes::Structs::ExtensionFieldSet::Fields::kAttributeValueList)));
attributeValueList.Decode(reader);

// Verify size of list
attributeValueList.ComputeSize(&pairTotal);
VerifyOrReturnError(pairTotal <= kMaxAvPair, CHIP_ERROR_BUFFER_TOO_SMALL);

auto pair_iterator = attributeValueList.begin();
while (pair_iterator.Next() && pairCount < kMaxAvPair)
while (pair_iterator.Next())
{
decodePair = pair_iterator.GetValue();
mAVPairs[pairCount].attributeID = decodePair.attributeID;
auto value_iterator = decodePair.attributeValue.begin();
valueBytes = 0;
size_t valueBytesTotal = 0;
uint8_t valueBytesCount = 0;

// Verify size of attribute value
decodePair.attributeValue.ComputeSize(&valueBytesTotal);
VerifyOrReturnError(valueBytesTotal <= kMaxValueSize, CHIP_ERROR_BUFFER_TOO_SMALL);

while (value_iterator.Next() && valueBytes < kMaxValueSize)
auto value_iterator = decodePair.attributeValue.begin();
while (value_iterator.Next() && valueBytesCount < kMaxValueSize)
{
mValueBuffer[pairCount][valueBytes] = value_iterator.GetValue();
valueBytes++;
mValueBuffer[pairCount][valueBytesCount] = value_iterator.GetValue();
valueBytesCount++;
}
// Check we could go through all bytes of the value
ReturnErrorOnFailure(value_iterator.GetStatus());

mAVPairs[pairCount].attributeValue = mValueBuffer[pairCount];
mAVPairs[pairCount].attributeValue.reduce_size(valueBytes);
mAVPairs[pairCount].attributeValue.reduce_size(valueBytesCount);
pairCount++;
};
// Check we could go through all pairs stored in memory
Expand Down
Loading

0 comments on commit 2360312

Please sign in to comment.