From 0436784dc24b1d6e9716210227a2b072f51fc5ad Mon Sep 17 00:00:00 2001
From: Stephen McCarthy <29098561+smccarthy-ie@users.noreply.github.com>
Date: Tue, 17 Nov 2020 20:46:37 +0000
Subject: [PATCH] more doc cleanup, formatting, modularization - no technical changes (#1016)

---
 .../assembly-installing-registry-docker.adoc | 1 -
 ...ssembly-installing-registry-openshift.adoc | 1 -
 .../assembly-intro-to-registry-rules.adoc | 2 +-
 .../assembly-intro-to-the-registry.adoc | 1 -
 ...embly-managing-registry-artifacts-api.adoc | 1 -
 ...bly-managing-registry-artifacts-maven.adoc | 1 -
 ...sembly-managing-registry-artifacts-ui.adoc | 1 -
 .../assembly-registry-reference.adoc | 1 -
 .../assembly-using-kafka-client-serdes.adoc | 8 +-
 .../con-registry-artifacts.adoc | 2 +-
 .../con-registry-serdes-avro.adoc | 90 ++++++++
 .../con-registry-serdes-concepts.adoc | 6 +-
 .../con-registry-serdes-json.adoc | 42 ++++
 .../con-registry-serdes-protobuf.adoc | 47 +++++
 .../con-registry-serdes-strategy.adoc | 1 +
 .../con-registry-serdes-types.adoc | 195 ++----------------
 ...roc-installing-postgresql-operatorhub.adoc | 9 -
 .../proc-registry-serdes-config-consumer.adoc | 2 +-
 .../proc-registry-serdes-config-producer.adoc | 2 +-
 .../proc-setting-up-infinispan-storage.adoc | 9 -
 .../proc-setting-up-postgresql-storage.adoc | 11 +-
 .../partials/shared/attributes-links.adoc | 8 +-
 .../ROOT/partials/shared/attributes.adoc | 28 ++-
 23 files changed, 226 insertions(+), 243 deletions(-)
 create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc
 create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc
 create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc

diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc
index c0eda1102f..f393a6d42e 100644
--- a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc
+++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc
@@ -1,5 +1,4 @@
 // Metadata created by nebel
-
 include::{mod-loc}shared/all-attributes.adoc[]
 
 [id="installing-the-registry-docker"]
diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc
index 1bee9b27a6..1757e24e0a 100644
--- a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc
+++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc
@@ -1,5 +1,4 @@
 // Metadata created by nebel
-
 include::{mod-loc}shared/all-attributes.adoc[]
 
 [id="installing-registry-ocp"]
diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc
index 3a2701cc7a..2c97634461 100644
--- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc
+++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc
@@ -10,7 +10,7 @@ This chapter introduces the optional rules used to govern registry content and p
 * xref:registry-rules[]
 * xref:registry-rules-apply[]
 * xref:registry-rules-work[]
-* xref:registry-rules-config[]
+* xref:registry-rules-config[]
 
 //INCLUDES
 include::{mod-loc}getting-started/con-registry-rules.adoc[leveloffset=+1]
diff --git
a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc index 30c1230ee9..26261033a0 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="intro-to-the-registry"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc index cba301c3bb..8d630451b1 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-api"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc index b63a8461ec..ae03c18b4a 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-maven"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc index 33b6ca111a..a178e98923 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-ui"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc index 8319eda9f2..ae52869c66 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="artifact-and-rule-types"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc index 991d78da82..b71386d19d 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel -// include::{mod-loc}shared/all-attributes.adoc[] [id="using-kafka-client-serdes"] @@ -14,6 +13,9 @@ This chapter provides instructions on how to use the Kafka client serializers an * xref:registry-serdes-concepts-strategy-{context}[] * xref:registry-serdes-concepts-constants-{context}[] * xref:registry-serdes-types-serde-{context}[] +* xref:registry-serdes-types-avro-{context}[] +* xref:registry-serdes-types-json-{context}[] +* xref:registry-serdes-types-protobuf-{context}[] * xref:registry-serdes-register-{context}[] * 
xref:registry-serdes-config-consumer-{context}[]
* xref:registry-serdes-config-producer-{context}[]
@@ -34,12 +36,14 @@ include::{mod-loc}getting-started/con-registry-serdes-concepts.adoc[leveloffset=
 include::{mod-loc}getting-started/con-registry-serdes-strategy.adoc[leveloffset=+1]
 include::{mod-loc}getting-started/con-registry-serdes-constants.adoc[leveloffset=+1]
 include::{mod-loc}getting-started/con-registry-serdes-types.adoc[leveloffset=+1]
+include::{mod-loc}getting-started/con-registry-serdes-avro.adoc[leveloffset=+2]
+include::{mod-loc}getting-started/con-registry-serdes-json.adoc[leveloffset=+2]
+include::{mod-loc}getting-started/con-registry-serdes-protobuf.adoc[leveloffset=+2]
 include::{mod-loc}getting-started/proc-registry-serdes-register.adoc[leveloffset=+1]
 include::{mod-loc}getting-started/proc-registry-serdes-config-consumer.adoc[leveloffset=+1]
 include::{mod-loc}getting-started/proc-registry-serdes-config-producer.adoc[leveloffset=+1]
 include::{mod-loc}getting-started/proc-registry-serdes-config-stream.adoc[leveloffset=+1]
-
 //.Additional resources (or Next steps)
 //* ...
 
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc
index 3d7d0a4e32..9667224634 100644
--- a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc
@@ -1,7 +1,7 @@
 // Metadata created by nebel
 
 [id="registry-artifacts"]
-= Schema and API artifacts in {registry}
+= Schema and API artifacts in {registry}
 
 The items stored in {registry}, such as event schemas and API specifications, are known as registry _artifacts_. The following shows an example of an Apache Avro schema artifact in JSON format for a simple share price application:
 
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc
new file mode 100644
index 0000000000..1f07782945
--- /dev/null
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc
@@ -0,0 +1,90 @@
+// Module included in the following assemblies:
+// assembly-using-kafka-client-serdes
+
+[id='registry-serdes-types-avro-{context}']
+= Configure Avro SerDe with {registry}
+
+{registry} provides Kafka client serializer and deserializer classes for Apache Avro to make using Avro as
+easy as possible:
+
+* `io.apicurio.registry.utils.serde.AvroKafkaSerializer`
+* `io.apicurio.registry.utils.serde.AvroKafkaDeserializer`
+
+
+.Configure the Avro serializer
+
+You can configure the Avro serializer class in the following ways:
+
+* {registry} location as a URL
+* Artifact ID strategy
+* Global ID strategy
+* Global ID location
+* Global ID handler
+* Avro datum provider
+* Avro encoding
+
+.Global ID location
+The serializer passes the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. If you want the ID sent in the message headers instead, you can set the following configuration property:
+----
+props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true");
+----
+The property name is `apicurio.registry.use.headers`.
+
+
+.Global ID handler
+You can customize precisely how the global ID is encoded when passing it in the Kafka message body. Set
+the configuration property `apicurio.registry.id-handler` to be a class that implements the
+`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. {registry} provides two implementations of
+that interface:
+
+* `io.apicurio.registry.utils.serde.strategy.DefaultIdHandler` - stores the ID as an 8-byte long
+* `io.apicurio.registry.utils.serde.strategy.Legacy4ByteIdHandler` - stores the ID as a 4-byte int
+
+{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with other registries or serde classes) you may want to use 4 bytes when sending the ID.
+
+.Avro datum provider
+Avro provides different datum writers and readers to write and read data. {registry} supports three different types:
+
+* Generic
+* Specific
+* Reflect
+
+The {registry} `AvroDatumProvider` is the abstraction that determines which type is actually used. `DefaultAvroDatumProvider` is used by default.
+
+There are two configuration options you can set:
+
+* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java class name of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider`
+* `apicurio.registry.use-specific-avro-reader` - `true` or `false`, to use a specific reader type with `DefaultAvroDatumProvider`
+
+.Avro encoding
+
+When using Apache Avro to serialize data, it is common to use the Avro binary encoding format so that the data is encoded as efficiently as possible. However, Avro also supports encoding the data as JSON. Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding to JSON from the default (binary).
+
+Set the Avro encoding to use by configuring the `apicurio.avro.encoding` property. The value must be either
+`JSON` or `BINARY`.
+
+.Configure the Avro deserializer
+
+You must configure the Avro deserializer class to match the configuration settings of the serializer. As a
+result, you can configure the Avro deserializer class in the following ways:
+
+* {registry} location as a URL
+* Global ID handler
+* Avro datum provider
+* Avro encoding
+
+See the serializer section for these configuration options - the property names and values are the same.
+
+[NOTE]
+====
+The following options are not needed when configuring the deserializer:
+
+* Artifact ID strategy
+* Global ID strategy
+* Global ID location
+====
+
+These options are not necessary because the deserializer class can determine this information from
+the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message.
+
+The deserializer determines the location of that global ID by checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload using the configured handler. If the magic byte is not found, the global ID is read from the message headers.
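+
+The following example brings the serializer options described above together. This is a minimal sketch rather than a definitive configuration: it assumes the `props` and `REGISTRY_URL` conventions from the producer example earlier in this chapter, and it sets the datum provider and encoding through the string property names documented above.
+
+[source,java,subs="+quotes,attributes"]
+----
+// Configure {registry} location (REGISTRY_URL is an assumed constant, as in the producer example)
+props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL);
+
+// Pass the global ID in the message headers instead of in the message payload
+props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true");
+
+// Use reflection-based datum reading and writing
+props.putIfAbsent("apicurio.registry.avro-datum-provider",
+    io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider.class.getName());
+
+// Encode payloads as JSON instead of the default binary encoding
+props.putIfAbsent("apicurio.avro.encoding", "JSON");
+----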
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc
index 46c6b93b9a..0bd423d29b 100644
--- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc
@@ -17,7 +17,8 @@ Schemas can evolve, so you can define rules in {registry}, for example, to ensur
 
 These schema technologies can be used by client applications through Kafka client serializer/deserializer (SerDe) services provided by {registry}. The maturity and usage of the SerDe classes provided by {registry} may vary. See the type-specific sections below for more details about each.
 
-= Producer schema configuration
+[discrete]
+== Producer schema configuration
 
 A producer client application uses a serializer to put the messages that it sends to a specific broker topic into the correct data format.
 
@@ -35,7 +36,8 @@ After registering your schema, when you start Kafka and {registry}, you can acce
 
 If a schema already exists, you can create a new version using the REST API based on compatibility rules defined in {registry}. Versions are used for compatibility checking as a schema evolves. An artifact ID and schema version represents a unique tuple that identifies a schema.
 
-= Consumer schema configuration
+[discrete]
+== Consumer schema configuration
 
 A consumer client application uses a deserializer to get the messages that it consumes from a specific broker topic into the correct data format. To enable a consumer to use {registry} for deserialization:
 
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc
new file mode 100644
index 0000000000..35c2129f84
--- /dev/null
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc
@@ -0,0 +1,42 @@
+// Module included in the following assemblies:
+// assembly-using-kafka-client-serdes
+
+[id='registry-serdes-types-json-{context}']
+= Configure JSON Schema SerDe with {registry}
+
+{registry} provides Kafka client serializer and deserializer classes for JSON Schema to make using JSON Schema as easy as possible:
+
+* `io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer`
+* `io.apicurio.registry.utils.serde.JsonSchemaKafkaDeserializer`
+
+Unlike Apache Avro, JSON Schema is not actually a serialization technology - it is instead a validation
+technology. As a result, configuration options for JSON Schema are quite different. For example, there is no
+encoding option, because data is always encoded as JSON.
+
+.Configure the JSON Schema serializer
+
+You can configure the JSON Schema serializer class in the following ways:
+
+* {registry} location as a URL
+* Artifact ID strategy
+* Global ID strategy
+* Validation enabled/disabled
+
+The only non-standard configuration property is whether JSON Schema validation is enabled or
+disabled. The validation feature is disabled by default but can be enabled by setting
+`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example:
+----
+props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true");
+----
+
+.Configure the JSON Schema deserializer
+
+You can configure the JSON Schema deserializer class in the following ways:
+
+* {registry} location as a URL
+* Validation enabled/disabled
+
+The deserializer is simple to configure. You must provide the location of {registry} so that the schema can be loaded. The only other configuration is whether to perform validation. These
+configuration properties are the same as for the serializer.
+
+NOTE: Deserializer validation only works if the serializer passes the global ID in the Kafka message, which will only happen when validation is enabled in the serializer.
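+
+To put the serializer configuration together, the following sketch shows validation enabled in a Kafka producer. It is illustrative only, assuming the `props` and `REGISTRY_URL` conventions from the producer example earlier in this chapter:
+
+[source,java,subs="+quotes,attributes"]
+----
+// Use the {registry}-provided JSON Schema serializer for message values
+props.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
+    io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer.class.getName());
+
+// Configure {registry} location
+props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL);
+
+// Enable validation of outgoing messages against the registered JSON Schema
+props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true");
+----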
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc
new file mode 100644
index 0000000000..22cfbb04ac
--- /dev/null
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc
@@ -0,0 +1,47 @@
+// Module included in the following assemblies:
+// assembly-using-kafka-client-serdes
+
+[id='registry-serdes-types-protobuf-{context}']
+= Configure Protobuf SerDe with {registry}
+
+{registry} provides Kafka client serializer and deserializer classes for Google Protobuf to make using Protobuf as easy as possible:
+
+* `io.apicurio.registry.utils.serde.ProtobufKafkaSerializer`
+* `io.apicurio.registry.utils.serde.ProtobufKafkaDeserializer`
+
+.Configure the Protobuf serializer
+
+You can configure the Protobuf serializer class in the following ways:
+
+* {registry} location as a URL
+* Artifact ID strategy
+* Global ID strategy
+* Global ID location
+* Global ID handler
+
+.Configure the Protobuf deserializer
+
+You must configure the Protobuf deserializer class to match the configuration settings of the serializer. As a result, you can configure the Protobuf deserializer class in the following ways:
+
+* {registry} location as a URL
+* Global ID handler
+
+See the serializer section for these configuration options - the property names and values are the same.
+
+[NOTE]
+====
+The following options are not needed when configuring the deserializer:
+
+* Artifact ID strategy
+* Global ID strategy
+* Global ID location
+====
+
+These options are not necessary because the deserializer class can determine this information from
+the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message.
+
+The deserializer determines the location of that global ID by checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload using the configured handler. If the magic byte is not found, the global ID is read from the message headers.
+
+NOTE: The Protobuf deserializer does not deserialize to your exact Protobuf `Message` implementation,
+but rather to a `DynamicMessage` instance (because there is no appropriate API to do otherwise).
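+
+Because the deserializer returns `DynamicMessage` rather than your generated message class, a consumer typically reads fields through the message descriptor. The following is a minimal sketch, assuming a consumed record whose value is a `DynamicMessage`; the `record` variable and the `symbol` field name are illustrative, not part of the {registry} API:
+
+[source,java,subs="+quotes,attributes"]
+----
+// The value is a DynamicMessage, not your generated Message class
+com.google.protobuf.DynamicMessage message =
+    (com.google.protobuf.DynamicMessage) record.value();
+
+// Look up a field by name through the descriptor, then read its value
+com.google.protobuf.Descriptors.FieldDescriptor field =
+    message.getDescriptorForType().findFieldByName("symbol");
+Object symbol = message.getField(field);
+----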
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc
index 50fcb3b072..32352d5d77 100644
--- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc
@@ -29,6 +29,7 @@ Strategies to return an artifact ID based on an implementation of `ArtifactIdStr
 `TopicIdStrategy`:: (Default) strategy that uses the topic name and `key` or `value` suffix.
 `SimpleTopicIdStrategy`:: Simple strategy that only uses the topic name.
+
 [discrete]
 == Global ID strategy
 
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc
index 2b21536313..fe150f6f48 100644
--- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc
+++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc
@@ -2,7 +2,7 @@
 // assembly-using-kafka-client-serdes
 
 [id='registry-serdes-types-serde-{context}']
-= Configuring different SerDe types
+= Using different client serializer/deserializer types
 
 When using a schema technology in your Kafka applications, you must choose which specific schema type to use. Common options include:
 
@@ -12,13 +12,13 @@ When using a schema technology in your Kafka applications, you must choose which
 
 Which schema technology you choose is dependent on use case and preference. Of course you can use Kafka to implement custom serializer and deserializer classes, so you are always free to write your own classes, including leveraging {registry} functionality using the {registry} REST Java client.
 
-For your convenience, {registry} provides out-of-the box SerDe classes for all three schema technologies. This section explains how to configure Kafka applications to use each type.
-
-Using one of the serializer or deserializer classes provided by {registry} in your Kafka application involves setting the correct configuration properties. Here are some simple examples of configuring producer and consumer Kafka applications.
+For your convenience, {registry} provides out-of-the-box SerDe classes for Avro, JSON Schema, and Protobuf schema technologies. The following sections explain how to configure Kafka applications to use each type.
 
 [discrete]
-== Configuring a producer
+== Kafka application configuration for serializers/deserializers
+Using one of the serializer or deserializer classes provided by {registry} in your Kafka application involves setting the correct configuration properties. The following simple examples show how to configure a serializer in a Kafka producer application and how to configure a deserializer in a Kafka consumer application.
+
+.Example serializer configuration in a Kafka producer
 [source,java,subs="+quotes,attributes"]
 ----
 public Producer createKafkaProducer(String kafkaBootstrapServers, String topicName) {
@@ -29,7 +29,7 @@ public Producer createKafkaProducer(String kafkaBootstrapServers,
     props.putIfAbsent(ProducerConfig.CLIENT_ID_CONFIG, "Producer-" + topicName);
     props.putIfAbsent(ProducerConfig.ACKS_CONFIG, "all");
 
-    // Use a {registry} provided Kafka Serializer
+    // Use a {registry}-provided Kafka serializer
     props.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
         io.apicurio.registry.utils.serde.AvroKafkaSerializer.class.getName());
     props.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
@@ -37,9 +37,11 @@ public Producer createKafkaProducer(String kafkaBootstrapServers,
 
     // Configure {registry} location
     props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL);
+    // Map the topic name (plus -key/value) to the artifactId in the registry
     props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM,
         io.apicurio.registry.utils.serde.strategy.TopicIdStrategy.class.getName());
 
+    // Get an existing schema or auto-register if not found
     props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM,
         io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy.class.getName());
 
@@ -50,9 +52,7 @@ public Producer createKafkaProducer(String kafkaBootstrapServers,
 }
 ----
 
-[discrete]
-== Configuring a consumer
-
+.Example deserializer configuration in a Kafka consumer
 [source,java,subs="+quotes,attributes"]
 ----
 public Consumer createKafkaConsumer(String kafkaBootstrapServers, String topicName) {
@@ -65,7 +65,7 @@ public Consumer createKafkaConsumer(String kafkaBootstrapServers,
     props.putIfAbsent(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
     props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
 
-    // Use a {registry} provided Kafka Deserializer
+    // Use a {registry}-provided Kafka deserializer
     props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
         io.apicurio.registry.utils.serde.AvroKafkaDeserializer.class.getName());
     props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
@@ -73,180 +73,13 @@ public Consumer createKafkaConsumer(String kafkaBootstrapServers,
 
     // Configure {registry} location
     props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL);
-    // No other configuration needed for the deserializer, because the globalId of the schema
-    // the deserializer should use is sent as part of the message. So the deserializer simply
+
+    // No other configuration needed for the deserializer, because the globalId of the schema
+    // the deserializer uses is sent as part of the message. The deserializer simply
     // extracts that globalId and uses it to look up the schema from the registry.
 
-    // Create the Kafka Consumer
+    // Create the Kafka consumer
     KafkaConsumer consumer = new KafkaConsumer<>(props);
     return consumer;
 }
 ----
-
-== Using Avro SerDe with {registry}
-
-{registry} provides serializer and deserializer classes for Apache Avro to make using Avro as
-easy as possible.
These classes are: - -* `io.apicurio.registry.utils.serde.AvroKafkaSerializer` -* `io.apicurio.registry.utils.serde.AvroKafkaDeserializer` - -=== Configuring the Avro serializer - -You can configure the Avro serializer class in the following ways: - -* {registry} location as a URL -* Artifact ID strategy -* Global ID strategy -* Global ID location -* Global ID handler -* Avro datum provider -* Avro encoding - -.Global ID location -The serializer passes the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. If you want the ID sent in the message headers instead, you can set the following configuration property: ----- -props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true") ----- -The property name is `apicurio.registry.use.headers`. - - -.Global ID handler -You can customize precisely how the global ID is encoded when passing it in the Kafka message body. Set -the configuration property `apicurio.registry.id-handler` to be a class that implements the -`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. {registry} provides two implementations of -that interface: - -* `io.apicurio.registry.utils.serde.strategy.DefaultIdHandler` - stores the ID as an 8 byte long -* `io.apicurio.registry.utils.serde.strategy.Legacy4ByteIdHandler` - stores the ID as an 4 byte int - -{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with other registries or serde classes) you may want to use 4 bytes when sending the ID. - -.Avro datum provider -Avro provides different datum writers and readers to write and read data. {registry} supports three different types: - -* Generic -* Specific -* Reflect - -The {registry} `AvroDatumProvider` is the abstraction on which type is then actually used, where `DefaultAvroDatumProvider` is used by default. - -There are two configuration options you can set: - -* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java class name of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider` -* `apicurio.registry.use-specific-avro-reader` - true or false, to use specific type when using `DefaultAvroDatumProvider` - -.Avro encoding - -When using Apache Avro to serializer data, it is common to use the Avro binary encoding format. This is so that the data is encoded in as efficient a format as possible. However, Avro also supports encoding the data as JSON. Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding to JSON from the default (binary). - -Set the Avro encoding to use by configuring the `apicurio.avro.encoding` property. The value must be either -`JSON` or `BINARY`. - -=== Configuring the Avro deserializer - -You must configure the Avro deserializer class to match the configuration settings of the serializer. As a -result, you can configure the Avro deserializer class in the following ways: - -* {registry} location as a URL -* Global ID handler -* Avro datum provider -* Avro encoding - -See the serializer documentation for the above configuration options - the property names and values are the same. 
- -[NOTE] -==== -The following options are not needed when configuring the deserializer: - -* Artifact ID strategy -* Global ID strategy -* Global ID location -==== - -The reason these options are not necessary is that the deserializer class can figure this information out from -the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. - -The location of that global ID is determined by the deserializer by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload using the configured handler. If the magic byte is not found, the global ID is read from the message headers. - -== Using JSON Schema SerDe with {registry} - -{registry} provides serializer and deserializer classes for JSON Schema to make using JSON Schema as easy as possible. These classes are: - -* `io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer` -* `io.apicurio.registry.utils.serde.JsonSchemaKafkaDeserializer` - -Unlike Apache Avro, JSON Schema is not actually a serialization technology - it is instead a validation -technology. As a result, configuration options for JSON Schema are quite different. For example, there is no -encoding option, because data is always encoded as JSON. - -=== Configuring the JSON Schema serializer - -You can configure the JSON Schema serializer class in the following ways: - -* {registry} location as a URL -* Artifact ID strategy -* Global ID strategy -* Validation enabled/disabled - -As you can see, the only non-standard configuration property is whether JSON Schema validation is enabled or -disabled. The validation feature is disabled by default but can be enabled by setting -`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example: ----- -props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true")` ----- - -=== Configuring the JSON Schema deserializer - -You can configure the JSON Schema deserializer class in the following ways: - -* {registry} location as a URL -* Validation enabled/disabled - -The deserializer is simple to configure. You must provide the location of {registry} so that the schema can be loaded. The only other configuration is whether or not to perform validation. These -configuration properties are the same as for the serializer. - -NOTE: Deserializer validation only works if the serializer passes the global ID in the Kafka message, which will only happen when validation is enabled in the serializer. - -== Using Protobuf SerDe with {registry} - -{registry} provides serializer and deserializer classes for Google Protobuf out of the box, to make using Protobuf as easy as possible. These classes are: - -* `io.apicurio.registry.utils.serde.ProtobufKafkaSerializer` -* `io.apicurio.registry.utils.serde.ProtobufKafkaDeserializer` - -=== Configuring the Protobuf serializer - -You can configure the Protobuf serializer class in the following ways: - -* {registry} location as a URL -* Artifact ID strategy -* Global ID strategy -* Global ID location -* Global ID handler - -=== Configuring the Protobuf deserializer - -You must configure the Protobuf deserializer class to match the configuration settings of the serializer. 
As a result, you can configure the Protobuf deserializer class in the following ways: - -* {registry} location as a URL -* Global ID handler - -See the serializer documentation these configuration options - the property names and values are the same. - -[NOTE] -==== -The following options are not needed when configuring the deserializer: - -* Artifact ID strategy -* Global ID strategy -* Global ID location -==== - -The reason these options are not necessary is that the deserializer class can figure this information out from -the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. - -The location of that global ID is determined (by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload (using the configured handler). If the magic byte is not found, the global ID is read from the message headers. - -NOTE: The Protobuf deserializer does not deserialize to your exact Protobuf Message implementation, -but rather to a `DynamicMessage` instance (because there is no appropriate API to do otherwise). diff --git a/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc b/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc index 94c5506f65..e4e5b820bd 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc @@ -8,15 +8,6 @@ If you do not already have a PostgreSQL database Operator installed, you can install a PostgreSQL Operator on your OpenShift cluster from the OperatorHub. The OperatorHub is available from the OpenShift Container Platform web console and provides an interface for cluster administrators to discover and install Operators. For more details, see the https://docs.openshift.com/container-platform/{registry-ocp-version}/operators/olm-understanding-operatorhub.html[OpenShift documentation]. -ifdef::rh-service-registry[] -[IMPORTANT] -==== -{registry} storage in a PostgreSQL database is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. - -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. -==== -endif::[] - .Prerequisites * You must have cluster administrator access to an OpenShift cluster. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc index 0ae08a6257..71b8e87554 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-config-consumer-{context}'] -= Using a schema from a consumer client += Using a schema from a Kafka consumer client This procedure describes how to configure a Kafka consumer client written in Java to use a schema from {registry}. diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc index bfcdf27345..7b35e40598 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-config-producer-{context}'] -= Using a schema from a producer client += Using a schema from a Kafka producer client This procedure describes how to configure a Kafka producer client written in Java to use a schema from {registry}. diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc index d6d3cc0c3f..d378794aab 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc @@ -8,15 +8,6 @@ This section explains how to configure Infinispan cache-based storage for {registry} on OpenShift. This storage option is based on Infinispan community Java libraries embedded in the Quarkus-based {registry} server. You do not need to install a separate Infinispan server using this storage option. This option is suitable for development or demonstration only, and is not suitable for production environments. -ifdef::rh-service-registry[] -[IMPORTANT] -==== -{registry} storage in Infinispan is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. - -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. -==== -endif::[] - .Prerequisites * You must have an OpenShift cluster with cluster administrator access. * You must have already installed {registry}. See xref:installing-registry-operatorhub[]. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc index 9d2db85884..d29d3390fb 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc @@ -8,19 +8,10 @@ This section explains how to configure Java Persistence API-based storage for {registry} on OpenShift using a PostgreSQL database Operator. You can install {registry} in an existing database or create a new database, depending on your environment. This section shows a simple example using the PostgreSQL Operator by Dev4Ddevs.com. -ifdef::rh-service-registry[] -[IMPORTANT] -==== -{registry} storage in a PostgreSQL database is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. - -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. -==== -endif::[] - .Prerequisites * You must have an OpenShift cluster with cluster administrator access. * You must have already installed {registry}. See xref:installing-registry-operatorhub[]. -* You must have already installed a PostgreSQL Operator on OpenShift. For examaple, see xref:installing-postgresql-operatorhub[]. +* You must have already installed a PostgreSQL Operator on OpenShift. For example, see xref:installing-postgresql-operatorhub[]. 
.Procedure diff --git a/docs/modules/ROOT/partials/shared/attributes-links.adoc b/docs/modules/ROOT/partials/shared/attributes-links.adoc index c1e4851362..ce3fc01355 100644 --- a/docs/modules/ROOT/partials/shared/attributes-links.adoc +++ b/docs/modules/ROOT/partials/shared/attributes-links.adoc @@ -160,16 +160,16 @@ // Debezium titles -:LinkDebeziumInstallOpenShift: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/installing_change_data_capture_on_openshift/ +:LinkDebeziumInstallOpenShift: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/installing_change_data_capture_on_openshift/ :NameDebeziumInstallOpenShift: Installing Debezium on OpenShift -:LinkDebeziumInstallRHEL: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/installing_change_data_capture_on_rhel/ +:LinkDebeziumInstallRHEL: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/installing_change_data_capture_on_rhel/ :NameDebeziumInstallRHEL: Installing Debezium on RHEL -:LinkDebeziumGettingStarted: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/getting_started_with_change_data_capture/index +:LinkDebeziumGettingStarted: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/getting_started_with_change_data_capture/index :NameDebeziumGettingStarted: Getting Started with Debezium -:LinkDebeziumUserGuide: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/debezium_user_guide/index +:LinkDebeziumUserGuide: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/debezium_user_guide/index :NameDebeziumUserGuide: Debezium User Guide // Debezium link attributes that are used upstream. Add attributes as needed. 
diff --git a/docs/modules/ROOT/partials/shared/attributes.adoc b/docs/modules/ROOT/partials/shared/attributes.adoc
index ff112546f8..f340f3ec41 100644
--- a/docs/modules/ROOT/partials/shared/attributes.adoc
+++ b/docs/modules/ROOT/partials/shared/attributes.adoc
@@ -14,18 +14,19 @@
 // Untagged content is common
 
 // upstream
-:apicurio-registry:
-:registry: Apicurio Registry
-:kafka-streams: Strimzi
-:registry-version: 1.3
+//:apicurio-registry:
+//:registry: Apicurio Registry
+//:kafka-streams: Strimzi
+//:registry-version: 1.3
 
 // downstream
-//:rh-service-registry:
-//:registry: Service Registry
-//:kafka-streams: AMQ Streams
-//:registry-version: 1.0
-
-//:attachmentsdir: files
+:rh-service-registry:
+:registry: Service Registry
+:kafka-streams: AMQ Streams
+:registry-version: 1.1
+:version: 2020-Q4
+:attachmentsdir: files
 
 //integration products
 :fuse-version: 7.7
@@ -33,12 +34,9 @@
 :3scale-version: 2.9
 
 //common
-:version: 2020-Q4
 :registry-ocp-version: 4.5
 :context: registry
-
-
 // Characters
 :copy: ©
 :infin: ∞
@@ -48,7 +46,7 @@
 :reg: ®
 :trade: ™
 
-//Include attributes for deep linking
+//Include attributes for external linking
 include::attributes-links.adoc[]
 
 // Download URLs
@@ -63,7 +61,7 @@ include::attributes-links.adoc[]
 :registry-rule-types: xref:artifact-and-rule-types[]
 :managing-registry-artifacts-ui: xref:managing-registry-artifacts-ui[]
 :installing-the-registry-openshift: xref:installing-registry-ocp[]
-:installing-the-registry-docker: xref:installing-the-registry-docker[]
+:installing-the-registry-storage-openshift: xref:installing-registry-streams-storage[]
 :registry-reference: xref:artifact-and-rule-types[]
 :managing-registry-artifacts-api: xref:managing-registry-artifacts-api[]
 :kafka-client-serdes: xref:using-kafka-client-serdes[]