v1.13.0 #66

Merged · 6 commits · Jan 17, 2025
16 changes: 4 additions & 12 deletions CONTRIBUTING.md
@@ -73,23 +73,15 @@ BRANCH=dev sh internal/tools/sync_proto.sh

2. Update the `TestImage` value in [`qdrant_test/image_test.go`](https://github.com/qdrant/go-client/blob/master/qdrant_test/image_test.go) to `qdrant/qdrant:dev`.

3. Remove the gRPC server definitions from the auto-generated code.

There is currently [no way](https://github.com/golang/protobuf/issues/373) to skip generating Go server definitions.

You’ll need to manually delete them from [`snapshots_service_grpc.pb.go`](https://github.com/qdrant/go-client/blob/master/qdrant/snapshots_service_grpc.pb.go), [`points_service_grpc.pb.go`](https://github.com/qdrant/go-client/blob/master/qdrant/points_service_grpc.pb.go), and [`collections_service_grpc.pb.go`](https://github.com/qdrant/go-client/blob/master/qdrant/collections_service_grpc.pb.go).

Remove lines starting from comments like `// CollectionsServer is the server API for Collections service.` until the end of the file. [Here’s an example commit](https://github.com/qdrant/go-client/commit/6d04e31bb2acccf54f964a634df8930533642892). A hedged helper-script sketch for this step is included after this list.

4. Implement new Qdrant methods in [`points.go`](https://github.com/qdrant/go-client/blob/master/qdrant/points.go), [`collections.go`](https://github.com/qdrant/go-client/blob/master/qdrant/collections.go), or [`qdrant.go`](https://github.com/qdrant/go-client/blob/master/qdrant/qdrant.go) as needed and associated tests in [`qdrant_test/`](https://github.com/qdrant/go-client/tree/master/qdrant_test).
3. Implement new Qdrant methods in [`points.go`](https://github.com/qdrant/go-client/blob/master/qdrant/points.go), [`collections.go`](https://github.com/qdrant/go-client/blob/master/qdrant/collections.go), or [`qdrant.go`](https://github.com/qdrant/go-client/blob/master/qdrant/qdrant.go) as needed and associated tests in [`qdrant_test/`](https://github.com/qdrant/go-client/tree/master/qdrant_test).

Since the API reference is published at https://pkg.go.dev/github.com/qdrant/go-client, the docstrings have to be appropriate.

5. If there are any new `oneOf` properties in the proto definitions, add helper constructors to [`oneof_factory.go`](https://github.com/qdrant/go-client/blob/master/qdrant/oneof_factory.go) following the existing patterns.
4. If there are any new `oneOf` properties in the proto definitions, add helper constructors to [`oneof_factory.go`](https://github.com/qdrant/go-client/blob/master/qdrant/oneof_factory.go) following the existing patterns (a sketch of the pattern follows this list).

6. Run the linter, formatter and tests as per the instructions above.
5. Run the linter, formatter and tests as per the instructions above.

7. Submit your pull request and get those approvals.
6. Submit your pull request and get those approvals.
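
For step 3, a hedged sketch of a throwaway helper (not part of the repository; file paths taken from the step above) that truncates each generated `*_grpc.pb.go` file at its first `// <Service>Server is the server API` marker, mirroring the manual deletion:

```go
// truncate_grpc_servers.go — hypothetical one-off helper, run from the repo root.
// Everything from the "<Service>Server is the server API" comment to the end of
// each file is removed.
package main

import (
	"bytes"
	"log"
	"os"
	"regexp"
)

func main() {
	files := []string{
		"qdrant/snapshots_service_grpc.pb.go",
		"qdrant/points_service_grpc.pb.go",
		"qdrant/collections_service_grpc.pb.go",
	}
	marker := regexp.MustCompile(`(?m)^// \w+Server is the server API`)
	for _, path := range files {
		src, err := os.ReadFile(path)
		if err != nil {
			log.Fatalf("read %s: %v", path, err)
		}
		loc := marker.FindIndex(src)
		if loc == nil {
			log.Printf("no server block found in %s, skipping", path)
			continue
		}
		trimmed := append(bytes.TrimRight(src[:loc[0]], "\n"), '\n')
		if err := os.WriteFile(path, trimmed, 0o644); err != nil {
			log.Fatalf("write %s: %v", path, err)
		}
	}
}
```

And for the `oneOf` step, a self-contained sketch of the factory pattern with a hypothetical message (`ExampleSetting` is not a real proto message; the generated types are inlined here only so the sketch compiles on its own — the real helpers in `oneof_factory.go` wrap the actual generated types the same way):

```go
package qdrant

// Hypothetical output of protoc-gen-go for:
//   message ExampleSetting { oneof variant { uint64 value = 1; bool auto = 2; } }
type ExampleSetting struct {
	Variant isExampleSetting_Variant
}

type isExampleSetting_Variant interface{ isExampleSetting_Variant() }

type ExampleSetting_Value struct{ Value uint64 }
type ExampleSetting_Auto struct{ Auto bool }

func (*ExampleSetting_Value) isExampleSetting_Variant() {}
func (*ExampleSetting_Auto) isExampleSetting_Variant()  {}

// NewExampleSettingValue wraps the numeric variant so callers never touch the
// generated wrapper structs. Helpers in oneof_factory.go follow this shape.
func NewExampleSettingValue(value uint64) *ExampleSetting {
	return &ExampleSetting{Variant: &ExampleSetting_Value{Value: value}}
}

// NewExampleSettingAuto wraps the boolean variant.
func NewExampleSettingAuto() *ExampleSetting {
	return &ExampleSetting{Variant: &ExampleSetting_Auto{Auto: true}}
}
```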

### Releasing a New Version

12 changes: 6 additions & 6 deletions examples/main.go
@@ -38,12 +38,6 @@ func main() {
log.Fatalf("Could not get health: %v", err)
}
log.Printf("Qdrant version: %s", healthCheckResult.GetVersion())
// Delete collection
err = client.DeleteCollection(ctx, collectionName)
if err != nil {
log.Fatalf("Could not delete collection: %v", err)
}
log.Println("Collection", collectionName, "deleted")
// Create collection
err = client.CreateCollection(ctx, &qdrant.CreateCollection{
CollectionName: collectionName,
@@ -151,4 +145,10 @@ func main() {
log.Fatalf("Could not search points: %v", err)
}
log.Printf("Found points: %s", filteredPoints)
// Delete collection
err = client.DeleteCollection(ctx, collectionName)
if err != nil {
log.Fatalf("Could not delete collection: %v", err)
}
log.Println("Collection", collectionName, "deleted")
}
45 changes: 39 additions & 6 deletions internal/proto/collections.proto
@@ -144,6 +144,17 @@ enum CompressionRatio {
x64 = 4;
}

message MaxOptimizationThreads {
enum Setting {
Auto = 0;
}

oneof variant {
uint64 value = 1;
Setting setting = 2;
}
}

message OptimizerStatus {
bool ok = 1;
string error = 2;
@@ -236,7 +247,7 @@ message OptimizersConfigDiff {
optional uint64 max_segment_size = 4;
/*
Maximum size (in kilobytes) of vectors to store in-memory per segment.
Segments larger than this threshold will be stored as read-only memmaped file.
Segments larger than this threshold will be stored as read-only memmapped file.

Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.

@@ -259,13 +270,17 @@
Interval between forced flushes.
*/
optional uint64 flush_interval_sec = 7;

// Deprecated in favor of `max_optimization_threads`
optional uint64 deprecated_max_optimization_threads = 8;

/*
Max number of threads (jobs) for running optimizations per shard.
Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
If null - have no limit and choose dynamically to saturate CPU.
If "auto" - have no limit and choose dynamically to saturate CPU.
If 0 - no optimization threads, optimizations will be disabled.
*/
optional uint64 max_optimization_threads = 8;
optional MaxOptimizationThreads max_optimization_threads = 9;
}
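
A hedged usage sketch for the new field: it assumes helper constructors named `NewMaxOptimizationThreads` / `NewMaxOptimizationThreadsAuto` are added to `oneof_factory.go` (names may differ) and switches a collection's optimizer thread limit to "auto":

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	// "auto": no fixed limit, Qdrant picks a value that saturates the CPU.
	// NewMaxOptimizationThreadsAuto is an assumed helper name.
	err = client.UpdateCollection(ctx, &qdrant.UpdateCollection{
		CollectionName: "my_collection",
		OptimizersConfig: &qdrant.OptimizersConfigDiff{
			MaxOptimizationThreads: qdrant.NewMaxOptimizationThreadsAuto(),
		},
	})
	if err != nil {
		log.Fatalf("could not update collection: %v", err)
	}
}
```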

message ScalarQuantization {
@@ -319,6 +334,13 @@ message StrictModeConfig {
optional uint32 search_max_hnsw_ef = 6;
optional bool search_allow_exact = 7;
optional float search_max_oversampling = 8;
optional uint64 upsert_max_batchsize = 9;
optional uint64 max_collection_vector_size_bytes = 10;
optional uint32 read_rate_limit = 11; // Max number of read operations per minute per replica
optional uint32 write_rate_limit = 12; // Max number of write operations per minute per replica
optional uint64 max_collection_payload_size_bytes = 13;
optional uint64 filter_max_conditions = 14;
optional uint64 condition_max_size = 15;
}
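
A hedged sketch of applying some of the new strict-mode limits at collection creation time; the `StrictModeConfig` field on `CreateCollection` is assumed to be wired through the client, and the limit values are arbitrary:

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func ptr[T any](v T) *T { return &v }

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	err = client.CreateCollection(ctx, &qdrant.CreateCollection{
		CollectionName: "strict_collection",
		VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
			Size:     768,
			Distance: qdrant.Distance_Cosine,
		}),
		// Limit values are arbitrary examples.
		StrictModeConfig: &qdrant.StrictModeConfig{
			ReadRateLimit:       ptr(uint32(600)), // reads per minute per replica
			WriteRateLimit:      ptr(uint32(120)), // writes per minute per replica
			FilterMaxConditions: ptr(uint64(16)),  // cap filter complexity
		},
	})
	if err != nil {
		log.Fatalf("could not create collection: %v", err)
	}
}
```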

message CreateCollection {
@@ -350,6 +372,7 @@ message UpdateCollection {
optional VectorsConfigDiff vectors_config = 6; // New vector parameters
optional QuantizationConfigDiff quantization_config = 7; // Quantization configuration of vector
optional SparseVectorConfig sparse_vectors_config = 8; // New sparse vector parameters
optional StrictModeConfig strict_mode_config = 9; // New strict mode configuration
}

message DeleteCollection {
@@ -429,6 +452,7 @@ message TextIndexParams {
}

message BoolIndexParams {
optional bool on_disk = 1; // If true - store index on disk.
}
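
A sketch of requesting an on-disk boolean payload index. The wrapper types follow standard `protoc-gen-go` naming for the `PayloadIndexParams` oneof, and `CreateFieldIndex` is assumed to be the client's convenience wrapper for this call; prefer a dedicated factory helper if the client ships one:

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func ptr[T any](v T) *T { return &v }

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	// Request a boolean payload index that is kept on disk instead of in memory.
	_, err = client.CreateFieldIndex(ctx, &qdrant.CreateFieldIndexCollection{
		CollectionName: "my_collection",
		FieldName:      "is_active",
		FieldType:      qdrant.FieldType_FieldTypeBool.Enum(),
		FieldIndexParams: &qdrant.PayloadIndexParams{
			IndexParams: &qdrant.PayloadIndexParams_BoolIndexParams{
				BoolIndexParams: &qdrant.BoolIndexParams{OnDisk: ptr(true)},
			},
		},
	})
	if err != nil {
		log.Fatalf("could not create field index: %v", err)
	}
}
```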

message DatetimeIndexParams {
@@ -529,7 +553,8 @@ enum ReplicaState {
Listener = 4; // A shard which receives data, but is not used for search; Useful for backup shards
PartialSnapshot = 5; // Deprecated: snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard
Recovery = 6; // Shard is being recovered by an external node; Normally rejects updates, accepts updates if force is true
Resharding = 7; // Points are being migrated to this shard as part of resharding
Resharding = 7; // Points are being migrated to this shard as part of scale-up resharding
ReshardingScaleDown = 8; // Points are being migrated to this shard as part of scale-down resharding
}

message ShardKey {
@@ -565,6 +590,15 @@ message ReshardingInfo {
uint32 shard_id = 1;
uint64 peer_id = 2;
optional ShardKey shard_key = 3;
ReshardingDirection direction = 4;
}

/*
Resharding direction, scale up or down in number of shards
*/
enum ReshardingDirection {
Up = 0; // Scale up, add a new shard
Down = 1; // Scale down, remove a shard
}

message CollectionClusterInfoResponse {
@@ -573,8 +607,7 @@ message CollectionClusterInfoResponse {
repeated LocalShardInfo local_shards = 3; // Local shards
repeated RemoteShardInfo remote_shards = 4; // Remote shards
repeated ShardTransferInfo shard_transfers = 5; // Shard transfers
// TODO(resharding): enable on release:
// repeated ReshardingInfo resharding_operations = 6; // Resharding operations
repeated ReshardingInfo resharding_operations = 6; // Resharding operations
}
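
A sketch of reading the newly exposed resharding operations; it assumes the client exposes the low-level collections gRPC client via `GetCollectionsClient()` for RPCs without a dedicated convenience wrapper:

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	info, err := client.GetCollectionsClient().CollectionClusterInfo(ctx, &qdrant.CollectionClusterInfoRequest{
		CollectionName: "my_collection",
	})
	if err != nil {
		log.Fatalf("could not get cluster info: %v", err)
	}
	// Ongoing resharding operations, including the new scale-up/scale-down direction.
	for _, op := range info.GetReshardingOperations() {
		log.Printf("resharding shard %d on peer %d, direction: %s",
			op.GetShardId(), op.GetPeerId(), op.GetDirection())
	}
}
```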

message MoveShard {
90 changes: 85 additions & 5 deletions internal/proto/points.proto
@@ -45,11 +45,48 @@ message SparseIndices {
repeated uint32 data = 1;
}

message Document {
string text = 1; // Text of the document
string model = 3; // Model name
map<string, Value> options = 4; // Model options
}

message Image {
Value image = 1; // Image data, either base64 encoded or URL
string model = 2; // Model name
map<string, Value> options = 3; // Model options
}

message InferenceObject {
Value object = 1; // Object to infer
string model = 2; // Model name
map<string, Value> options = 3; // Model options
}
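
A hedged sketch of upserting a point whose embedding is computed server-side from a `Document` (requires an inference-enabled deployment, e.g. Qdrant Cloud). `NewVectorsDocument` is an assumed helper name; construct the `Vectors`/`Vector` oneof directly if no such factory exists:

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	_, err = client.Upsert(ctx, &qdrant.UpsertPoints{
		CollectionName: "documents",
		Points: []*qdrant.PointStruct{
			{
				Id: qdrant.NewIDNum(1),
				// The server embeds the text with the named model.
				// NewVectorsDocument is an assumed helper name.
				Vectors: qdrant.NewVectorsDocument(&qdrant.Document{
					Text:  "Qdrant is a vector database.",
					Model: "sentence-transformers/all-minilm-l6-v2",
				}),
			},
		},
	})
	if err != nil {
		log.Fatalf("could not upsert point: %v", err)
	}
}
```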

// Legacy vector format, which determines the vector type by the configuration of its fields.
message Vector {
repeated float data = 1; // Vector data (flatten for multi vectors)
optional SparseIndices indices = 2; // Sparse indices for sparse vectors
optional uint32 vectors_count = 3; // Number of vectors per multi vector
repeated float data = 1; // Vector data (flatten for multi vectors), deprecated
optional SparseIndices indices = 2; // Sparse indices for sparse vectors, deprecated
optional uint32 vectors_count = 3; // Number of vectors per multi vector, deprecated
oneof vector {
DenseVector dense = 101; // Dense vector
SparseVector sparse = 102; // Sparse vector
MultiDenseVector multi_dense = 103; // Multi dense vector
Document document = 104;
Image image = 105;
InferenceObject object = 106;
}
}

message VectorOutput {
repeated float data = 1; // Vector data (flatten for multi vectors), deprecated
optional SparseIndices indices = 2; // Sparse indices for sparse vectors, deprecated
optional uint32 vectors_count = 3; // Number of vectors per multi vector, deprecated
oneof vector {
DenseVector dense = 101; // Dense vector
SparseVector sparse = 102; // Sparse vector
MultiDenseVector multi_dense = 103; // Multi dense vector
}
}

message DenseVector {
@@ -72,6 +109,9 @@ message VectorInput {
DenseVector dense = 2;
SparseVector sparse = 3;
MultiDenseVector multi_dense = 4;
Document document = 5;
Image image = 6;
InferenceObject object = 7;
}
}

@@ -213,13 +253,24 @@ message NamedVectors {
map<string, Vector> vectors = 1;
}

message NamedVectorsOutput {
map<string, VectorOutput> vectors = 1;
}

message Vectors {
oneof vectors_options {
Vector vector = 1;
NamedVectors vectors = 2;
}
}

message VectorsOutput {
oneof vectors_options {
VectorOutput vector = 1;
NamedVectorsOutput vectors = 2;
}
}

message VectorsSelector {
repeated string names = 1; // List of vectors to include into result
}
@@ -736,7 +787,7 @@ message ScoredPoint {
float score = 3; // Similarity score
reserved 4; // deprecated "vector" field
uint64 version = 5; // Last update operation applied to this point
optional Vectors vectors = 6; // Vectors to search
optional VectorsOutput vectors = 6; // Vectors to search
optional ShardKey shard_key = 7; // Shard key
optional OrderValue order_value = 8; // Order by value
}
@@ -765,21 +816,25 @@ message GroupsResult {
message SearchResponse {
repeated ScoredPoint result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message QueryResponse {
repeated ScoredPoint result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message QueryBatchResponse {
repeated BatchResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message QueryGroupsResponse {
GroupsResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message BatchResult {
@@ -789,16 +844,19 @@ message BatchResult {
message SearchBatchResponse {
repeated BatchResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message SearchGroupsResponse {
GroupsResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message CountResponse {
CountResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}
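
A sketch of reading the new per-request hardware usage counters. The convenience `Query` wrapper returns only the points, so this goes through the low-level points client (assumed to be exposed via `GetPointsClient()`) to reach the full `QueryResponse`:

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	resp, err := client.GetPointsClient().Query(ctx, &qdrant.QueryPoints{
		CollectionName: "my_collection",
		Query:          qdrant.NewQuery(0.1, 0.2, 0.3, 0.4),
	})
	if err != nil {
		log.Fatalf("could not query points: %v", err)
	}
	// usage is optional; it stays nil unless the server reports hardware counters.
	if usage := resp.GetUsage(); usage != nil {
		log.Printf("cpu: %d, io_read: %d, io_write: %d",
			usage.GetCpu(), usage.GetIoRead(), usage.GetIoWrite())
	}
}
```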

message ScrollResponse {
@@ -815,7 +873,7 @@ message RetrievedPoint {
PointId id = 1;
map<string, Value> payload = 2;
reserved 3; // deprecated "vector" field
optional Vectors vectors = 4;
optional VectorsOutput vectors = 4;
optional ShardKey shard_key = 5; // Shard key
optional OrderValue order_value = 6; // Order-by value
}
@@ -828,26 +886,31 @@ message GetResponse {
message RecommendResponse {
repeated ScoredPoint result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message RecommendBatchResponse {
repeated BatchResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message DiscoverResponse {
repeated ScoredPoint result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message DiscoverBatchResponse {
repeated BatchResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message RecommendGroupsResponse {
GroupsResult result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message UpdateBatchResponse {
@@ -863,11 +926,13 @@ message FacetResponse {
message SearchMatrixPairsResponse {
SearchMatrixPairs result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

message SearchMatrixOffsetsResponse {
SearchMatrixOffsets result = 1;
double time = 2; // Time spent to process
optional HardwareUsage usage = 3;
}

// ---------------------------------------------
@@ -894,6 +959,7 @@ message Condition {
Filter filter = 4;
IsNullCondition is_null = 5;
NestedCondition nested = 6;
HasVectorCondition has_vector = 7;
}
}

@@ -909,6 +975,10 @@ message HasIdCondition {
repeated PointId has_id = 1;
}

message HasVectorCondition {
string has_vector = 1;
}
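
A sketch of filtering to points that have a given named vector populated; `NewHasVectorCondition` is an assumed helper name, mirroring the existing condition factories in `oneof_factory.go`:

```go
package main

import (
	"context"
	"log"

	"github.com/qdrant/go-client/qdrant"
)

func main() {
	ctx := context.Background()
	client, err := qdrant.NewClient(&qdrant.Config{Host: "localhost", Port: 6334})
	if err != nil {
		log.Fatalf("could not create client: %v", err)
	}

	points, err := client.Scroll(ctx, &qdrant.ScrollPoints{
		CollectionName: "multimodal",
		Filter: &qdrant.Filter{
			Must: []*qdrant.Condition{
				// Keep only points where the "image" named vector is set.
				// NewHasVectorCondition is an assumed helper name.
				qdrant.NewHasVectorCondition("image"),
			},
		},
	})
	if err != nil {
		log.Fatalf("could not scroll points: %v", err)
	}
	log.Printf("points with an image vector: %d", len(points))
}
```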

message NestedCondition {
string key = 1; // Path to nested object
Filter filter = 2; // Filter condition
@@ -1020,3 +1090,13 @@ message GeoPoint {
double lon = 1;
double lat = 2;
}

// ---------------------------------------------
// ------------ Hardware measurements ----------
// ---------------------------------------------

message HardwareUsage {
uint64 cpu = 1;
uint64 io_read = 2;
uint64 io_write = 3;
}