From 62ef2cd8a39fc93e7fa4bb790d7cd92adb77571f Mon Sep 17 00:00:00 2001
From: Wes McKinney
Date: Thu, 20 Jul 2017 12:58:35 -0400
Subject: [PATCH 01/16] [C++] Remove Plasma source tree for 0.5.0 release
 pending IP Clearance

Change-Id: I1244ada7297d2f6f567d679452d21a26b91c0360
---
 cpp/src/plasma/CMakeLists.txt               |  113 -
 cpp/src/plasma/client.cc                    |  557 --
 cpp/src/plasma/client.h                     |  343 --
 cpp/src/plasma/common.cc                    |   83 -
 cpp/src/plasma/common.h                     |   63 -
 cpp/src/plasma/events.cc                    |   81 -
 cpp/src/plasma/events.h                     |   99 -
 cpp/src/plasma/eviction_policy.cc           |  107 -
 cpp/src/plasma/eviction_policy.h            |  134 -
 cpp/src/plasma/extension.cc                 |  456 --
 cpp/src/plasma/extension.h                  |   50 -
 cpp/src/plasma/fling.cc                     |   90 -
 cpp/src/plasma/fling.h                      |   52 -
 cpp/src/plasma/format/.gitignore            |    1 -
 cpp/src/plasma/format/common.fbs            |   34 -
 cpp/src/plasma/format/plasma.fbs            |  291 -
 cpp/src/plasma/io.cc                        |  212 -
 cpp/src/plasma/io.h                         |   55 -
 cpp/src/plasma/malloc.cc                    |  178 -
 cpp/src/plasma/malloc.h                     |   26 -
 cpp/src/plasma/plasma.cc                    |   64 -
 cpp/src/plasma/plasma.h                     |  191 -
 cpp/src/plasma/protocol.cc                  |  502 --
 cpp/src/plasma/protocol.h                   |  170 -
 cpp/src/plasma/store.cc                     |  683 ---
 cpp/src/plasma/store.h                      |  169 -
 cpp/src/plasma/test/client_tests.cc         |  132 -
 cpp/src/plasma/test/run_tests.sh            |   61 -
 cpp/src/plasma/test/run_valgrind.sh         |   27 -
 cpp/src/plasma/test/serialization_tests.cc  |  388 --
 cpp/src/plasma/thirdparty/ae/ae.c           |  465 --
 cpp/src/plasma/thirdparty/ae/ae.h           |  123 -
 cpp/src/plasma/thirdparty/ae/ae_epoll.c     |  135 -
 cpp/src/plasma/thirdparty/ae/ae_evport.c    |  320 -
 cpp/src/plasma/thirdparty/ae/ae_kqueue.c    |  138 -
 cpp/src/plasma/thirdparty/ae/ae_select.c    |  106 -
 cpp/src/plasma/thirdparty/ae/config.h       |   54 -
 cpp/src/plasma/thirdparty/ae/zmalloc.h      |   45 -
 cpp/src/plasma/thirdparty/dlmalloc.c        | 6281 --------
 cpp/src/plasma/thirdparty/xxhash.cc         |  889 ---
 cpp/src/plasma/thirdparty/xxhash.h          |  293 -
 41 files changed, 14261 deletions(-)
 delete mode 100644 cpp/src/plasma/CMakeLists.txt
 delete mode 100644 cpp/src/plasma/client.cc
 delete mode 100644 cpp/src/plasma/client.h
 delete mode 100644 cpp/src/plasma/common.cc
 delete mode 100644 cpp/src/plasma/common.h
 delete mode 100644 cpp/src/plasma/events.cc
 delete mode 100644 cpp/src/plasma/events.h
 delete mode 100644 cpp/src/plasma/eviction_policy.cc
 delete mode 100644 cpp/src/plasma/eviction_policy.h
 delete mode 100644 cpp/src/plasma/extension.cc
 delete mode 100644 cpp/src/plasma/extension.h
 delete mode 100644 cpp/src/plasma/fling.cc
 delete mode 100644 cpp/src/plasma/fling.h
 delete mode 100644 cpp/src/plasma/format/.gitignore
 delete mode 100644 cpp/src/plasma/format/common.fbs
 delete mode 100644 cpp/src/plasma/format/plasma.fbs
 delete mode 100644 cpp/src/plasma/io.cc
 delete mode 100644 cpp/src/plasma/io.h
 delete mode 100644 cpp/src/plasma/malloc.cc
 delete mode 100644 cpp/src/plasma/malloc.h
 delete mode 100644 cpp/src/plasma/plasma.cc
 delete mode 100644 cpp/src/plasma/plasma.h
 delete mode 100644 cpp/src/plasma/protocol.cc
 delete mode 100644 cpp/src/plasma/protocol.h
 delete mode 100644 cpp/src/plasma/store.cc
 delete mode 100644 cpp/src/plasma/store.h
 delete mode 100644 cpp/src/plasma/test/client_tests.cc
 delete mode 100644 cpp/src/plasma/test/run_tests.sh
 delete mode 100644 cpp/src/plasma/test/run_valgrind.sh
 delete mode 100644 cpp/src/plasma/test/serialization_tests.cc
 delete mode 100644 cpp/src/plasma/thirdparty/ae/ae.c
 delete mode 100644 cpp/src/plasma/thirdparty/ae/ae.h
 delete mode 100644 cpp/src/plasma/thirdparty/ae/ae_epoll.c
 delete mode 100644 cpp/src/plasma/thirdparty/ae/ae_evport.c
 delete mode 100644
cpp/src/plasma/thirdparty/ae/ae_kqueue.c delete mode 100644 cpp/src/plasma/thirdparty/ae/ae_select.c delete mode 100644 cpp/src/plasma/thirdparty/ae/config.h delete mode 100644 cpp/src/plasma/thirdparty/ae/zmalloc.h delete mode 100644 cpp/src/plasma/thirdparty/dlmalloc.c delete mode 100644 cpp/src/plasma/thirdparty/xxhash.cc delete mode 100644 cpp/src/plasma/thirdparty/xxhash.h diff --git a/cpp/src/plasma/CMakeLists.txt b/cpp/src/plasma/CMakeLists.txt deleted file mode 100644 index 4ff3beba779c2..0000000000000 --- a/cpp/src/plasma/CMakeLists.txt +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -cmake_minimum_required(VERSION 2.8) - -project(plasma) - -find_package(PythonLibsNew REQUIRED) -find_package(Threads) - -option(PLASMA_PYTHON - "Build the Plasma Python extensions" - OFF) - -if(APPLE) - SET(CMAKE_SHARED_LIBRARY_SUFFIX ".so") -endif(APPLE) - -include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS}) -include_directories("${FLATBUFFERS_INCLUDE_DIR}" "${CMAKE_CURRENT_LIST_DIR}/" "${CMAKE_CURRENT_LIST_DIR}/thirdparty/" "${CMAKE_CURRENT_LIST_DIR}/../") - -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_XOPEN_SOURCE=500 -D_POSIX_C_SOURCE=200809L") - -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-conversion") - -# Compile flatbuffers - -set(PLASMA_FBS_SRC "${CMAKE_CURRENT_LIST_DIR}/format/plasma.fbs" "${CMAKE_CURRENT_LIST_DIR}/format/common.fbs") -set(OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/format/) - -set(PLASMA_FBS_OUTPUT_FILES - "${OUTPUT_DIR}/common_generated.h" - "${OUTPUT_DIR}/plasma_generated.h") - -add_custom_target(gen_plasma_fbs DEPENDS ${PLASMA_FBS_OUTPUT_FILES}) - -if(FLATBUFFERS_VENDORED) - add_dependencies(gen_plasma_fbs flatbuffers_ep) -endif() - -add_custom_command( - OUTPUT ${PLASMA_FBS_OUTPUT_FILES} - # The --gen-object-api flag generates a C++ class MessageT for each - # flatbuffers message Message, which can be used to store deserialized - # messages in data structures. This is currently used for ObjectInfo for - # example. 
- COMMAND ${FLATBUFFERS_COMPILER} -c -o ${OUTPUT_DIR} ${PLASMA_FBS_SRC} --gen-object-api - DEPENDS ${PLASMA_FBS_SRC} - COMMENT "Running flatc compiler on ${PLASMA_FBS_SRC}" - VERBATIM) - -if(UNIX AND NOT APPLE) - link_libraries(rt) -endif() - -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") - -set_source_files_properties(extension.cc PROPERTIES COMPILE_FLAGS -Wno-strict-aliasing) - -set(PLASMA_SRCS - client.cc - common.cc - eviction_policy.cc - events.cc - fling.cc - io.cc - malloc.cc - plasma.cc - protocol.cc - thirdparty/ae/ae.c - thirdparty/xxhash.cc) - -ADD_ARROW_LIB(plasma - SOURCES ${PLASMA_SRCS} - DEPENDENCIES gen_plasma_fbs - SHARED_LINK_LIBS ${FLATBUFFERS_STATIC_LIB} ${CMAKE_THREAD_LIBS_INIT} arrow_static - STATIC_LINK_LIBS ${FLATBUFFERS_STATIC_LIB} ${CMAKE_THREAD_LIBS_INIT} arrow_static) - -# The optimization flag -O3 is suggested by dlmalloc.c, which is #included in -# malloc.cc; we set it here regardless of whether we do a debug or release build. -set_source_files_properties(malloc.cc PROPERTIES COMPILE_FLAGS "-Wno-error -O3") - -add_executable(plasma_store store.cc) -target_link_libraries(plasma_store plasma_static) - -ADD_ARROW_TEST(test/serialization_tests) -ARROW_TEST_LINK_LIBRARIES(test/serialization_tests plasma_static) -ADD_ARROW_TEST(test/client_tests) -ARROW_TEST_LINK_LIBRARIES(test/client_tests plasma_static) - -if(PLASMA_PYTHON) - add_library(plasma_extension SHARED extension.cc) - - if(APPLE) - target_link_libraries(plasma_extension plasma_static "-undefined dynamic_lookup") - else(APPLE) - target_link_libraries(plasma_extension plasma_static -Wl,--whole-archive ${FLATBUFFERS_STATIC_LIB} -Wl,--no-whole-archive) - endif(APPLE) -endif() diff --git a/cpp/src/plasma/client.cc b/cpp/src/plasma/client.cc deleted file mode 100644 index dcb78e7ec52c6..0000000000000 --- a/cpp/src/plasma/client.cc +++ /dev/null @@ -1,557 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// PLASMA CLIENT: Client library for using the plasma store and manager - -#include "plasma/client.h" - -#ifdef _WIN32 -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "plasma/common.h" -#include "plasma/fling.h" -#include "plasma/io.h" -#include "plasma/plasma.h" -#include "plasma/protocol.h" - -#define XXH_STATIC_LINKING_ONLY -#include "thirdparty/xxhash.h" - -#define XXH64_DEFAULT_SEED 0 - -// Number of threads used for memcopy and hash computations. 
-constexpr int64_t kThreadPoolSize = 8; -constexpr int64_t kBytesInMB = 1 << 20; -static std::vector threadpool_(kThreadPoolSize); - -// If the file descriptor fd has been mmapped in this client process before, -// return the pointer that was returned by mmap, otherwise mmap it and store the -// pointer in a hash table. -uint8_t* PlasmaClient::lookup_or_mmap(int fd, int store_fd_val, int64_t map_size) { - auto entry = mmap_table_.find(store_fd_val); - if (entry != mmap_table_.end()) { - close(fd); - return entry->second.pointer; - } else { - uint8_t* result = reinterpret_cast( - mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)); - // TODO(pcm): Don't fail here, instead return a Status. - if (result == MAP_FAILED) { ARROW_LOG(FATAL) << "mmap failed"; } - close(fd); - ClientMmapTableEntry& entry = mmap_table_[store_fd_val]; - entry.pointer = result; - entry.length = map_size; - entry.count = 0; - return result; - } -} - -// Get a pointer to a file that we know has been memory mapped in this client -// process before. -uint8_t* PlasmaClient::lookup_mmapped_file(int store_fd_val) { - auto entry = mmap_table_.find(store_fd_val); - ARROW_CHECK(entry != mmap_table_.end()); - return entry->second.pointer; -} - -void PlasmaClient::increment_object_count( - const ObjectID& object_id, PlasmaObject* object, bool is_sealed) { - // Increment the count of the object to track the fact that it is being used. - // The corresponding decrement should happen in PlasmaClient::Release. - auto elem = objects_in_use_.find(object_id); - ObjectInUseEntry* object_entry; - if (elem == objects_in_use_.end()) { - // Add this object ID to the hash table of object IDs in use. The - // corresponding call to free happens in PlasmaClient::Release. - objects_in_use_[object_id] = - std::unique_ptr(new ObjectInUseEntry()); - objects_in_use_[object_id]->object = *object; - objects_in_use_[object_id]->count = 0; - objects_in_use_[object_id]->is_sealed = is_sealed; - object_entry = objects_in_use_[object_id].get(); - // Increment the count of the number of objects in the memory-mapped file - // that are being used. The corresponding decrement should happen in - // PlasmaClient::Release. - auto entry = mmap_table_.find(object->handle.store_fd); - ARROW_CHECK(entry != mmap_table_.end()); - ARROW_CHECK(entry->second.count >= 0); - // Update the in_use_object_bytes_. - in_use_object_bytes_ += - (object_entry->object.data_size + object_entry->object.metadata_size); - entry->second.count += 1; - } else { - object_entry = elem->second.get(); - ARROW_CHECK(object_entry->count > 0); - } - // Increment the count of the number of instances of this object that are - // being used by this client. The corresponding decrement should happen in - // PlasmaClient::Release. - object_entry->count += 1; -} - -Status PlasmaClient::Create(const ObjectID& object_id, int64_t data_size, - uint8_t* metadata, int64_t metadata_size, uint8_t** data) { - ARROW_LOG(DEBUG) << "called plasma_create on conn " << store_conn_ << " with size " - << data_size << " and metadata size " << metadata_size; - RETURN_NOT_OK(SendCreateRequest(store_conn_, object_id, data_size, metadata_size)); - std::vector buffer; - RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaCreateReply, &buffer)); - ObjectID id; - PlasmaObject object; - RETURN_NOT_OK(ReadCreateReply(buffer.data(), &id, &object)); - // If the CreateReply included an error, then the store will not send a file - // descriptor. 
- int fd = recv_fd(store_conn_); - ARROW_CHECK(fd >= 0) << "recv not successful"; - ARROW_CHECK(object.data_size == data_size); - ARROW_CHECK(object.metadata_size == metadata_size); - // The metadata should come right after the data. - ARROW_CHECK(object.metadata_offset == object.data_offset + data_size); - *data = lookup_or_mmap(fd, object.handle.store_fd, object.handle.mmap_size) + - object.data_offset; - // If plasma_create is being called from a transfer, then we will not copy the - // metadata here. The metadata will be written along with the data streamed - // from the transfer. - if (metadata != NULL) { - // Copy the metadata to the buffer. - memcpy(*data + object.data_size, metadata, metadata_size); - } - // Increment the count of the number of instances of this object that this - // client is using. A call to PlasmaClient::Release is required to decrement - // this - // count. Cache the reference to the object. - increment_object_count(object_id, &object, false); - // We increment the count a second time (and the corresponding decrement will - // happen in a PlasmaClient::Release call in plasma_seal) so even if the - // buffer - // returned by PlasmaClient::Dreate goes out of scope, the object does not get - // released before the call to PlasmaClient::Seal happens. - increment_object_count(object_id, &object, false); - return Status::OK(); -} - -Status PlasmaClient::Get(const ObjectID* object_ids, int64_t num_objects, - int64_t timeout_ms, ObjectBuffer* object_buffers) { - // Fill out the info for the objects that are already in use locally. - bool all_present = true; - for (int i = 0; i < num_objects; ++i) { - auto object_entry = objects_in_use_.find(object_ids[i]); - if (object_entry == objects_in_use_.end()) { - // This object is not currently in use by this client, so we need to send - // a request to the store. - all_present = false; - // Make a note to ourselves that the object is not present. - object_buffers[i].data_size = -1; - } else { - // NOTE: If the object is still unsealed, we will deadlock, since we must - // have been the one who created it. - ARROW_CHECK(object_entry->second->is_sealed) - << "Plasma client called get on an unsealed object that it created"; - PlasmaObject* object = &object_entry->second->object; - object_buffers[i].data = lookup_mmapped_file(object->handle.store_fd); - object_buffers[i].data = object_buffers[i].data + object->data_offset; - object_buffers[i].data_size = object->data_size; - object_buffers[i].metadata = object_buffers[i].data + object->data_size; - object_buffers[i].metadata_size = object->metadata_size; - // Increment the count of the number of instances of this object that this - // client is using. A call to PlasmaClient::Release is required to - // decrement this - // count. Cache the reference to the object. - increment_object_count(object_ids[i], object, true); - } - } - - if (all_present) { return Status::OK(); } - - // If we get here, then the objects aren't all currently in use by this - // client, so we need to send a request to the plasma store. 
- RETURN_NOT_OK(SendGetRequest(store_conn_, object_ids, num_objects, timeout_ms)); - std::vector buffer; - RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaGetReply, &buffer)); - std::vector received_object_ids(num_objects); - std::vector object_data(num_objects); - PlasmaObject* object; - RETURN_NOT_OK(ReadGetReply( - buffer.data(), received_object_ids.data(), object_data.data(), num_objects)); - - for (int i = 0; i < num_objects; ++i) { - DCHECK(received_object_ids[i] == object_ids[i]); - object = &object_data[i]; - if (object_buffers[i].data_size != -1) { - // If the object was already in use by the client, then the store should - // have returned it. - DCHECK_NE(object->data_size, -1); - // We won't use this file descriptor, but the store sent us one, so we - // need to receive it and then close it right away so we don't leak file - // descriptors. - int fd = recv_fd(store_conn_); - close(fd); - ARROW_CHECK(fd >= 0); - // We've already filled out the information for this object, so we can - // just continue. - continue; - } - // If we are here, the object was not currently in use, so we need to - // process the reply from the object store. - if (object->data_size != -1) { - // The object was retrieved. The user will be responsible for releasing - // this object. - int fd = recv_fd(store_conn_); - ARROW_CHECK(fd >= 0); - object_buffers[i].data = - lookup_or_mmap(fd, object->handle.store_fd, object->handle.mmap_size); - // Finish filling out the return values. - object_buffers[i].data = object_buffers[i].data + object->data_offset; - object_buffers[i].data_size = object->data_size; - object_buffers[i].metadata = object_buffers[i].data + object->data_size; - object_buffers[i].metadata_size = object->metadata_size; - // Increment the count of the number of instances of this object that this - // client is using. A call to PlasmaClient::Release is required to - // decrement this - // count. Cache the reference to the object. - increment_object_count(received_object_ids[i], object, true); - } else { - // The object was not retrieved. Make sure we already put a -1 here to - // indicate that the object was not retrieved. The caller is not - // responsible for releasing this object. - DCHECK_EQ(object_buffers[i].data_size, -1); - object_buffers[i].data_size = -1; - } - } - return Status::OK(); -} - -/// This is a helper method for implementing plasma_release. We maintain a -/// buffer -/// of release calls and only perform them once the buffer becomes full (as -/// judged by the aggregate sizes of the objects). There may be multiple release -/// calls for the same object ID in the buffer. In this case, the first release -/// calls will not do anything. The client will only send a message to the store -/// releasing the object when the client is truly done with the object. -/// -/// @param conn The plasma connection. -/// @param object_id The object ID to attempt to release. -Status PlasmaClient::PerformRelease(const ObjectID& object_id) { - // Decrement the count of the number of instances of this object that are - // being used by this client. The corresponding increment should have happened - // in PlasmaClient::Get. - auto object_entry = objects_in_use_.find(object_id); - ARROW_CHECK(object_entry != objects_in_use_.end()); - object_entry->second->count -= 1; - ARROW_CHECK(object_entry->second->count >= 0); - // Check if the client is no longer using this object. 
- if (object_entry->second->count == 0) { - // Decrement the count of the number of objects in this memory-mapped file - // that the client is using. The corresponding increment should have - // happened in plasma_get. - int fd = object_entry->second->object.handle.store_fd; - auto entry = mmap_table_.find(fd); - ARROW_CHECK(entry != mmap_table_.end()); - entry->second.count -= 1; - ARROW_CHECK(entry->second.count >= 0); - // If none are being used then unmap the file. - if (entry->second.count == 0) { - munmap(entry->second.pointer, entry->second.length); - // Remove the corresponding entry from the hash table. - mmap_table_.erase(fd); - } - // Tell the store that the client no longer needs the object. - RETURN_NOT_OK(SendReleaseRequest(store_conn_, object_id)); - // Update the in_use_object_bytes_. - in_use_object_bytes_ -= (object_entry->second->object.data_size + - object_entry->second->object.metadata_size); - DCHECK_GE(in_use_object_bytes_, 0); - // Remove the entry from the hash table of objects currently in use. - objects_in_use_.erase(object_id); - } - return Status::OK(); -} - -Status PlasmaClient::Release(const ObjectID& object_id) { - // Add the new object to the release history. - release_history_.push_front(object_id); - // If there are too many bytes in use by the client or if there are too many - // pending release calls, and there are at least some pending release calls in - // the release_history list, then release some objects. - while ((in_use_object_bytes_ > std::min(kL3CacheSizeBytes, store_capacity_ / 100) || - release_history_.size() > config_.release_delay) && - release_history_.size() > 0) { - // Perform a release for the object ID for the first pending release. - RETURN_NOT_OK(PerformRelease(release_history_.back())); - // Remove the last entry from the release history. - release_history_.pop_back(); - } - return Status::OK(); -} - -// This method is used to query whether the plasma store contains an object. -Status PlasmaClient::Contains(const ObjectID& object_id, bool* has_object) { - // Check if we already have a reference to the object. - if (objects_in_use_.count(object_id) > 0) { - *has_object = 1; - } else { - // If we don't already have a reference to the object, check with the store - // to see if we have the object. - RETURN_NOT_OK(SendContainsRequest(store_conn_, object_id)); - std::vector buffer; - RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaContainsReply, &buffer)); - ObjectID object_id2; - RETURN_NOT_OK(ReadContainsReply(buffer.data(), &object_id2, has_object)); - } - return Status::OK(); -} - -static void ComputeBlockHash(const unsigned char* data, int64_t nbytes, uint64_t* hash) { - XXH64_state_t hash_state; - XXH64_reset(&hash_state, XXH64_DEFAULT_SEED); - XXH64_update(&hash_state, data, nbytes); - *hash = XXH64_digest(&hash_state); -} - -static inline bool compute_object_hash_parallel( - XXH64_state_t* hash_state, const unsigned char* data, int64_t nbytes) { - // Note that this function will likely be faster if the address of data is - // aligned on a 64-byte boundary. 
- const int num_threads = kThreadPoolSize; - uint64_t threadhash[num_threads + 1]; - const uint64_t data_address = reinterpret_cast(data); - const uint64_t num_blocks = nbytes / BLOCK_SIZE; - const uint64_t chunk_size = (num_blocks / num_threads) * BLOCK_SIZE; - const uint64_t right_address = data_address + chunk_size * num_threads; - const uint64_t suffix = (data_address + nbytes) - right_address; - // Now the data layout is | k * num_threads * block_size | suffix | == - // | num_threads * chunk_size | suffix |, where chunk_size = k * block_size. - // Each thread gets a "chunk" of k blocks, except the suffix thread. - - for (int i = 0; i < num_threads; i++) { - threadpool_[i] = std::thread(ComputeBlockHash, - reinterpret_cast(data_address) + i * chunk_size, chunk_size, - &threadhash[i]); - } - ComputeBlockHash( - reinterpret_cast(right_address), suffix, &threadhash[num_threads]); - - // Join the threads. - for (auto& t : threadpool_) { - if (t.joinable()) { t.join(); } - } - - XXH64_update(hash_state, (unsigned char*)threadhash, sizeof(threadhash)); - return true; -} - -static uint64_t compute_object_hash(const ObjectBuffer& obj_buffer) { - XXH64_state_t hash_state; - XXH64_reset(&hash_state, XXH64_DEFAULT_SEED); - if (obj_buffer.data_size >= kBytesInMB) { - compute_object_hash_parallel( - &hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size); - } else { - XXH64_update(&hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size); - } - XXH64_update( - &hash_state, (unsigned char*)obj_buffer.metadata, obj_buffer.metadata_size); - return XXH64_digest(&hash_state); -} - -bool plasma_compute_object_hash( - PlasmaClient* conn, ObjectID object_id, unsigned char* digest) { - // Get the plasma object data. We pass in a timeout of 0 to indicate that - // the operation should timeout immediately. - ObjectBuffer object_buffer; - ARROW_CHECK_OK(conn->Get(&object_id, 1, 0, &object_buffer)); - // If the object was not retrieved, return false. - if (object_buffer.data_size == -1) { return false; } - // Compute the hash. - uint64_t hash = compute_object_hash(object_buffer); - memcpy(digest, &hash, sizeof(hash)); - // Release the plasma object. - ARROW_CHECK_OK(conn->Release(object_id)); - return true; -} - -Status PlasmaClient::Seal(const ObjectID& object_id) { - // Make sure this client has a reference to the object before sending the - // request to Plasma. - auto object_entry = objects_in_use_.find(object_id); - ARROW_CHECK(object_entry != objects_in_use_.end()) - << "Plasma client called seal an object without a reference to it"; - ARROW_CHECK(!object_entry->second->is_sealed) - << "Plasma client called seal an already sealed object"; - object_entry->second->is_sealed = true; - /// Send the seal request to Plasma. - static unsigned char digest[kDigestSize]; - ARROW_CHECK(plasma_compute_object_hash(this, object_id, &digest[0])); - RETURN_NOT_OK(SendSealRequest(store_conn_, object_id, &digest[0])); - // We call PlasmaClient::Release to decrement the number of instances of this - // object - // that are currently being used by this client. The corresponding increment - // happened in plasma_create and was used to ensure that the object was not - // released before the call to PlasmaClient::Seal. - return Release(object_id); -} - -Status PlasmaClient::Delete(const ObjectID& object_id) { - // TODO(rkn): In the future, we can use this method to give hints to the - // eviction policy about when an object will no longer be needed. 
- return Status::NotImplemented("PlasmaClient::Delete is not implemented."); -} - -Status PlasmaClient::Evict(int64_t num_bytes, int64_t& num_bytes_evicted) { - // Send a request to the store to evict objects. - RETURN_NOT_OK(SendEvictRequest(store_conn_, num_bytes)); - // Wait for a response with the number of bytes actually evicted. - std::vector buffer; - int64_t type; - RETURN_NOT_OK(ReadMessage(store_conn_, &type, &buffer)); - return ReadEvictReply(buffer.data(), num_bytes_evicted); -} - -Status PlasmaClient::Subscribe(int* fd) { - int sock[2]; - // Create a non-blocking socket pair. This will only be used to send - // notifications from the Plasma store to the client. - socketpair(AF_UNIX, SOCK_STREAM, 0, sock); - // Make the socket non-blocking. - int flags = fcntl(sock[1], F_GETFL, 0); - ARROW_CHECK(fcntl(sock[1], F_SETFL, flags | O_NONBLOCK) == 0); - // Tell the Plasma store about the subscription. - RETURN_NOT_OK(SendSubscribeRequest(store_conn_)); - // Send the file descriptor that the Plasma store should use to push - // notifications about sealed objects to this client. - ARROW_CHECK(send_fd(store_conn_, sock[1]) >= 0); - close(sock[1]); - // Return the file descriptor that the client should use to read notifications - // about sealed objects. - *fd = sock[0]; - return Status::OK(); -} - -Status PlasmaClient::Connect(const std::string& store_socket_name, - const std::string& manager_socket_name, int release_delay) { - store_conn_ = connect_ipc_sock_retry(store_socket_name, -1, -1); - if (manager_socket_name != "") { - manager_conn_ = connect_ipc_sock_retry(manager_socket_name, -1, -1); - } else { - manager_conn_ = -1; - } - config_.release_delay = release_delay; - in_use_object_bytes_ = 0; - // Send a ConnectRequest to the store to get its memory capacity. - RETURN_NOT_OK(SendConnectRequest(store_conn_)); - std::vector buffer; - RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaConnectReply, &buffer)); - RETURN_NOT_OK(ReadConnectReply(buffer.data(), &store_capacity_)); - return Status::OK(); -} - -Status PlasmaClient::Disconnect() { - // NOTE: We purposefully do not finish sending release calls for objects in - // use, so that we don't duplicate PlasmaClient::Release calls (when handling - // a SIGTERM, for example). - - // Close the connections to Plasma. The Plasma store will release the objects - // that were in use by us when handling the SIGPIPE. 
- close(store_conn_); - if (manager_conn_ >= 0) { close(manager_conn_); } - return Status::OK(); -} - -#define h_addr h_addr_list[0] - -Status PlasmaClient::Transfer(const char* address, int port, const ObjectID& object_id) { - return SendDataRequest(manager_conn_, object_id, address, port); -} - -Status PlasmaClient::Fetch(int num_object_ids, const ObjectID* object_ids) { - ARROW_CHECK(manager_conn_ >= 0); - return SendFetchRequest(manager_conn_, object_ids, num_object_ids); -} - -int PlasmaClient::get_manager_fd() { - return manager_conn_; -} - -Status PlasmaClient::Info(const ObjectID& object_id, int* object_status) { - ARROW_CHECK(manager_conn_ >= 0); - - RETURN_NOT_OK(SendStatusRequest(manager_conn_, &object_id, 1)); - std::vector buffer; - RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaStatusReply, &buffer)); - ObjectID id; - RETURN_NOT_OK(ReadStatusReply(buffer.data(), &id, object_status, 1)); - ARROW_CHECK(object_id == id); - return Status::OK(); -} - -Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_requests, - int num_ready_objects, int64_t timeout_ms, int* num_objects_ready) { - ARROW_CHECK(manager_conn_ >= 0); - ARROW_CHECK(num_object_requests > 0); - ARROW_CHECK(num_ready_objects > 0); - ARROW_CHECK(num_ready_objects <= num_object_requests); - - for (int i = 0; i < num_object_requests; ++i) { - ARROW_CHECK(object_requests[i].type == PLASMA_QUERY_LOCAL || - object_requests[i].type == PLASMA_QUERY_ANYWHERE); - } - - RETURN_NOT_OK(SendWaitRequest(manager_conn_, object_requests, num_object_requests, - num_ready_objects, timeout_ms)); - std::vector buffer; - RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaWaitReply, &buffer)); - RETURN_NOT_OK(ReadWaitReply(buffer.data(), object_requests, &num_ready_objects)); - - *num_objects_ready = 0; - for (int i = 0; i < num_object_requests; ++i) { - int type = object_requests[i].type; - int status = object_requests[i].status; - switch (type) { - case PLASMA_QUERY_LOCAL: - if (status == ObjectStatus_Local) { *num_objects_ready += 1; } - break; - case PLASMA_QUERY_ANYWHERE: - if (status == ObjectStatus_Local || status == ObjectStatus_Remote) { - *num_objects_ready += 1; - } else { - ARROW_CHECK(status == ObjectStatus_Nonexistent); - } - break; - default: - ARROW_LOG(FATAL) << "This code should be unreachable."; - } - } - return Status::OK(); -} diff --git a/cpp/src/plasma/client.h b/cpp/src/plasma/client.h deleted file mode 100644 index fb3a161795d47..0000000000000 --- a/cpp/src/plasma/client.h +++ /dev/null @@ -1,343 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
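The hunks above delete client.cc, the user-facing Plasma client implementation, and the hunk continuing below deletes its header, client.h. As an illustration of the API being removed (not part of the patch itself), a minimal sketch of a pre-0.5.0 client session might look roughly like this; the store socket path /tmp/plasma_store and the "hello" payload are hypothetical, and only calls declared in the deleted client.h are used.

// Hedged sketch against the removed pre-0.5.0 Plasma client API.
#include <cstring>
#include "plasma/client.h"

int main() {
  PlasmaClient client;
  // Empty manager socket name means no manager connection is made.
  ARROW_CHECK_OK(
      client.Connect("/tmp/plasma_store", "", PLASMA_DEFAULT_RELEASE_DELAY));

  // Create an object, fill its data buffer, then seal it to make it immutable.
  ObjectID id = ObjectID::from_random();
  int64_t data_size = 5;
  uint8_t* data;
  ARROW_CHECK_OK(client.Create(id, data_size, nullptr, 0, &data));
  std::memcpy(data, "hello", data_size);
  ARROW_CHECK_OK(client.Seal(id));

  // Fetch it back (timeout -1 means wait indefinitely) and release when done.
  ObjectBuffer buffer;
  ARROW_CHECK_OK(client.Get(&id, 1, -1, &buffer));
  ARROW_CHECK_OK(client.Release(id));
  return client.Disconnect().ok() ? 0 : 1;
}

PLASMA_DEFAULT_RELEASE_DELAY and ObjectBuffer come from the same deleted header; per its documentation, every Get must be paired with a Release once the caller is finished with the buffer.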
- -#ifndef PLASMA_CLIENT_H -#define PLASMA_CLIENT_H - -#include -#include - -#include -#include - -#include "plasma/plasma.h" - -using arrow::Status; - -#define PLASMA_DEFAULT_RELEASE_DELAY 64 - -// Use 100MB as an overestimate of the L3 cache size. -constexpr int64_t kL3CacheSizeBytes = 100000000; - -/// Object buffer data structure. -struct ObjectBuffer { - /// The size in bytes of the data object. - int64_t data_size; - /// The address of the data object. - uint8_t* data; - /// The metadata size in bytes. - int64_t metadata_size; - /// The address of the metadata. - uint8_t* metadata; -}; - -/// Configuration options for the plasma client. -struct PlasmaClientConfig { - /// Number of release calls we wait until the object is actually released. - /// This allows us to avoid invalidating the cpu cache on workers if objects - /// are reused accross tasks. - size_t release_delay; -}; - -struct ClientMmapTableEntry { - /// The result of mmap for this file descriptor. - uint8_t* pointer; - /// The length of the memory-mapped file. - size_t length; - /// The number of objects in this memory-mapped file that are currently being - /// used by the client. When this count reaches zeros, we unmap the file. - int count; -}; - -struct ObjectInUseEntry { - /// A count of the number of times this client has called PlasmaClient::Create - /// or - /// PlasmaClient::Get on this object ID minus the number of calls to - /// PlasmaClient::Release. - /// When this count reaches zero, we remove the entry from the ObjectsInUse - /// and decrement a count in the relevant ClientMmapTableEntry. - int count; - /// Cached information to read the object. - PlasmaObject object; - /// A flag representing whether the object has been sealed. - bool is_sealed; -}; - -class PlasmaClient { - public: - /// Connect to the local plasma store and plasma manager. Return - /// the resulting connection. - /// - /// @param store_socket_name The name of the UNIX domain socket to use to - /// connect to the Plasma store. - /// @param manager_socket_name The name of the UNIX domain socket to use to - /// connect to the local Plasma manager. If this is "", then this - /// function will not connect to a manager. - /// @param release_delay Number of released objects that are kept around - /// and not evicted to avoid too many munmaps. - /// @return The return status. - Status Connect(const std::string& store_socket_name, - const std::string& manager_socket_name, int release_delay); - - /// Create an object in the Plasma Store. Any metadata for this object must be - /// be passed in when the object is created. - /// - /// @param object_id The ID to use for the newly created object. - /// @param data_size The size in bytes of the space to be allocated for this - /// object's - /// data (this does not include space used for metadata). - /// @param metadata The object's metadata. If there is no metadata, this - /// pointer - /// should be NULL. - /// @param metadata_size The size in bytes of the metadata. If there is no - /// metadata, this should be 0. - /// @param data The address of the newly created object will be written here. - /// @return The return status. - Status Create(const ObjectID& object_id, int64_t data_size, uint8_t* metadata, - int64_t metadata_size, uint8_t** data); - - /// Get some objects from the Plasma Store. This function will block until the - /// objects have all been created and sealed in the Plasma Store or the - /// timeout - /// expires. 
The caller is responsible for releasing any retrieved objects, - /// but - /// the caller should not release objects that were not retrieved. - /// - /// @param object_ids The IDs of the objects to get. - /// @param num_object_ids The number of object IDs to get. - /// @param timeout_ms The amount of time in milliseconds to wait before this - /// request times out. If this value is -1, then no timeout is set. - /// @param object_buffers An array where the results will be stored. If the - /// data - /// size field is -1, then the object was not retrieved. - /// @return The return status. - Status Get(const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms, - ObjectBuffer* object_buffers); - - /// Tell Plasma that the client no longer needs the object. This should be - /// called - /// after Get when the client is done with the object. After this call, - /// the address returned by Get is no longer valid. This should be called - /// once for each call to Get (with the same object ID). - /// - /// @param object_id The ID of the object that is no longer needed. - /// @return The return status. - Status Release(const ObjectID& object_id); - - /// Check if the object store contains a particular object and the object has - /// been sealed. The result will be stored in has_object. - /// - /// @todo: We may want to indicate if the object has been created but not - /// sealed. - /// - /// @param object_id The ID of the object whose presence we are checking. - /// @param has_object The function will write true at this address if - /// the object is present and false if it is not present. - /// @return The return status. - Status Contains(const ObjectID& object_id, bool* has_object); - - /// Seal an object in the object store. The object will be immutable after - /// this - /// call. - /// - /// @param object_id The ID of the object to seal. - /// @return The return status. - Status Seal(const ObjectID& object_id); - - /// Delete an object from the object store. This currently assumes that the - /// object is present and has been sealed. - /// - /// @todo We may want to allow the deletion of objects that are not present or - /// haven't been sealed. - /// - /// @param object_id The ID of the object to delete. - /// @return The return status. - Status Delete(const ObjectID& object_id); - - /// Delete objects until we have freed up num_bytes bytes or there are no more - /// released objects that can be deleted. - /// - /// @param num_bytes The number of bytes to try to free up. - /// @param num_bytes_evicted Out parameter for total number of bytes of space - /// retrieved. - /// @return The return status. - Status Evict(int64_t num_bytes, int64_t& num_bytes_evicted); - - /// Subscribe to notifications when objects are sealed in the object store. - /// Whenever an object is sealed, a message will be written to the client - /// socket - /// that is returned by this method. - /// - /// @param fd Out parameter for the file descriptor the client should use to - /// read notifications - /// from the object store about sealed objects. - /// @return The return status. - Status Subscribe(int* fd); - - /// Disconnect from the local plasma instance, including the local store and - /// manager. - /// - /// @return The return status. - Status Disconnect(); - - /// Attempt to initiate the transfer of some objects from remote Plasma - /// Stores. - /// This method does not guarantee that the fetched objects will arrive - /// locally. 
- /// - /// For an object that is available in the local Plasma Store, this method - /// will - /// not do anything. For an object that is not available locally, it will - /// check - /// if the object are already being fetched. If so, it will not do anything. - /// If - /// not, it will query the object table for a list of Plasma Managers that - /// have - /// the object. The object table will return a non-empty list, and this Plasma - /// Manager will attempt to initiate transfers from one of those Plasma - /// Managers. - /// - /// This function is non-blocking. - /// - /// This method is idempotent in the sense that it is ok to call it multiple - /// times. - /// - /// @param num_object_ids The number of object IDs fetch is being called on. - /// @param object_ids The IDs of the objects that fetch is being called on. - /// @return The return status. - Status Fetch(int num_object_ids, const ObjectID* object_ids); - - /// Wait for (1) a specified number of objects to be available (sealed) in the - /// local Plasma Store or in a remote Plasma Store, or (2) for a timeout to - /// expire. This is a blocking call. - /// - /// @param num_object_requests Size of the object_requests array. - /// @param object_requests Object event array. Each element contains a request - /// for a particular object_id. The type of request is specified in the - /// "type" field. - /// - A PLASMA_QUERY_LOCAL request is satisfied when object_id becomes - /// available in the local Plasma Store. In this case, this function - /// sets the "status" field to ObjectStatus_Local. Note, if the - /// status - /// is not ObjectStatus_Local, it will be ObjectStatus_Nonexistent, - /// but it may exist elsewhere in the system. - /// - A PLASMA_QUERY_ANYWHERE request is satisfied when object_id - /// becomes - /// available either at the local Plasma Store or on a remote Plasma - /// Store. In this case, the functions sets the "status" field to - /// ObjectStatus_Local or ObjectStatus_Remote. - /// @param num_ready_objects The number of requests in object_requests array - /// that - /// must be satisfied before the function returns, unless it timeouts. - /// The num_ready_objects should be no larger than num_object_requests. - /// @param timeout_ms Timeout value in milliseconds. If this timeout expires - /// before min_num_ready_objects of requests are satisfied, the - /// function - /// returns. - /// @param num_objects_ready Out parameter for number of satisfied requests in - /// the object_requests list. If the returned number is less than - /// min_num_ready_objects this means that timeout expired. - /// @return The return status. - Status Wait(int64_t num_object_requests, ObjectRequest* object_requests, - int num_ready_objects, int64_t timeout_ms, int* num_objects_ready); - - /// Transfer local object to a different plasma manager. - /// - /// @param conn The object containing the connection state. - /// @param addr IP address of the plasma manager we are transfering to. - /// @param port Port of the plasma manager we are transfering to. - /// @object_id ObjectID of the object we are transfering. - /// @return The return status. - Status Transfer(const char* addr, int port, const ObjectID& object_id); - - /// Return the status of a given object. This method may query the object - /// table. - /// - /// @param conn The object containing the connection state. - /// @param object_id The ID of the object whose status we query. - /// @param object_status Out parameter for object status. Can take the - /// following values. 
- /// - PLASMA_CLIENT_LOCAL, if object is stored in the local Plasma - /// Store. - /// has been already scheduled by the Plasma Manager. - /// - PLASMA_CLIENT_TRANSFER, if the object is either currently being - /// transferred or just scheduled. - /// - PLASMA_CLIENT_REMOTE, if the object is stored at a remote - /// Plasma Store. - /// - PLASMA_CLIENT_DOES_NOT_EXIST, if the object doesn’t exist in the - /// system. - /// @return The return status. - Status Info(const ObjectID& object_id, int* object_status); - - /// Get the file descriptor for the socket connection to the plasma manager. - /// - /// @param conn The plasma connection. - /// @return The file descriptor for the manager connection. If there is no - /// connection to the manager, this is -1. - int get_manager_fd(); - - private: - Status PerformRelease(const ObjectID& object_id); - - uint8_t* lookup_or_mmap(int fd, int store_fd_val, int64_t map_size); - - uint8_t* lookup_mmapped_file(int store_fd_val); - - void increment_object_count( - const ObjectID& object_id, PlasmaObject* object, bool is_sealed); - - /// File descriptor of the Unix domain socket that connects to the store. - int store_conn_; - /// File descriptor of the Unix domain socket that connects to the manager. - int manager_conn_; - /// Table of dlmalloc buffer files that have been memory mapped so far. This - /// is a hash table mapping a file descriptor to a struct containing the - /// address of the corresponding memory-mapped file. - std::unordered_map mmap_table_; - /// A hash table of the object IDs that are currently being used by this - /// client. - std::unordered_map, UniqueIDHasher> - objects_in_use_; - /// Object IDs of the last few release calls. This is a deque and - /// is used to delay releasing objects to see if they can be reused by - /// subsequent tasks so we do not unneccessarily invalidate cpu caches. - /// TODO(pcm): replace this with a proper lru cache using the size of the L3 - /// cache. - std::deque release_history_; - /// The number of bytes in the combined objects that are held in the release - /// history doubly-linked list. If this is too large then the client starts - /// releasing objects. - int64_t in_use_object_bytes_; - /// Configuration options for the plasma client. - PlasmaClientConfig config_; - /// The amount of memory available to the Plasma store. The client needs this - /// information to make sure that it does not delay in releasing so much - /// memory that the store is unable to evict enough objects to free up space. - int64_t store_capacity_; -}; - -/// Compute the hash of an object in the object store. -/// -/// @param conn The object containing the connection state. -/// @param object_id The ID of the object we want to hash. -/// @param digest A pointer at which to return the hash digest of the object. -/// The pointer must have at least DIGEST_SIZE bytes allocated. -/// @return A boolean representing whether the hash operation succeeded. -bool plasma_compute_object_hash( - PlasmaClient* conn, ObjectID object_id, unsigned char* digest); - -#endif // PLASMA_CLIENT_H diff --git a/cpp/src/plasma/common.cc b/cpp/src/plasma/common.cc deleted file mode 100644 index a09a963fa4769..0000000000000 --- a/cpp/src/plasma/common.cc +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/common.h" - -#include - -#include "format/plasma_generated.h" - -using arrow::Status; - -UniqueID UniqueID::from_random() { - UniqueID id; - uint8_t* data = id.mutable_data(); - std::random_device engine; - for (int i = 0; i < kUniqueIDSize; i++) { - data[i] = static_cast(engine()); - } - return id; -} - -UniqueID UniqueID::from_binary(const std::string& binary) { - UniqueID id; - std::memcpy(&id, binary.data(), sizeof(id)); - return id; -} - -const uint8_t* UniqueID::data() const { - return id_; -} - -uint8_t* UniqueID::mutable_data() { - return id_; -} - -std::string UniqueID::binary() const { - return std::string(reinterpret_cast(id_), kUniqueIDSize); -} - -std::string UniqueID::hex() const { - constexpr char hex[] = "0123456789abcdef"; - std::string result; - for (int i = 0; i < kUniqueIDSize; i++) { - unsigned int val = id_[i]; - result.push_back(hex[val >> 4]); - result.push_back(hex[val & 0xf]); - } - return result; -} - -bool UniqueID::operator==(const UniqueID& rhs) const { - return std::memcmp(data(), rhs.data(), kUniqueIDSize) == 0; -} - -Status plasma_error_status(int plasma_error) { - switch (plasma_error) { - case PlasmaError_OK: - return Status::OK(); - case PlasmaError_ObjectExists: - return Status::PlasmaObjectExists("object already exists in the plasma store"); - case PlasmaError_ObjectNonexistent: - return Status::PlasmaObjectNonexistent("object does not exist in the plasma store"); - case PlasmaError_OutOfMemory: - return Status::PlasmaStoreFull("object does not fit in the plasma store"); - default: - ARROW_LOG(FATAL) << "unknown plasma error code " << plasma_error; - } - return Status::OK(); -} diff --git a/cpp/src/plasma/common.h b/cpp/src/plasma/common.h deleted file mode 100644 index 85dc74bf86e0d..0000000000000 --- a/cpp/src/plasma/common.h +++ /dev/null @@ -1,63 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
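The common.cc hunk above also removes the UniqueID type that backs ObjectID: twenty random bytes with binary and hex encodings. A small round-trip example against that removed API, shown here purely for orientation, would look roughly like this.

// Hedged sketch against the removed common.h/common.cc API (kUniqueIDSize is 20).
#include <iostream>
#include "plasma/common.h"

int main() {
  ObjectID id = ObjectID::from_random();        // 20 random bytes
  std::string wire = id.binary();               // raw bytes, e.g. for a protocol message
  ObjectID copy = ObjectID::from_binary(wire);  // reconstruct from the raw bytes
  ARROW_CHECK(id == copy);
  std::cout << id.hex() << std::endl;           // 40-character lowercase hex string
  return 0;
}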
- -#ifndef PLASMA_COMMON_H -#define PLASMA_COMMON_H - -#include -#include -// TODO(pcm): Convert getopt and sscanf in the store to use more idiomatic C++ -// and get rid of the next three lines: -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS -#endif - -#include "arrow/status.h" -#include "arrow/util/logging.h" - -constexpr int64_t kUniqueIDSize = 20; - -class UniqueID { - public: - static UniqueID from_random(); - static UniqueID from_binary(const std::string& binary); - bool operator==(const UniqueID& rhs) const; - const uint8_t* data() const; - uint8_t* mutable_data(); - std::string binary() const; - std::string hex() const; - - private: - uint8_t id_[kUniqueIDSize]; -}; - -static_assert(std::is_pod::value, "UniqueID must be plain old data"); - -struct UniqueIDHasher { - // ObjectID hashing function. - size_t operator()(const UniqueID& id) const { - size_t result; - std::memcpy(&result, id.data(), sizeof(size_t)); - return result; - } -}; - -typedef UniqueID ObjectID; - -arrow::Status plasma_error_status(int plasma_error); - -#endif // PLASMA_COMMON_H diff --git a/cpp/src/plasma/events.cc b/cpp/src/plasma/events.cc deleted file mode 100644 index a9f7356e1f67e..0000000000000 --- a/cpp/src/plasma/events.cc +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/events.h" - -#include - -void EventLoop::file_event_callback( - aeEventLoop* loop, int fd, void* context, int events) { - FileCallback* callback = reinterpret_cast(context); - (*callback)(events); -} - -int EventLoop::timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* context) { - TimerCallback* callback = reinterpret_cast(context); - return (*callback)(timer_id); -} - -constexpr int kInitialEventLoopSize = 1024; - -EventLoop::EventLoop() { - loop_ = aeCreateEventLoop(kInitialEventLoopSize); -} - -bool EventLoop::add_file_event(int fd, int events, const FileCallback& callback) { - if (file_callbacks_.find(fd) != file_callbacks_.end()) { return false; } - auto data = std::unique_ptr(new FileCallback(callback)); - void* context = reinterpret_cast(data.get()); - // Try to add the file descriptor. - int err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context); - // If it cannot be added, increase the size of the event loop. - if (err == AE_ERR && errno == ERANGE) { - err = aeResizeSetSize(loop_, 3 * aeGetSetSize(loop_) / 2); - if (err != AE_OK) { return false; } - err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context); - } - // In any case, test if there were errors. 
- if (err == AE_OK) { - file_callbacks_.emplace(fd, std::move(data)); - return true; - } - return false; -} - -void EventLoop::remove_file_event(int fd) { - aeDeleteFileEvent(loop_, fd, AE_READABLE | AE_WRITABLE); - file_callbacks_.erase(fd); -} - -void EventLoop::run() { - aeMain(loop_); -} - -int64_t EventLoop::add_timer(int64_t timeout, const TimerCallback& callback) { - auto data = std::unique_ptr(new TimerCallback(callback)); - void* context = reinterpret_cast(data.get()); - int64_t timer_id = - aeCreateTimeEvent(loop_, timeout, EventLoop::timer_event_callback, context, NULL); - timer_callbacks_.emplace(timer_id, std::move(data)); - return timer_id; -} - -int EventLoop::remove_timer(int64_t timer_id) { - int err = aeDeleteTimeEvent(loop_, timer_id); - timer_callbacks_.erase(timer_id); - return err; -} diff --git a/cpp/src/plasma/events.h b/cpp/src/plasma/events.h deleted file mode 100644 index bd93d6bb2a6fd..0000000000000 --- a/cpp/src/plasma/events.h +++ /dev/null @@ -1,99 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_EVENTS -#define PLASMA_EVENTS - -#include -#include -#include - -extern "C" { -#include "ae/ae.h" -} - -/// Constant specifying that the timer is done and it will be removed. -constexpr int kEventLoopTimerDone = AE_NOMORE; - -/// Read event on the file descriptor. -constexpr int kEventLoopRead = AE_READABLE; - -/// Write event on the file descriptor. -constexpr int kEventLoopWrite = AE_WRITABLE; - -typedef long long TimerID; // NOLINT - -class EventLoop { - public: - // Signature of the handler that will be called when there is a new event - // on the file descriptor that this handler has been registered for. - // - // The arguments are the event flags (read or write). - using FileCallback = std::function; - - // This handler will be called when a timer times out. The timer id is - // passed as an argument. The return is the number of milliseconds the timer - // shall be reset to or kEventLoopTimerDone if the timer shall not be - // triggered again. - using TimerCallback = std::function; - - EventLoop(); - - /// Add a new file event handler to the event loop. - /// - /// @param fd The file descriptor we are listening to. - /// @param events The flags for events we are listening to (read or write). - /// @param callback The callback that will be called when the event happens. - /// @return Returns true if the event handler was added successfully. - bool add_file_event(int fd, int events, const FileCallback& callback); - - /// Remove a file event handler from the event loop. - /// - /// @param fd The file descriptor of the event handler. - /// @return Void. - void remove_file_event(int fd); - - /// Register a handler that will be called after a time slice of - /// "timeout" milliseconds. 
- /// - /// @param timeout The timeout in milliseconds. - /// @param callback The callback for the timeout. - /// @return The ID of the newly created timer. - int64_t add_timer(int64_t timeout, const TimerCallback& callback); - - /// Remove a timer handler from the event loop. - /// - /// @param timer_id The ID of the timer that is to be removed. - /// @return The ae.c error code. TODO(pcm): needs to be standardized - int remove_timer(int64_t timer_id); - - /// Run the event loop. - /// - /// @return Void. - void run(); - - private: - static void file_event_callback(aeEventLoop* loop, int fd, void* context, int events); - - static int timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* context); - - aeEventLoop* loop_; - std::unordered_map> file_callbacks_; - std::unordered_map> timer_callbacks_; -}; - -#endif // PLASMA_EVENTS diff --git a/cpp/src/plasma/eviction_policy.cc b/cpp/src/plasma/eviction_policy.cc deleted file mode 100644 index 4ae6384d42543..0000000000000 --- a/cpp/src/plasma/eviction_policy.cc +++ /dev/null @@ -1,107 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/eviction_policy.h" - -#include - -void LRUCache::add(const ObjectID& key, int64_t size) { - auto it = item_map_.find(key); - ARROW_CHECK(it == item_map_.end()); - /* Note that it is important to use a list so the iterators stay valid. */ - item_list_.emplace_front(key, size); - item_map_.emplace(key, item_list_.begin()); -} - -void LRUCache::remove(const ObjectID& key) { - auto it = item_map_.find(key); - ARROW_CHECK(it != item_map_.end()); - item_list_.erase(it->second); - item_map_.erase(it); -} - -int64_t LRUCache::choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict) { - int64_t bytes_evicted = 0; - auto it = item_list_.end(); - while (bytes_evicted < num_bytes_required && it != item_list_.begin()) { - it--; - objects_to_evict->push_back(it->first); - bytes_evicted += it->second; - } - return bytes_evicted; -} - -EvictionPolicy::EvictionPolicy(PlasmaStoreInfo* store_info) - : memory_used_(0), store_info_(store_info) {} - -int64_t EvictionPolicy::choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict) { - int64_t bytes_evicted = - cache_.choose_objects_to_evict(num_bytes_required, objects_to_evict); - /* Update the LRU cache. */ - for (auto& object_id : *objects_to_evict) { - cache_.remove(object_id); - } - /* Update the number of bytes used. 
*/ - memory_used_ -= bytes_evicted; - return bytes_evicted; -} - -void EvictionPolicy::object_created(const ObjectID& object_id) { - auto entry = store_info_->objects[object_id].get(); - cache_.add(object_id, entry->info.data_size + entry->info.metadata_size); -} - -bool EvictionPolicy::require_space( - int64_t size, std::vector* objects_to_evict) { - /* Check if there is enough space to create the object. */ - int64_t required_space = memory_used_ + size - store_info_->memory_capacity; - int64_t num_bytes_evicted; - if (required_space > 0) { - /* Try to free up at least as much space as we need right now but ideally - * up to 20% of the total capacity. */ - int64_t space_to_free = std::max(size, store_info_->memory_capacity / 5); - ARROW_LOG(DEBUG) << "not enough space to create this object, so evicting objects"; - /* Choose some objects to evict, and update the return pointers. */ - num_bytes_evicted = choose_objects_to_evict(space_to_free, objects_to_evict); - ARROW_LOG(INFO) << "There is not enough space to create this object, so evicting " - << objects_to_evict->size() << " objects to free up " - << num_bytes_evicted << " bytes."; - } else { - num_bytes_evicted = 0; - } - if (num_bytes_evicted >= required_space) { - /* We only increment the space used if there is enough space to create the - * object. */ - memory_used_ += size; - } - return num_bytes_evicted >= required_space; -} - -void EvictionPolicy::begin_object_access( - const ObjectID& object_id, std::vector* objects_to_evict) { - /* If the object is in the LRU cache, remove it. */ - cache_.remove(object_id); -} - -void EvictionPolicy::end_object_access( - const ObjectID& object_id, std::vector* objects_to_evict) { - auto entry = store_info_->objects[object_id].get(); - /* Add the object to the LRU cache.*/ - cache_.add(object_id, entry->info.data_size + entry->info.metadata_size); -} diff --git a/cpp/src/plasma/eviction_policy.h b/cpp/src/plasma/eviction_policy.h deleted file mode 100644 index 3815fc6652f0c..0000000000000 --- a/cpp/src/plasma/eviction_policy.h +++ /dev/null @@ -1,134 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_EVICTION_POLICY_H -#define PLASMA_EVICTION_POLICY_H - -#include -#include -#include -#include - -#include "plasma/common.h" -#include "plasma/plasma.h" - -// ==== The eviction policy ==== -// -// This file contains declaration for all functions and data structures that -// need to be provided if you want to implement a new eviction algorithm for the -// Plasma store. 
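Before the declarations that follow, a sketch of the call sequence the Plasma store drives against this interface when creating an object. Method names match the declarations below; allocation, locking and error handling are elided, and the helper shown is illustrative rather than store code:

#include <vector>
#include "plasma/eviction_policy.h"

bool make_room_and_create(EvictionPolicy* policy, const ObjectID& object_id,
                          int64_t data_size, int64_t metadata_size) {
  std::vector<ObjectID> objects_to_evict;
  // Ask the policy for space; it fills objects_to_evict with LRU victims.
  if (!policy->require_space(data_size + metadata_size, &objects_to_evict)) {
    return false;  // even after eviction there is not enough capacity
  }
  // The caller is expected to actually evict the chosen objects here,
  // then allocate the new entry and let the policy start tracking it:
  // policy->object_created(object_id);  (once the entry exists in store_info)
  return true;
}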
- -class LRUCache { - public: - LRUCache() {} - - void add(const ObjectID& key, int64_t size); - - void remove(const ObjectID& key); - - int64_t choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict); - - private: - /// A doubly-linked list containing the items in the cache and - /// their sizes in LRU order. - typedef std::list> ItemList; - ItemList item_list_; - /// A hash table mapping the object ID of an object in the cache to its - /// location in the doubly linked list item_list_. - std::unordered_map item_map_; -}; - -/// The eviction policy. -class EvictionPolicy { - public: - /// Construct an eviction policy. - /// - /// @param store_info Information about the Plasma store that is exposed - /// to the eviction policy. - explicit EvictionPolicy(PlasmaStoreInfo* store_info); - - /// This method will be called whenever an object is first created in order to - /// add it to the LRU cache. This is done so that the first time, the Plasma - /// store calls begin_object_access, we can remove the object from the LRU - /// cache. - /// - /// @param object_id The object ID of the object that was created. - /// @return Void. - void object_created(const ObjectID& object_id); - - /// This method will be called when the Plasma store needs more space, perhaps - /// to create a new object. If the required amount of space cannot be freed up, - /// then a fatal error will be thrown. When this method is called, the eviction - /// policy will assume that the objects chosen to be evicted will in fact be - /// evicted from the Plasma store by the caller. - /// - /// @param size The size in bytes of the new object, including both data and - /// metadata. - /// @param objects_to_evict The object IDs that were chosen for eviction will - /// be stored into this vector. - /// @return True if enough space can be freed and false otherwise. - bool require_space(int64_t size, std::vector* objects_to_evict); - - /// This method will be called whenever an unused object in the Plasma store - /// starts to be used. When this method is called, the eviction policy will - /// assume that the objects chosen to be evicted will in fact be evicted from - /// the Plasma store by the caller. - /// - /// @param object_id The ID of the object that is now being used. - /// @param objects_to_evict The object IDs that were chosen for eviction will - /// be stored into this vector. - /// @return Void. - void begin_object_access( - const ObjectID& object_id, std::vector* objects_to_evict); - - /// This method will be called whenever an object in the Plasma store that was - /// being used is no longer being used. When this method is called, the - /// eviction policy will assume that the objects chosen to be evicted will in - /// fact be evicted from the Plasma store by the caller. - /// - /// @param object_id The ID of the object that is no longer being used. - /// @param objects_to_evict The object IDs that were chosen for eviction will - /// be stored into this vector. - /// @return Void. - void end_object_access( - const ObjectID& object_id, std::vector* objects_to_evict); - - /// Choose some objects to evict from the Plasma store. When this method is - /// called, the eviction policy will assume that the objects chosen to be - /// evicted will in fact be evicted from the Plasma store by the caller. - /// - /// @note This method is not part of the API. It is exposed in the header file - /// only for testing. - /// - /// @param num_bytes_required The number of bytes of space to try to free up. 
- /// @param objects_to_evict The object IDs that were chosen for eviction will - /// be stored into this vector. - /// @return The total number of bytes of space chosen to be evicted. - int64_t choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict); - - private: - /// The amount of memory (in bytes) currently being used. - int64_t memory_used_; - /// Pointer to the plasma store info. - PlasmaStoreInfo* store_info_; - /// Datastructure for the LRU cache. - LRUCache cache_; -}; - -#endif // PLASMA_EVICTION_POLICY_H diff --git a/cpp/src/plasma/extension.cc b/cpp/src/plasma/extension.cc deleted file mode 100644 index 5d61e337c108d..0000000000000 --- a/cpp/src/plasma/extension.cc +++ /dev/null @@ -1,456 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/extension.h" - -#include -#include - -#include "plasma/client.h" -#include "plasma/common.h" -#include "plasma/io.h" -#include "plasma/protocol.h" - -PyObject* PlasmaOutOfMemoryError; -PyObject* PlasmaObjectExistsError; - -PyObject* PyPlasma_connect(PyObject* self, PyObject* args) { - const char* store_socket_name; - const char* manager_socket_name; - int release_delay; - if (!PyArg_ParseTuple( - args, "ssi", &store_socket_name, &manager_socket_name, &release_delay)) { - return NULL; - } - PlasmaClient* client = new PlasmaClient(); - ARROW_CHECK_OK(client->Connect(store_socket_name, manager_socket_name, release_delay)); - - return PyCapsule_New(client, "plasma", NULL); -} - -PyObject* PyPlasma_disconnect(PyObject* self, PyObject* args) { - PyObject* client_capsule; - if (!PyArg_ParseTuple(args, "O", &client_capsule)) { return NULL; } - PlasmaClient* client; - ARROW_CHECK(PyObjectToPlasmaClient(client_capsule, &client)); - ARROW_CHECK_OK(client->Disconnect()); - /* We use the context of the connection capsule to indicate if the connection - * is still active (if the context is NULL) or if it is closed (if the context - * is (void*) 0x1). This is neccessary because the primary pointer of the - * capsule cannot be NULL. 
*/ - PyCapsule_SetContext(client_capsule, reinterpret_cast(0x1)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_create(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - Py_ssize_t size; - PyObject* metadata; - if (!PyArg_ParseTuple(args, "O&O&nO", PyObjectToPlasmaClient, &client, - PyStringToUniqueID, &object_id, &size, &metadata)) { - return NULL; - } - if (!PyByteArray_Check(metadata)) { - PyErr_SetString(PyExc_TypeError, "metadata must be a bytearray"); - return NULL; - } - uint8_t* data; - Status s = client->Create(object_id, size, - reinterpret_cast(PyByteArray_AsString(metadata)), - PyByteArray_Size(metadata), &data); - if (s.IsPlasmaObjectExists()) { - PyErr_SetString(PlasmaObjectExistsError, - "An object with this ID already exists in the plasma " - "store."); - return NULL; - } - if (s.IsPlasmaStoreFull()) { - PyErr_SetString(PlasmaOutOfMemoryError, - "The plasma store ran out of memory and could not create " - "this object."); - return NULL; - } - ARROW_CHECK(s.ok()); - -#if PY_MAJOR_VERSION >= 3 - return PyMemoryView_FromMemory(reinterpret_cast(data), size, PyBUF_WRITE); -#else - return PyBuffer_FromReadWriteMemory(reinterpret_cast(data), size); -#endif -} - -PyObject* PyPlasma_hash(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - unsigned char digest[kDigestSize]; - bool success = plasma_compute_object_hash(client, object_id, digest); - if (success) { - PyObject* digest_string = - PyBytes_FromStringAndSize(reinterpret_cast(digest), kDigestSize); - return digest_string; - } else { - Py_RETURN_NONE; - } -} - -PyObject* PyPlasma_seal(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - ARROW_CHECK_OK(client->Seal(object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_release(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - ARROW_CHECK_OK(client->Release(object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_get(PyObject* self, PyObject* args) { - PlasmaClient* client; - PyObject* object_id_list; - Py_ssize_t timeout_ms; - if (!PyArg_ParseTuple( - args, "O&On", PyObjectToPlasmaClient, &client, &object_id_list, &timeout_ms)) { - return NULL; - } - - Py_ssize_t num_object_ids = PyList_Size(object_id_list); - std::vector object_ids(num_object_ids); - std::vector object_buffers(num_object_ids); - - for (int i = 0; i < num_object_ids; ++i) { - PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]); - } - - Py_BEGIN_ALLOW_THREADS; - ARROW_CHECK_OK( - client->Get(object_ids.data(), num_object_ids, timeout_ms, object_buffers.data())); - Py_END_ALLOW_THREADS; - - PyObject* returns = PyList_New(num_object_ids); - for (int i = 0; i < num_object_ids; ++i) { - if (object_buffers[i].data_size != -1) { - /* The object was retrieved, so return the object. 
*/ - PyObject* t = PyTuple_New(2); - Py_ssize_t data_size = static_cast(object_buffers[i].data_size); - Py_ssize_t metadata_size = static_cast(object_buffers[i].metadata_size); -#if PY_MAJOR_VERSION >= 3 - char* data = reinterpret_cast(object_buffers[i].data); - char* metadata = reinterpret_cast(object_buffers[i].metadata); - PyTuple_SET_ITEM(t, 0, PyMemoryView_FromMemory(data, data_size, PyBUF_READ)); - PyTuple_SET_ITEM( - t, 1, PyMemoryView_FromMemory(metadata, metadata_size, PyBUF_READ)); -#else - void* data = reinterpret_cast(object_buffers[i].data); - void* metadata = reinterpret_cast(object_buffers[i].metadata); - PyTuple_SET_ITEM(t, 0, PyBuffer_FromMemory(data, data_size)); - PyTuple_SET_ITEM(t, 1, PyBuffer_FromMemory(metadata, metadata_size)); -#endif - ARROW_CHECK(PyList_SetItem(returns, i, t) == 0); - } else { - /* The object was not retrieved, so just add None to the list of return - * values. */ - Py_INCREF(Py_None); - ARROW_CHECK(PyList_SetItem(returns, i, Py_None) == 0); - } - } - return returns; -} - -PyObject* PyPlasma_contains(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - bool has_object; - ARROW_CHECK_OK(client->Contains(object_id, &has_object)); - - if (has_object) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } -} - -PyObject* PyPlasma_fetch(PyObject* self, PyObject* args) { - PlasmaClient* client; - PyObject* object_id_list; - if (!PyArg_ParseTuple(args, "O&O", PyObjectToPlasmaClient, &client, &object_id_list)) { - return NULL; - } - if (client->get_manager_fd() == -1) { - PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); - return NULL; - } - Py_ssize_t n = PyList_Size(object_id_list); - ObjectID* object_ids = new ObjectID[n]; - for (int i = 0; i < n; ++i) { - PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]); - } - ARROW_CHECK_OK(client->Fetch(static_cast(n), object_ids)); - delete[] object_ids; - Py_RETURN_NONE; -} - -PyObject* PyPlasma_wait(PyObject* self, PyObject* args) { - PlasmaClient* client; - PyObject* object_id_list; - Py_ssize_t timeout; - int num_returns; - if (!PyArg_ParseTuple(args, "O&Oni", PyObjectToPlasmaClient, &client, &object_id_list, - &timeout, &num_returns)) { - return NULL; - } - Py_ssize_t n = PyList_Size(object_id_list); - - if (client->get_manager_fd() == -1) { - PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); - return NULL; - } - if (num_returns < 0) { - PyErr_SetString( - PyExc_RuntimeError, "The argument num_returns cannot be less than zero."); - return NULL; - } - if (num_returns > n) { - PyErr_SetString(PyExc_RuntimeError, - "The argument num_returns cannot be greater than len(object_ids)"); - return NULL; - } - int64_t threshold = 1 << 30; - if (timeout > threshold) { - PyErr_SetString( - PyExc_RuntimeError, "The argument timeout cannot be greater than 2 ** 30."); - return NULL; - } - - std::vector object_requests(n); - for (int i = 0; i < n; ++i) { - ARROW_CHECK(PyStringToUniqueID(PyList_GetItem(object_id_list, i), - &object_requests[i].object_id) == 1); - object_requests[i].type = PLASMA_QUERY_ANYWHERE; - } - /* Drop the global interpreter lock while we are waiting, so other threads can - * run. 
*/ - int num_return_objects; - Py_BEGIN_ALLOW_THREADS; - ARROW_CHECK_OK( - client->Wait(n, object_requests.data(), num_returns, timeout, &num_return_objects)); - Py_END_ALLOW_THREADS; - - int num_to_return = std::min(num_return_objects, num_returns); - PyObject* ready_ids = PyList_New(num_to_return); - PyObject* waiting_ids = PySet_New(object_id_list); - int num_returned = 0; - for (int i = 0; i < n; ++i) { - if (num_returned == num_to_return) { break; } - if (object_requests[i].status == ObjectStatus_Local || - object_requests[i].status == ObjectStatus_Remote) { - PyObject* ready = PyBytes_FromStringAndSize( - reinterpret_cast(&object_requests[i].object_id), - sizeof(object_requests[i].object_id)); - PyList_SetItem(ready_ids, num_returned, ready); - PySet_Discard(waiting_ids, ready); - num_returned += 1; - } else { - ARROW_CHECK(object_requests[i].status == ObjectStatus_Nonexistent); - } - } - ARROW_CHECK(num_returned == num_to_return); - /* Return both the ready IDs and the remaining IDs. */ - PyObject* t = PyTuple_New(2); - PyTuple_SetItem(t, 0, ready_ids); - PyTuple_SetItem(t, 1, waiting_ids); - return t; -} - -PyObject* PyPlasma_evict(PyObject* self, PyObject* args) { - PlasmaClient* client; - Py_ssize_t num_bytes; - if (!PyArg_ParseTuple(args, "O&n", PyObjectToPlasmaClient, &client, &num_bytes)) { - return NULL; - } - int64_t evicted_bytes; - ARROW_CHECK_OK(client->Evict(static_cast(num_bytes), evicted_bytes)); - return PyLong_FromSsize_t(static_cast(evicted_bytes)); -} - -PyObject* PyPlasma_delete(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - ARROW_CHECK_OK(client->Delete(object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_transfer(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - const char* addr; - int port; - if (!PyArg_ParseTuple(args, "O&O&si", PyObjectToPlasmaClient, &client, - PyStringToUniqueID, &object_id, &addr, &port)) { - return NULL; - } - - if (client->get_manager_fd() == -1) { - PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); - return NULL; - } - - ARROW_CHECK_OK(client->Transfer(addr, port, object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_subscribe(PyObject* self, PyObject* args) { - PlasmaClient* client; - if (!PyArg_ParseTuple(args, "O&", PyObjectToPlasmaClient, &client)) { return NULL; } - - int sock; - ARROW_CHECK_OK(client->Subscribe(&sock)); - return PyLong_FromLong(sock); -} - -PyObject* PyPlasma_receive_notification(PyObject* self, PyObject* args) { - int plasma_sock; - - if (!PyArg_ParseTuple(args, "i", &plasma_sock)) { return NULL; } - /* Receive object notification from the plasma connection socket. If the - * object was added, return a tuple of its fields: ObjectID, data_size, - * metadata_size. If the object was deleted, data_size and metadata_size will - * be set to -1. */ - uint8_t* notification = read_message_async(plasma_sock); - if (notification == NULL) { - PyErr_SetString( - PyExc_RuntimeError, "Failed to read object notification from Plasma socket"); - return NULL; - } - auto object_info = flatbuffers::GetRoot(notification); - /* Construct a tuple from object_info and return. 
*/ - PyObject* t = PyTuple_New(3); - PyTuple_SetItem(t, 0, PyBytes_FromStringAndSize(object_info->object_id()->data(), - object_info->object_id()->size())); - if (object_info->is_deletion()) { - PyTuple_SetItem(t, 1, PyLong_FromLong(-1)); - PyTuple_SetItem(t, 2, PyLong_FromLong(-1)); - } else { - PyTuple_SetItem(t, 1, PyLong_FromLong(object_info->data_size())); - PyTuple_SetItem(t, 2, PyLong_FromLong(object_info->metadata_size())); - } - - delete[] notification; - return t; -} - -static PyMethodDef plasma_methods[] = { - {"connect", PyPlasma_connect, METH_VARARGS, "Connect to plasma."}, - {"disconnect", PyPlasma_disconnect, METH_VARARGS, "Disconnect from plasma."}, - {"create", PyPlasma_create, METH_VARARGS, "Create a new plasma object."}, - {"hash", PyPlasma_hash, METH_VARARGS, "Compute the hash of a plasma object."}, - {"seal", PyPlasma_seal, METH_VARARGS, "Seal a plasma object."}, - {"get", PyPlasma_get, METH_VARARGS, "Get a plasma object."}, - {"contains", PyPlasma_contains, METH_VARARGS, - "Does the plasma store contain this plasma object?"}, - {"fetch", PyPlasma_fetch, METH_VARARGS, - "Fetch the object from another plasma manager instance."}, - {"wait", PyPlasma_wait, METH_VARARGS, - "Wait until num_returns objects in object_ids are ready."}, - {"evict", PyPlasma_evict, METH_VARARGS, - "Evict some objects until we recover some number of bytes."}, - {"release", PyPlasma_release, METH_VARARGS, "Release the plasma object."}, - {"delete", PyPlasma_delete, METH_VARARGS, "Delete a plasma object."}, - {"transfer", PyPlasma_transfer, METH_VARARGS, - "Transfer object to another plasma manager."}, - {"subscribe", PyPlasma_subscribe, METH_VARARGS, - "Subscribe to the plasma notification socket."}, - {"receive_notification", PyPlasma_receive_notification, METH_VARARGS, - "Receive next notification from plasma notification socket."}, - {NULL} /* Sentinel */ -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, "libplasma", /* m_name */ - "A Python client library for plasma.", /* m_doc */ - 0, /* m_size */ - plasma_methods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL, /* m_free */ -}; -#endif - -#if PY_MAJOR_VERSION >= 3 -#define INITERROR return NULL -#else -#define INITERROR return -#endif - -#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ -#define PyMODINIT_FUNC void -#endif - -#if PY_MAJOR_VERSION >= 3 -#define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void) -#else -#define MOD_INIT(name) PyMODINIT_FUNC init##name(void) -#endif - -MOD_INIT(libplasma) { -#if PY_MAJOR_VERSION >= 3 - PyObject* m = PyModule_Create(&moduledef); -#else - PyObject* m = - Py_InitModule3("libplasma", plasma_methods, "A Python client library for plasma."); -#endif - - /* Create a custom exception for when an object ID is reused. */ - char plasma_object_exists_error[] = "plasma_object_exists.error"; - PlasmaObjectExistsError = PyErr_NewException(plasma_object_exists_error, NULL, NULL); - Py_INCREF(PlasmaObjectExistsError); - PyModule_AddObject(m, "plasma_object_exists_error", PlasmaObjectExistsError); - /* Create a custom exception for when the plasma store is out of memory. 
*/ - char plasma_out_of_memory_error[] = "plasma_out_of_memory.error"; - PlasmaOutOfMemoryError = PyErr_NewException(plasma_out_of_memory_error, NULL, NULL); - Py_INCREF(PlasmaOutOfMemoryError); - PyModule_AddObject(m, "plasma_out_of_memory_error", PlasmaOutOfMemoryError); - -#if PY_MAJOR_VERSION >= 3 - return m; -#endif -} diff --git a/cpp/src/plasma/extension.h b/cpp/src/plasma/extension.h deleted file mode 100644 index cee30abb3592d..0000000000000 --- a/cpp/src/plasma/extension.h +++ /dev/null @@ -1,50 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_EXTENSION_H -#define PLASMA_EXTENSION_H - -#undef _XOPEN_SOURCE -#undef _POSIX_C_SOURCE -#include - -#include "bytesobject.h" // NOLINT - -#include "plasma/client.h" -#include "plasma/common.h" - -static int PyObjectToPlasmaClient(PyObject* object, PlasmaClient** client) { - if (PyCapsule_IsValid(object, "plasma")) { - *client = reinterpret_cast(PyCapsule_GetPointer(object, "plasma")); - return 1; - } else { - PyErr_SetString(PyExc_TypeError, "must be a 'plasma' capsule"); - return 0; - } -} - -int PyStringToUniqueID(PyObject* object, ObjectID* object_id) { - if (PyBytes_Check(object)) { - memcpy(object_id, PyBytes_AsString(object), sizeof(ObjectID)); - return 1; - } else { - PyErr_SetString(PyExc_TypeError, "must be a 20 character string"); - return 0; - } -} - -#endif // PLASMA_EXTENSION_H diff --git a/cpp/src/plasma/fling.cc b/cpp/src/plasma/fling.cc deleted file mode 100644 index 79da4f43a192a..0000000000000 --- a/cpp/src/plasma/fling.cc +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2013 Sharvil Nanavati -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "plasma/fling.h" - -#include - -void init_msg(struct msghdr* msg, struct iovec* iov, char* buf, size_t buf_len) { - iov->iov_base = buf; - iov->iov_len = 1; - - msg->msg_iov = iov; - msg->msg_iovlen = 1; - msg->msg_control = buf; - msg->msg_controllen = buf_len; - msg->msg_name = NULL; - msg->msg_namelen = 0; -} - -int send_fd(int conn, int fd) { - struct msghdr msg; - struct iovec iov; - char buf[CMSG_SPACE(sizeof(int))]; - memset(&buf, 0, CMSG_SPACE(sizeof(int))); - - init_msg(&msg, &iov, buf, sizeof(buf)); - - struct cmsghdr* header = CMSG_FIRSTHDR(&msg); - header->cmsg_level = SOL_SOCKET; - header->cmsg_type = SCM_RIGHTS; - header->cmsg_len = CMSG_LEN(sizeof(int)); - *reinterpret_cast(CMSG_DATA(header)) = fd; - - // Send file descriptor. - ssize_t r = sendmsg(conn, &msg, 0); - if (r >= 0) { - return 0; - } else { - return static_cast(r); - } -} - -int recv_fd(int conn) { - struct msghdr msg; - struct iovec iov; - char buf[CMSG_SPACE(sizeof(int))]; - init_msg(&msg, &iov, buf, sizeof(buf)); - - if (recvmsg(conn, &msg, 0) == -1) return -1; - - int found_fd = -1; - int oh_noes = 0; - for (struct cmsghdr* header = CMSG_FIRSTHDR(&msg); header != NULL; - header = CMSG_NXTHDR(&msg, header)) - if (header->cmsg_level == SOL_SOCKET && header->cmsg_type == SCM_RIGHTS) { - ssize_t count = - (header->cmsg_len - (CMSG_DATA(header) - (unsigned char*)header)) / sizeof(int); - for (int i = 0; i < count; ++i) { - int fd = (reinterpret_cast(CMSG_DATA(header)))[i]; - if (found_fd == -1) { - found_fd = fd; - } else { - close(fd); - oh_noes = 1; - } - } - } - - // The sender sent us more than one file descriptor. We've closed - // them all to prevent fd leaks but notify the caller that we got - // a bad message. - if (oh_noes) { - close(found_fd); - errno = EBADMSG; - return -1; - } - - return found_fd; -} diff --git a/cpp/src/plasma/fling.h b/cpp/src/plasma/fling.h deleted file mode 100644 index 78ac9d17f26fb..0000000000000 --- a/cpp/src/plasma/fling.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013 Sharvil Nanavati -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// FLING: Exchanging file descriptors over sockets -// -// This is a little library for sending file descriptors over a socket -// between processes. The reason for doing that (as opposed to using -// filenames to share the files) is so (a) no files remain in the -// filesystem after all the processes terminate, (b) to make sure that -// there are no name collisions and (c) to be able to control who has -// access to the data. -// -// Most of the code is from https://github.com/sharvil/flingfd - -#include -#include -#include -#include -#include - -// This is neccessary for Mac OS X, see http://www.apuebook.com/faqs2e.html -// (10). 
-#if !defined(CMSG_SPACE) && !defined(CMSG_LEN) -#define CMSG_SPACE(len) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + __DARWIN_ALIGN32(len)) -#define CMSG_LEN(len) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (len)) -#endif - -void init_msg(struct msghdr* msg, struct iovec* iov, char* buf, size_t buf_len); - -// Send a file descriptor over a unix domain socket. -// -// @param conn Unix domain socket to send the file descriptor over. -// @param fd File descriptor to send over. -// @return Status code which is < 0 on failure. -int send_fd(int conn, int fd); - -// Receive a file descriptor over a unix domain socket. -// -// @param conn Unix domain socket to receive the file descriptor from. -// @return File descriptor or a value < 0 on failure. -int recv_fd(int conn); diff --git a/cpp/src/plasma/format/.gitignore b/cpp/src/plasma/format/.gitignore deleted file mode 100644 index b2ddb055dcbc6..0000000000000 --- a/cpp/src/plasma/format/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*_generated.h diff --git a/cpp/src/plasma/format/common.fbs b/cpp/src/plasma/format/common.fbs deleted file mode 100644 index 4d7d2852aec3d..0000000000000 --- a/cpp/src/plasma/format/common.fbs +++ /dev/null @@ -1,34 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Object information data structure. -table ObjectInfo { - // Object ID of this object. - object_id: string; - // Number of bytes the content of this object occupies in memory. - data_size: long; - // Number of bytes the metadata of this object occupies in memory. - metadata_size: long; - // Unix epoch of when this object was created. - create_time: long; - // How long creation of this object took. - construct_duration: long; - // Hash of the object content. - digest: string; - // Specifies if this object was deleted or added. - is_deletion: bool; -} diff --git a/cpp/src/plasma/format/plasma.fbs b/cpp/src/plasma/format/plasma.fbs deleted file mode 100644 index 23782ade539d4..0000000000000 --- a/cpp/src/plasma/format/plasma.fbs +++ /dev/null @@ -1,291 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -// Plasma protocol specification - -enum MessageType:int { - // Create a new object. - PlasmaCreateRequest = 1, - PlasmaCreateReply, - // Seal an object. - PlasmaSealRequest, - PlasmaSealReply, - // Get an object that is stored on the local Plasma store. - PlasmaGetRequest, - PlasmaGetReply, - // Release an object. - PlasmaReleaseRequest, - PlasmaReleaseReply, - // Delete an object. - PlasmaDeleteRequest, - PlasmaDeleteReply, - // Get status of an object. - PlasmaStatusRequest, - PlasmaStatusReply, - // See if the store contains an object (will be deprecated). - PlasmaContainsRequest, - PlasmaContainsReply, - // Get information for a newly connecting client. - PlasmaConnectRequest, - PlasmaConnectReply, - // Make room for new objects in the plasma store. - PlasmaEvictRequest, - PlasmaEvictReply, - // Fetch objects from remote Plasma stores. - PlasmaFetchRequest, - // Wait for objects to be ready either from local or remote Plasma stores. - PlasmaWaitRequest, - PlasmaWaitReply, - // Subscribe to a list of objects or to all objects. - PlasmaSubscribeRequest, - // Unsubscribe. - PlasmaUnsubscribeRequest, - // Sending and receiving data. - // PlasmaDataRequest initiates sending the data, there will be one - // such message per data transfer. - PlasmaDataRequest, - // PlasmaDataReply contains the actual data and is sent back to the - // object store that requested the data. For each transfer, multiple - // reply messages get sent. Each one contains a fixed number of bytes. - PlasmaDataReply, - // Object notifications. - PlasmaNotification -} - -enum PlasmaError:int { - // Operation was successful. - OK, - // Trying to create an object that already exists. - ObjectExists, - // Trying to access an object that doesn't exist. - ObjectNonexistent, - // Trying to create an object but there isn't enough space in the store. - OutOfMemory -} - -// Plasma store messages - -struct PlasmaObjectSpec { - // Index of the memory segment (= memory mapped file) that - // this object is allocated in. - segment_index: int; - // Size in bytes of this segment (needed to call mmap). - mmap_size: ulong; - // The offset in bytes in the memory mapped file of the data. - data_offset: ulong; - // The size in bytes of the data. - data_size: ulong; - // The offset in bytes in the memory mapped file of the metadata. - metadata_offset: ulong; - // The size in bytes of the metadata. - metadata_size: ulong; -} - -table PlasmaCreateRequest { - // ID of the object to be created. - object_id: string; - // The size of the object's data in bytes. - data_size: ulong; - // The size of the object's metadata in bytes. - metadata_size: ulong; -} - -table PlasmaCreateReply { - // ID of the object that was created. - object_id: string; - // The object that is returned with this reply. - plasma_object: PlasmaObjectSpec; - // Error that occurred for this call. - error: PlasmaError; -} - -table PlasmaSealRequest { - // ID of the object to be sealed. - object_id: string; - // Hash of the object data. - digest: string; -} - -table PlasmaSealReply { - // ID of the object that was sealed. - object_id: string; - // Error code. - error: PlasmaError; -} - -table PlasmaGetRequest { - // IDs of the objects stored at local Plasma store we are getting. - object_ids: [string]; - // The number of milliseconds before the request should timeout. - timeout_ms: long; -} - -table PlasmaGetReply { - // IDs of the objects being returned. 
- // This number can be smaller than the number of requested - // objects if not all requested objects are stored and sealed - // in the local Plasma store. - object_ids: [string]; - // Plasma object information, in the same order as their IDs. - plasma_objects: [PlasmaObjectSpec]; - // The number of elements in both object_ids and plasma_objects arrays must agree. -} - -table PlasmaReleaseRequest { - // ID of the object to be released. - object_id: string; -} - -table PlasmaReleaseReply { - // ID of the object that was released. - object_id: string; - // Error code. - error: PlasmaError; -} - -table PlasmaDeleteRequest { - // ID of the object to be deleted. - object_id: string; -} - -table PlasmaDeleteReply { - // ID of the object that was deleted. - object_id: string; - // Error code. - error: PlasmaError; -} - -table PlasmaStatusRequest { - // IDs of the objects stored at local Plasma store we request the status of. - object_ids: [string]; -} - -enum ObjectStatus:int { - // Object is stored in the local Plasma Store. - Local = 1, - // Object is stored on a remote Plasma store, and it is not stored on the - // local Plasma Store. - Remote, - // Object is not stored in the system. - Nonexistent, - // Object is currently transferred from a remote Plasma store the the local - // Plasma Store. - Transfer -} - -table PlasmaStatusReply { - // IDs of the objects being returned. - object_ids: [string]; - // Status of the object. - status: [ObjectStatus]; -} - -// PlasmaContains is a subset of PlasmaStatus which does not -// involve the plasma manager, only the store. We should consider -// unifying them in the future and deprecating PlasmaContains. - -table PlasmaContainsRequest { - // ID of the object we are querying. - object_id: string; -} - -table PlasmaContainsReply { - // ID of the object we are querying. - object_id: string; - // 1 if the object is in the store and 0 otherwise. - has_object: int; -} - -// PlasmaConnect is used by a plasma client the first time it connects with the -// store. This is not really necessary, but is used to get some information -// about the store such as its memory capacity. - -table PlasmaConnectRequest { -} - -table PlasmaConnectReply { - // The memory capacity of the store. - memory_capacity: long; -} - -table PlasmaEvictRequest { - // Number of bytes that shall be freed. - num_bytes: ulong; -} - -table PlasmaEvictReply { - // Number of bytes that have been freed. - num_bytes: ulong; -} - -table PlasmaFetchRequest { - // IDs of objects to be gotten. - object_ids: [string]; -} - -table ObjectRequestSpec { - // ID of the object. - object_id: string; - // The type of the object. This specifies whether we - // will be waiting for an object store in the local or - // global Plasma store. - type: int; -} - -table PlasmaWaitRequest { - // Array of object requests whose status we are asking for. - object_requests: [ObjectRequestSpec]; - // Number of objects expected to be returned, if available. - num_ready_objects: int; - // timeout - timeout: long; -} - -table ObjectReply { - // ID of the object. - object_id: string; - // The object status. This specifies where the object is stored. - status: int; -} - -table PlasmaWaitReply { - // Array of object requests being returned. - object_requests: [ObjectReply]; - // Number of objects expected to be returned, if available. - num_ready_objects: int; -} - -table PlasmaSubscribeRequest { -} - -table PlasmaDataRequest { - // ID of the object that is requested. 
- object_id: string; - // The host address where the data shall be sent to. - address: string; - // The port of the manager the data shall be sent to. - port: int; -} - -table PlasmaDataReply { - // ID of the object that will be sent. - object_id: string; - // Size of the object data in bytes. - object_size: ulong; - // Size of the metadata in bytes. - metadata_size: ulong; -} diff --git a/cpp/src/plasma/io.cc b/cpp/src/plasma/io.cc deleted file mode 100644 index 5875ebb7ae611..0000000000000 --- a/cpp/src/plasma/io.cc +++ /dev/null @@ -1,212 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/io.h" - -#include "plasma/common.h" - -using arrow::Status; - -/* Number of times we try binding to a socket. */ -#define NUM_BIND_ATTEMPTS 5 -#define BIND_TIMEOUT_MS 100 - -/* Number of times we try connecting to a socket. */ -#define NUM_CONNECT_ATTEMPTS 50 -#define CONNECT_TIMEOUT_MS 100 - -Status WriteBytes(int fd, uint8_t* cursor, size_t length) { - ssize_t nbytes = 0; - size_t bytesleft = length; - size_t offset = 0; - while (bytesleft > 0) { - /* While we haven't written the whole message, write to the file descriptor, - * advance the cursor, and decrease the amount left to write. */ - nbytes = write(fd, cursor + offset, bytesleft); - if (nbytes < 0) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; } - return Status::IOError(std::string(strerror(errno))); - } else if (nbytes == 0) { - return Status::IOError("Encountered unexpected EOF"); - } - ARROW_CHECK(nbytes > 0); - bytesleft -= nbytes; - offset += nbytes; - } - - return Status::OK(); -} - -Status WriteMessage(int fd, int64_t type, int64_t length, uint8_t* bytes) { - int64_t version = PLASMA_PROTOCOL_VERSION; - RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast(&version), sizeof(version))); - RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast(&type), sizeof(type))); - RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast(&length), sizeof(length))); - return WriteBytes(fd, bytes, length * sizeof(char)); -} - -Status ReadBytes(int fd, uint8_t* cursor, size_t length) { - ssize_t nbytes = 0; - /* Termination condition: EOF or read 'length' bytes total. 
*/ - size_t bytesleft = length; - size_t offset = 0; - while (bytesleft > 0) { - nbytes = read(fd, cursor + offset, bytesleft); - if (nbytes < 0) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; } - return Status::IOError(std::string(strerror(errno))); - } else if (0 == nbytes) { - return Status::IOError("Encountered unexpected EOF"); - } - ARROW_CHECK(nbytes > 0); - bytesleft -= nbytes; - offset += nbytes; - } - - return Status::OK(); -} - -Status ReadMessage(int fd, int64_t* type, std::vector* buffer) { - int64_t version; - RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(&version), sizeof(version)), - *type = DISCONNECT_CLIENT); - ARROW_CHECK(version == PLASMA_PROTOCOL_VERSION) << "version = " << version; - size_t length; - RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(type), sizeof(*type)), - *type = DISCONNECT_CLIENT); - RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(&length), sizeof(length)), - *type = DISCONNECT_CLIENT); - if (length > buffer->size()) { buffer->resize(length); } - RETURN_NOT_OK_ELSE(ReadBytes(fd, buffer->data(), length), *type = DISCONNECT_CLIENT); - return Status::OK(); -} - -int bind_ipc_sock(const std::string& pathname, bool shall_listen) { - struct sockaddr_un socket_address; - int socket_fd = socket(AF_UNIX, SOCK_STREAM, 0); - if (socket_fd < 0) { - ARROW_LOG(ERROR) << "socket() failed for pathname " << pathname; - return -1; - } - /* Tell the system to allow the port to be reused. */ - int on = 1; - if (setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&on), - sizeof(on)) < 0) { - ARROW_LOG(ERROR) << "setsockopt failed for pathname " << pathname; - close(socket_fd); - return -1; - } - - unlink(pathname.c_str()); - memset(&socket_address, 0, sizeof(socket_address)); - socket_address.sun_family = AF_UNIX; - if (pathname.size() + 1 > sizeof(socket_address.sun_path)) { - ARROW_LOG(ERROR) << "Socket pathname is too long."; - close(socket_fd); - return -1; - } - strncpy(socket_address.sun_path, pathname.c_str(), pathname.size() + 1); - - if (bind(socket_fd, (struct sockaddr*)&socket_address, sizeof(socket_address)) != 0) { - ARROW_LOG(ERROR) << "Bind failed for pathname " << pathname; - close(socket_fd); - return -1; - } - if (shall_listen && listen(socket_fd, 128) == -1) { - ARROW_LOG(ERROR) << "Could not listen to socket " << pathname; - close(socket_fd); - return -1; - } - return socket_fd; -} - -int connect_ipc_sock_retry( - const std::string& pathname, int num_retries, int64_t timeout) { - /* Pick the default values if the user did not specify. */ - if (num_retries < 0) { num_retries = NUM_CONNECT_ATTEMPTS; } - if (timeout < 0) { timeout = CONNECT_TIMEOUT_MS; } - - int fd = -1; - for (int num_attempts = 0; num_attempts < num_retries; ++num_attempts) { - fd = connect_ipc_sock(pathname); - if (fd >= 0) { break; } - if (num_attempts == 0) { - ARROW_LOG(ERROR) << "Connection to socket failed for pathname " << pathname; - } - /* Sleep for timeout milliseconds. */ - usleep(static_cast(timeout * 1000)); - } - /* If we could not connect to the socket, exit. 
*/ - if (fd == -1) { ARROW_LOG(FATAL) << "Could not connect to socket " << pathname; } - return fd; -} - -int connect_ipc_sock(const std::string& pathname) { - struct sockaddr_un socket_address; - int socket_fd; - - socket_fd = socket(AF_UNIX, SOCK_STREAM, 0); - if (socket_fd < 0) { - ARROW_LOG(ERROR) << "socket() failed for pathname " << pathname; - return -1; - } - - memset(&socket_address, 0, sizeof(socket_address)); - socket_address.sun_family = AF_UNIX; - if (pathname.size() + 1 > sizeof(socket_address.sun_path)) { - ARROW_LOG(ERROR) << "Socket pathname is too long."; - return -1; - } - strncpy(socket_address.sun_path, pathname.c_str(), pathname.size() + 1); - - if (connect(socket_fd, (struct sockaddr*)&socket_address, sizeof(socket_address)) != - 0) { - close(socket_fd); - return -1; - } - - return socket_fd; -} - -int AcceptClient(int socket_fd) { - int client_fd = accept(socket_fd, NULL, NULL); - if (client_fd < 0) { - ARROW_LOG(ERROR) << "Error reading from socket."; - return -1; - } - return client_fd; -} - -uint8_t* read_message_async(int sock) { - int64_t size; - Status s = ReadBytes(sock, reinterpret_cast(&size), sizeof(int64_t)); - if (!s.ok()) { - /* The other side has closed the socket. */ - ARROW_LOG(DEBUG) << "Socket has been closed, or some other error has occurred."; - close(sock); - return NULL; - } - uint8_t* message = reinterpret_cast(malloc(size)); - s = ReadBytes(sock, message, size); - if (!s.ok()) { - /* The other side has closed the socket. */ - ARROW_LOG(DEBUG) << "Socket has been closed, or some other error has occurred."; - close(sock); - return NULL; - } - return message; -} diff --git a/cpp/src/plasma/io.h b/cpp/src/plasma/io.h deleted file mode 100644 index 43c3fb535497f..0000000000000 --- a/cpp/src/plasma/io.h +++ /dev/null @@ -1,55 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_IO_H -#define PLASMA_IO_H - -#include -#include -#include -#include - -#include -#include - -#include "arrow/status.h" - -// TODO(pcm): Replace our own custom message header (message type, -// message length, plasma protocol verion) with one that is serialized -// using flatbuffers. 
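The TODO above refers to the hand-rolled framing implemented by WriteMessage/ReadMessage in io.cc: every message on the wire is [version | type | length | flatbuffer payload]. A sketch of a round trip, assuming the flatbuffers-generated enum name MessageType_PlasmaReleaseRequest and a request already built into fbb (neither is spelled out in this patch):

#include <vector>
#include "plasma/io.h"

arrow::Status round_trip_example(int out_fd, int in_fd,
                                 flatbuffers::FlatBufferBuilder& fbb) {
  // Send: WriteMessage prepends PLASMA_PROTOCOL_VERSION, the type and the length.
  RETURN_NOT_OK(WriteMessage(out_fd, MessageType_PlasmaReleaseRequest, fbb.GetSize(),
                             fbb.GetBufferPointer()));
  // Receive: ReadMessage checks the version and fills type and buffer,
  // setting type to DISCONNECT_CLIENT if the peer went away.
  int64_t type;
  std::vector<uint8_t> buffer;
  RETURN_NOT_OK(ReadMessage(in_fd, &type, &buffer));
  if (type == DISCONNECT_CLIENT) { return arrow::Status::IOError("client disconnected"); }
  // ... dispatch on type and parse buffer.data() with the generated readers ...
  return arrow::Status::OK();
}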
-#define PLASMA_PROTOCOL_VERSION 0x0000000000000000 -#define DISCONNECT_CLIENT 0 - -arrow::Status WriteBytes(int fd, uint8_t* cursor, size_t length); - -arrow::Status WriteMessage(int fd, int64_t type, int64_t length, uint8_t* bytes); - -arrow::Status ReadBytes(int fd, uint8_t* cursor, size_t length); - -arrow::Status ReadMessage(int fd, int64_t* type, std::vector* buffer); - -int bind_ipc_sock(const std::string& pathname, bool shall_listen); - -int connect_ipc_sock(const std::string& pathname); - -int connect_ipc_sock_retry(const std::string& pathname, int num_retries, int64_t timeout); - -int AcceptClient(int socket_fd); - -uint8_t* read_message_async(int sock); - -#endif // PLASMA_IO_H diff --git a/cpp/src/plasma/malloc.cc b/cpp/src/plasma/malloc.cc deleted file mode 100644 index 97c9a16c0c0bd..0000000000000 --- a/cpp/src/plasma/malloc.cc +++ /dev/null @@ -1,178 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/malloc.h" - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "plasma/common.h" - -extern "C" { -void* fake_mmap(size_t); -int fake_munmap(void*, int64_t); - -#define MMAP(s) fake_mmap(s) -#define MUNMAP(a, s) fake_munmap(a, s) -#define DIRECT_MMAP(s) fake_mmap(s) -#define DIRECT_MUNMAP(a, s) fake_munmap(a, s) -#define USE_DL_PREFIX -#define HAVE_MORECORE 0 -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#define DEFAULT_GRANULARITY ((size_t)128U * 1024U) - -#include "thirdparty/dlmalloc.c" // NOLINT - -#undef MMAP -#undef MUNMAP -#undef DIRECT_MMAP -#undef DIRECT_MUNMAP -#undef USE_DL_PREFIX -#undef HAVE_MORECORE -#undef DEFAULT_GRANULARITY -} - -struct mmap_record { - int fd; - int64_t size; -}; - -namespace { - -/** Hashtable that contains one entry per segment that we got from the OS - * via mmap. Associates the address of that segment with its file descriptor - * and size. */ -std::unordered_map mmap_records; - -} /* namespace */ - -constexpr int GRANULARITY_MULTIPLIER = 2; - -static void* pointer_advance(void* p, ptrdiff_t n) { - return (unsigned char*)p + n; -} - -static void* pointer_retreat(void* p, ptrdiff_t n) { - return (unsigned char*)p - n; -} - -static ptrdiff_t pointer_distance(void const* pfrom, void const* pto) { - return (unsigned char const*)pto - (unsigned char const*)pfrom; -} - -/* Create a buffer. This is creating a temporary file and then - * immediately unlinking it so we do not leave traces in the system. 
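The malloc.cc machinery below redirects dlmalloc's MMAP to fake_mmap, so every allocation lands inside an unlinked, mmapped temporary file recorded in mmap_records. A sketch of how an allocation is translated back into the (fd, size, offset) triple the store ships to clients; dlmemalign/dlfree are the dl-prefixed entry points assumed from the vendored dlmalloc, and the 64-byte alignment mirrors BLOCK_SIZE from plasma.h:

#include <cstddef>
#include <cstdint>
#include "plasma/malloc.h"

extern "C" {
void* dlmemalign(size_t alignment, size_t bytes);  // USE_DL_PREFIX entry points
void dlfree(void* mem);
}

void allocate_and_describe(int64_t nbytes) {
  void* pointer = dlmemalign(64, nbytes);  // served from a fake_mmap segment
  int fd;
  int64_t map_size;
  ptrdiff_t offset;
  // Recover which mmapped file backs this pointer and where it sits inside it.
  get_malloc_mapinfo(pointer, &fd, &map_size, &offset);
  // fd, map_size and offset are exactly what a client needs to mmap the same
  // segment and locate the object within it.
  dlfree(pointer);
}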
*/ -int create_buffer(int64_t size) { - int fd; -#ifdef _WIN32 - if (!CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, - (DWORD)((uint64_t)size >> (CHAR_BIT * sizeof(DWORD))), (DWORD)(uint64_t)size, - NULL)) { - fd = -1; - } -#else -#ifdef __linux__ - constexpr char file_template[] = "/dev/shm/plasmaXXXXXX"; -#else - constexpr char file_template[] = "/tmp/plasmaXXXXXX"; -#endif - char file_name[32]; - strncpy(file_name, file_template, 32); - fd = mkstemp(file_name); - if (fd < 0) return -1; - FILE* file = fdopen(fd, "a+"); - if (!file) { - close(fd); - return -1; - } - if (unlink(file_name) != 0) { - ARROW_LOG(FATAL) << "unlink error"; - return -1; - } - if (ftruncate(fd, (off_t)size) != 0) { - ARROW_LOG(FATAL) << "ftruncate error"; - return -1; - } -#endif - return fd; -} - -void* fake_mmap(size_t size) { - /* Add sizeof(size_t) so that the returned pointer is deliberately not - * page-aligned. This ensures that the segments of memory returned by - * fake_mmap are never contiguous. */ - size += sizeof(size_t); - - int fd = create_buffer(size); - ARROW_CHECK(fd >= 0) << "Failed to create buffer during mmap"; - void* pointer = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - if (pointer == MAP_FAILED) { return pointer; } - - /* Increase dlmalloc's allocation granularity directly. */ - mparams.granularity *= GRANULARITY_MULTIPLIER; - - mmap_record& record = mmap_records[pointer]; - record.fd = fd; - record.size = size; - - /* We lie to dlmalloc about where mapped memory actually lives. */ - pointer = pointer_advance(pointer, sizeof(size_t)); - ARROW_LOG(DEBUG) << pointer << " = fake_mmap(" << size << ")"; - return pointer; -} - -int fake_munmap(void* addr, int64_t size) { - ARROW_LOG(DEBUG) << "fake_munmap(" << addr << ", " << size << ")"; - addr = pointer_retreat(addr, sizeof(size_t)); - size += sizeof(size_t); - - auto entry = mmap_records.find(addr); - - if (entry == mmap_records.end() || entry->second.size != size) { - /* Reject requests to munmap that don't directly match previous - * calls to mmap, to prevent dlmalloc from trimming. */ - return -1; - } - - int r = munmap(addr, size); - if (r == 0) { close(entry->second.fd); } - - mmap_records.erase(entry); - return r; -} - -void get_malloc_mapinfo(void* addr, int* fd, int64_t* map_size, ptrdiff_t* offset) { - /* TODO(rshin): Implement a more efficient search through mmap_records. */ - for (const auto& entry : mmap_records) { - if (addr >= entry.first && addr < pointer_advance(entry.first, entry.second.size)) { - *fd = entry.second.fd; - *map_size = entry.second.size; - *offset = pointer_distance(entry.first, addr); - return; - } - } - *fd = -1; - *map_size = 0; - *offset = 0; -} diff --git a/cpp/src/plasma/malloc.h b/cpp/src/plasma/malloc.h deleted file mode 100644 index b4af2c826b5c9..0000000000000 --- a/cpp/src/plasma/malloc.h +++ /dev/null @@ -1,26 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_MALLOC_H -#define PLASMA_MALLOC_H - -#include -#include - -void get_malloc_mapinfo(void* addr, int* fd, int64_t* map_length, ptrdiff_t* offset); - -#endif // MALLOC_H diff --git a/cpp/src/plasma/plasma.cc b/cpp/src/plasma/plasma.cc deleted file mode 100644 index 559d8e7f2a65e..0000000000000 --- a/cpp/src/plasma/plasma.cc +++ /dev/null @@ -1,64 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/plasma.h" - -#include -#include -#include - -#include "plasma/common.h" -#include "plasma/protocol.h" - -int warn_if_sigpipe(int status, int client_sock) { - if (status >= 0) { return 0; } - if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) { - ARROW_LOG(WARNING) << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when " - "sending a message to client on fd " - << client_sock << ". The client on the other end may " - "have hung up."; - return errno; - } - ARROW_LOG(FATAL) << "Failed to write message to client on fd " << client_sock << "."; - return -1; // This is never reached. -} - -/** - * This will create a new ObjectInfo buffer. The first sizeof(int64_t) bytes - * of this buffer are the length of the remaining message and the - * remaining message is a serialized version of the object info. - * - * @param object_info The object info to be serialized - * @return The object info buffer. It is the caller's responsibility to free - * this buffer with "delete" after it has been used. 
- */ -uint8_t* create_object_info_buffer(ObjectInfoT* object_info) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreateObjectInfo(fbb, object_info); - fbb.Finish(message); - uint8_t* notification = new uint8_t[sizeof(int64_t) + fbb.GetSize()]; - *(reinterpret_cast(notification)) = fbb.GetSize(); - memcpy(notification + sizeof(int64_t), fbb.GetBufferPointer(), fbb.GetSize()); - return notification; -} - -ObjectTableEntry* get_object_table_entry( - PlasmaStoreInfo* store_info, const ObjectID& object_id) { - auto it = store_info->objects.find(object_id); - if (it == store_info->objects.end()) { return NULL; } - return it->second.get(); -} diff --git a/cpp/src/plasma/plasma.h b/cpp/src/plasma/plasma.h deleted file mode 100644 index 275d0c7a41687..0000000000000 --- a/cpp/src/plasma/plasma.h +++ /dev/null @@ -1,191 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_PLASMA_H -#define PLASMA_PLASMA_H - -#include -#include -#include -#include -#include -#include -#include -#include // pid_t - -#include -#include - -#include "arrow/status.h" -#include "arrow/util/logging.h" -#include "format/common_generated.h" -#include "plasma/common.h" - -#define HANDLE_SIGPIPE(s, fd_) \ - do { \ - Status _s = (s); \ - if (!_s.ok()) { \ - if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) { \ - ARROW_LOG(WARNING) \ - << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when " \ - "sending a message to client on fd " \ - << fd_ << ". " \ - "The client on the other end may have hung up."; \ - } else { \ - return _s; \ - } \ - } \ - } while (0); - -/// Allocation granularity used in plasma for object allocation. -#define BLOCK_SIZE 64 - -/// Size of object hash digests. -constexpr int64_t kDigestSize = sizeof(uint64_t); - -struct Client; - -/// Object request data structure. Used in the plasma_wait_for_objects() -/// argument. -typedef struct { - /// The ID of the requested object. If ID_NIL request any object. - ObjectID object_id; - /// Request associated to the object. It can take one of the following values: - /// - PLASMA_QUERY_LOCAL: return if or when the object is available in the - /// local Plasma Store. - /// - PLASMA_QUERY_ANYWHERE: return if or when the object is available in - /// the system (i.e., either in the local or a remote Plasma Store). - int type; - /// Object status. Same as the status returned by plasma_status() function - /// call. This is filled in by plasma_wait_for_objects1(): - /// - ObjectStatus_Local: object is ready at the local Plasma Store. - /// - ObjectStatus_Remote: object is ready at a remote Plasma Store. - /// - ObjectStatus_Nonexistent: object does not exist in the system. 
- /// - PLASMA_CLIENT_IN_TRANSFER, if the object is currently being scheduled - /// for being transferred or it is transferring. - int status; -} ObjectRequest; - -/// Mapping from object IDs to type and status of the request. -typedef std::unordered_map ObjectRequestMap; - -/// Handle to access memory mapped file and map it into client address space. -typedef struct { - /// The file descriptor of the memory mapped file in the store. It is used as - /// a unique identifier of the file in the client to look up the corresponding - /// file descriptor on the client's side. - int store_fd; - /// The size in bytes of the memory mapped file. - int64_t mmap_size; -} object_handle; - -// TODO(pcm): Replace this by the flatbuffers message PlasmaObjectSpec. -typedef struct { - /// Handle for memory mapped file the object is stored in. - object_handle handle; - /// The offset in bytes in the memory mapped file of the data. - ptrdiff_t data_offset; - /// The offset in bytes in the memory mapped file of the metadata. - ptrdiff_t metadata_offset; - /// The size in bytes of the data. - int64_t data_size; - /// The size in bytes of the metadata. - int64_t metadata_size; -} PlasmaObject; - -typedef enum { - /// Object was created but not sealed in the local Plasma Store. - PLASMA_CREATED = 1, - /// Object is sealed and stored in the local Plasma Store. - PLASMA_SEALED -} object_state; - -typedef enum { - /// The object was not found. - OBJECT_NOT_FOUND = 0, - /// The object was found. - OBJECT_FOUND = 1 -} object_status; - -typedef enum { - /// Query for object in the local plasma store. - PLASMA_QUERY_LOCAL = 1, - /// Query for object in the local plasma store or in a remote plasma store. - PLASMA_QUERY_ANYWHERE -} object_request_type; - -/// This type is used by the Plasma store. It is here because it is exposed to -/// the eviction policy. -struct ObjectTableEntry { - /// Object id of this object. - ObjectID object_id; - /// Object info like size, creation time and owner. - ObjectInfoT info; - /// Memory mapped file containing the object. - int fd; - /// Size of the underlying map. - int64_t map_size; - /// Offset from the base of the mmap. - ptrdiff_t offset; - /// Pointer to the object data. Needed to free the object. - uint8_t* pointer; - /// Set of clients currently using this object. - std::unordered_set clients; - /// The state of the object, e.g., whether it is open or sealed. - object_state state; - /// The digest of the object. Used to see if two objects are the same. - unsigned char digest[kDigestSize]; -}; - -/// The plasma store information that is exposed to the eviction policy. -struct PlasmaStoreInfo { - /// Objects that are in the Plasma store. - std::unordered_map, UniqueIDHasher> objects; - /// The amount of memory (in bytes) that we allow to be allocated in the - /// store. - int64_t memory_capacity; -}; - -/// Get an entry from the object table and return NULL if the object_id -/// is not present. -/// -/// @param store_info The PlasmaStoreInfo that contains the object table. -/// @param object_id The object_id of the entry we are looking for. -/// @return The entry associated with the object_id or NULL if the object_id -/// is not present. -ObjectTableEntry* get_object_table_entry( - PlasmaStoreInfo* store_info, const ObjectID& object_id); - -/// Print a warning if the status is less than zero. This should be used to check -/// the success of messages sent to plasma clients. We print a warning instead of -/// failing because the plasma clients are allowed to die. 
This is used to handle -/// situations where the store writes to a client file descriptor, and the client -/// may already have disconnected. If we have processed the disconnection and -/// closed the file descriptor, we should get a BAD FILE DESCRIPTOR error. If we -/// have not, then we should get a SIGPIPE. If we write to a TCP socket that -/// isn't connected yet, then we should get an ECONNRESET. -/// -/// @param status The status to check. If it is less less than zero, we will -/// print a warning. -/// @param client_sock The client socket. This is just used to print some extra -/// information. -/// @return The errno set. -int warn_if_sigpipe(int status, int client_sock); - -uint8_t* create_object_info_buffer(ObjectInfoT* object_info); - -#endif // PLASMA_PLASMA_H diff --git a/cpp/src/plasma/protocol.cc b/cpp/src/plasma/protocol.cc deleted file mode 100644 index 246aa29736056..0000000000000 --- a/cpp/src/plasma/protocol.cc +++ /dev/null @@ -1,502 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "plasma/protocol.h" - -#include "flatbuffers/flatbuffers.h" -#include "format/plasma_generated.h" - -#include "plasma/common.h" -#include "plasma/io.h" - -using flatbuffers::uoffset_t; - -flatbuffers::Offset>> -to_flatbuffer(flatbuffers::FlatBufferBuilder* fbb, const ObjectID* object_ids, - int64_t num_objects) { - std::vector> results; - for (int64_t i = 0; i < num_objects; i++) { - results.push_back(fbb->CreateString(object_ids[i].binary())); - } - return fbb->CreateVector(results); -} - -Status PlasmaReceive(int sock, int64_t message_type, std::vector* buffer) { - int64_t type; - RETURN_NOT_OK(ReadMessage(sock, &type, buffer)); - ARROW_CHECK(type == message_type) << "type = " << type - << ", message_type = " << message_type; - return Status::OK(); -} - -template -Status PlasmaSend(int sock, int64_t message_type, flatbuffers::FlatBufferBuilder* fbb, - const Message& message) { - fbb->Finish(message); - return WriteMessage(sock, message_type, fbb->GetSize(), fbb->GetBufferPointer()); -} - -// Create messages. 
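// A minimal sketch (not part of the original sources) of the serialize/parse
// round trip that each Send*/Read* pair below performs, shown in memory rather
// than across the socket that PlasmaSend/PlasmaReceive use. It assumes the
// generated format/plasma_generated.h and the declarations in this file are
// visible; the 20-byte id string is a placeholder value.
Status CreateRequestRoundTrip() {
  flatbuffers::FlatBufferBuilder fbb;
  auto req = CreatePlasmaCreateRequest(fbb, fbb.CreateString(std::string(20, 'x')),
                                       /*data_size=*/1024, /*metadata_size=*/64);
  fbb.Finish(req);
  // PlasmaSend would now write fbb.GetSize() bytes starting at
  // fbb.GetBufferPointer(); the receiver parses those same bytes.
  ObjectID object_id;
  int64_t data_size, metadata_size;
  return ReadCreateRequest(fbb.GetBufferPointer(), &object_id, &data_size, &metadata_size);
}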
- -Status SendCreateRequest( - int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaCreateRequest( - fbb, fbb.CreateString(object_id.binary()), data_size, metadata_size); - return PlasmaSend(sock, MessageType_PlasmaCreateRequest, &fbb, message); -} - -Status ReadCreateRequest( - uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *data_size = message->data_size(); - *metadata_size = message->metadata_size(); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return Status::OK(); -} - -Status SendCreateReply( - int sock, ObjectID object_id, PlasmaObject* object, int error_code) { - flatbuffers::FlatBufferBuilder fbb; - PlasmaObjectSpec plasma_object(object->handle.store_fd, object->handle.mmap_size, - object->data_offset, object->data_size, object->metadata_offset, - object->metadata_size); - auto message = CreatePlasmaCreateReply( - fbb, fbb.CreateString(object_id.binary()), &plasma_object, (PlasmaError)error_code); - return PlasmaSend(sock, MessageType_PlasmaCreateReply, &fbb, message); -} - -Status ReadCreateReply(uint8_t* data, ObjectID* object_id, PlasmaObject* object) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - object->handle.store_fd = message->plasma_object()->segment_index(); - object->handle.mmap_size = message->plasma_object()->mmap_size(); - object->data_offset = message->plasma_object()->data_offset(); - object->data_size = message->plasma_object()->data_size(); - object->metadata_offset = message->plasma_object()->metadata_offset(); - object->metadata_size = message->plasma_object()->metadata_size(); - return plasma_error_status(message->error()); -} - -// Seal messages. - -Status SendSealRequest(int sock, ObjectID object_id, unsigned char* digest) { - flatbuffers::FlatBufferBuilder fbb; - auto digest_string = fbb.CreateString(reinterpret_cast(digest), kDigestSize); - auto message = - CreatePlasmaSealRequest(fbb, fbb.CreateString(object_id.binary()), digest_string); - return PlasmaSend(sock, MessageType_PlasmaSealRequest, &fbb, message); -} - -Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - ARROW_CHECK(message->digest()->size() == kDigestSize); - memcpy(digest, message->digest()->data(), kDigestSize); - return Status::OK(); -} - -Status SendSealReply(int sock, ObjectID object_id, int error) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaSealReply( - fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); - return PlasmaSend(sock, MessageType_PlasmaSealReply, &fbb, message); -} - -Status ReadSealReply(uint8_t* data, ObjectID* object_id) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return plasma_error_status(message->error()); -} - -// Release messages. 
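// A minimal client-side sketch (not part of the original sources) of a release
// round trip composed from the pair below plus PlasmaReceive above; it assumes
// `sock` is a connected store socket and `id` an object ID obtained earlier.
Status ReleaseAndConfirm(int sock, const ObjectID& id) {
  RETURN_NOT_OK(SendReleaseRequest(sock, id));
  std::vector<uint8_t> buffer;
  RETURN_NOT_OK(PlasmaReceive(sock, MessageType_PlasmaReleaseReply, &buffer));
  ObjectID released_id;
  return ReadReleaseReply(buffer.data(), &released_id);  // non-OK if the store reported an error
}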
- -Status SendReleaseRequest(int sock, ObjectID object_id) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaSealRequest(fbb, fbb.CreateString(object_id.binary())); - return PlasmaSend(sock, MessageType_PlasmaReleaseRequest, &fbb, message); -} - -Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return Status::OK(); -} - -Status SendReleaseReply(int sock, ObjectID object_id, int error) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaReleaseReply( - fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); - return PlasmaSend(sock, MessageType_PlasmaReleaseReply, &fbb, message); -} - -Status ReadReleaseReply(uint8_t* data, ObjectID* object_id) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return plasma_error_status(message->error()); -} - -// Delete messages. - -Status SendDeleteRequest(int sock, ObjectID object_id) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaDeleteRequest(fbb, fbb.CreateString(object_id.binary())); - return PlasmaSend(sock, MessageType_PlasmaDeleteRequest, &fbb, message); -} - -Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return Status::OK(); -} - -Status SendDeleteReply(int sock, ObjectID object_id, int error) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaDeleteReply( - fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); - return PlasmaSend(sock, MessageType_PlasmaDeleteReply, &fbb, message); -} - -Status ReadDeleteReply(uint8_t* data, ObjectID* object_id) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return plasma_error_status(message->error()); -} - -// Satus messages. 
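// A minimal sketch (not part of the original sources) of a status query using
// the pair below; `sock` is assumed to be a connected store socket. The reply
// carries parallel arrays of object IDs and ObjectStatus_* values.
Status QueryStatus(int sock, const ObjectID* ids, int64_t num_objects) {
  RETURN_NOT_OK(SendStatusRequest(sock, ids, num_objects));
  std::vector<uint8_t> buffer;
  RETURN_NOT_OK(PlasmaReceive(sock, MessageType_PlasmaStatusReply, &buffer));
  int64_t n = ReadStatusReply_num_objects(buffer.data());
  std::vector<ObjectID> reply_ids(n);
  std::vector<int> statuses(n);
  return ReadStatusReply(buffer.data(), reply_ids.data(), statuses.data(), n);
}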
- -Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objects) { - flatbuffers::FlatBufferBuilder fbb; - auto message = - CreatePlasmaStatusRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects)); - return PlasmaSend(sock, MessageType_PlasmaStatusRequest, &fbb, message); -} - -Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - for (uoffset_t i = 0; i < num_objects; ++i) { - object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str()); - } - return Status::OK(); -} - -Status SendStatusReply( - int sock, ObjectID object_ids[], int object_status[], int64_t num_objects) { - flatbuffers::FlatBufferBuilder fbb; - auto message = - CreatePlasmaStatusReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), - fbb.CreateVector(object_status, num_objects)); - return PlasmaSend(sock, MessageType_PlasmaStatusReply, &fbb, message); -} - -int64_t ReadStatusReply_num_objects(uint8_t* data) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - return message->object_ids()->size(); -} - -Status ReadStatusReply( - uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - for (uoffset_t i = 0; i < num_objects; ++i) { - object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str()); - } - for (uoffset_t i = 0; i < num_objects; ++i) { - object_status[i] = message->status()->data()[i]; - } - return Status::OK(); -} - -// Contains messages. - -Status SendContainsRequest(int sock, ObjectID object_id) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaContainsRequest(fbb, fbb.CreateString(object_id.binary())); - return PlasmaSend(sock, MessageType_PlasmaContainsRequest, &fbb, message); -} - -Status ReadContainsRequest(uint8_t* data, ObjectID* object_id) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - return Status::OK(); -} - -Status SendContainsReply(int sock, ObjectID object_id, bool has_object) { - flatbuffers::FlatBufferBuilder fbb; - auto message = - CreatePlasmaContainsReply(fbb, fbb.CreateString(object_id.binary()), has_object); - return PlasmaSend(sock, MessageType_PlasmaContainsReply, &fbb, message); -} - -Status ReadContainsReply(uint8_t* data, ObjectID* object_id, bool* has_object) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - *has_object = message->has_object(); - return Status::OK(); -} - -// Connect messages. - -Status SendConnectRequest(int sock) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaConnectRequest(fbb); - return PlasmaSend(sock, MessageType_PlasmaConnectRequest, &fbb, message); -} - -Status ReadConnectRequest(uint8_t* data) { - return Status::OK(); -} - -Status SendConnectReply(int sock, int64_t memory_capacity) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaConnectReply(fbb, memory_capacity); - return PlasmaSend(sock, MessageType_PlasmaConnectReply, &fbb, message); -} - -Status ReadConnectReply(uint8_t* data, int64_t* memory_capacity) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *memory_capacity = message->memory_capacity(); - return Status::OK(); -} - -// Evict messages. 
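// A minimal sketch (not part of the original sources) of the eviction round
// trip built from the pair below; per the store, this code path is intended
// for testing. `sock` is assumed to be a connected store socket.
Status EvictBytes(int sock, int64_t num_bytes, int64_t* evicted) {
  RETURN_NOT_OK(SendEvictRequest(sock, num_bytes));
  std::vector<uint8_t> buffer;
  RETURN_NOT_OK(PlasmaReceive(sock, MessageType_PlasmaEvictReply, &buffer));
  return ReadEvictReply(buffer.data(), *evicted);  // note the int64_t& out-parameter
}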
- -Status SendEvictRequest(int sock, int64_t num_bytes) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaEvictRequest(fbb, num_bytes); - return PlasmaSend(sock, MessageType_PlasmaEvictRequest, &fbb, message); -} - -Status ReadEvictRequest(uint8_t* data, int64_t* num_bytes) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *num_bytes = message->num_bytes(); - return Status::OK(); -} - -Status SendEvictReply(int sock, int64_t num_bytes) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaEvictReply(fbb, num_bytes); - return PlasmaSend(sock, MessageType_PlasmaEvictReply, &fbb, message); -} - -Status ReadEvictReply(uint8_t* data, int64_t& num_bytes) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - num_bytes = message->num_bytes(); - return Status::OK(); -} - -// Get messages. - -Status SendGetRequest( - int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaGetRequest( - fbb, to_flatbuffer(&fbb, object_ids, num_objects), timeout_ms); - return PlasmaSend(sock, MessageType_PlasmaGetRequest, &fbb, message); -} - -Status ReadGetRequest( - uint8_t* data, std::vector& object_ids, int64_t* timeout_ms) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) { - auto object_id = message->object_ids()->Get(i)->str(); - object_ids.push_back(ObjectID::from_binary(object_id)); - } - *timeout_ms = message->timeout_ms(); - return Status::OK(); -} - -Status SendGetReply(int sock, ObjectID object_ids[], - std::unordered_map& plasma_objects, - int64_t num_objects) { - flatbuffers::FlatBufferBuilder fbb; - std::vector objects; - - for (int i = 0; i < num_objects; ++i) { - const PlasmaObject& object = plasma_objects[object_ids[i]]; - objects.push_back(PlasmaObjectSpec(object.handle.store_fd, object.handle.mmap_size, - object.data_offset, object.data_size, object.metadata_offset, - object.metadata_size)); - } - auto message = CreatePlasmaGetReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), - fbb.CreateVectorOfStructs(objects.data(), num_objects)); - return PlasmaSend(sock, MessageType_PlasmaGetReply, &fbb, message); -} - -Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[], - int64_t num_objects) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - for (uoffset_t i = 0; i < num_objects; ++i) { - object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str()); - } - for (uoffset_t i = 0; i < num_objects; ++i) { - const PlasmaObjectSpec* object = message->plasma_objects()->Get(i); - plasma_objects[i].handle.store_fd = object->segment_index(); - plasma_objects[i].handle.mmap_size = object->mmap_size(); - plasma_objects[i].data_offset = object->data_offset(); - plasma_objects[i].data_size = object->data_size(); - plasma_objects[i].metadata_offset = object->metadata_offset(); - plasma_objects[i].metadata_size = object->metadata_size(); - } - return Status::OK(); -} - -// Fetch messages. 
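// A minimal sketch (not part of the original sources) showing both ends of the
// fetch message defined below, for which this file declares no reply helpers:
// the sender names the objects to pull locally, the receiver recovers the ID
// list. `payload` stands for the bytes returned by PlasmaReceive.
Status RequestFetch(int sock, const std::vector<ObjectID>& ids) {
  return SendFetchRequest(sock, ids.data(), static_cast<int64_t>(ids.size()));
}
Status ParseFetch(uint8_t* payload, std::vector<ObjectID>* ids) {
  return ReadFetchRequest(payload, *ids);  // appends one ObjectID per requested object
}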
- -Status SendFetchRequest(int sock, const ObjectID* object_ids, int64_t num_objects) { - flatbuffers::FlatBufferBuilder fbb; - auto message = - CreatePlasmaFetchRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects)); - return PlasmaSend(sock, MessageType_PlasmaFetchRequest, &fbb, message); -} - -Status ReadFetchRequest(uint8_t* data, std::vector& object_ids) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) { - object_ids.push_back(ObjectID::from_binary(message->object_ids()->Get(i)->str())); - } - return Status::OK(); -} - -// Wait messages. - -Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests, - int num_ready_objects, int64_t timeout_ms) { - flatbuffers::FlatBufferBuilder fbb; - - std::vector> object_request_specs; - for (int i = 0; i < num_requests; i++) { - object_request_specs.push_back(CreateObjectRequestSpec(fbb, - fbb.CreateString(object_requests[i].object_id.binary()), - object_requests[i].type)); - } - - auto message = CreatePlasmaWaitRequest( - fbb, fbb.CreateVector(object_request_specs), num_ready_objects, timeout_ms); - return PlasmaSend(sock, MessageType_PlasmaWaitRequest, &fbb, message); -} - -Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests, - int64_t* timeout_ms, int* num_ready_objects) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *num_ready_objects = message->num_ready_objects(); - *timeout_ms = message->timeout(); - - for (uoffset_t i = 0; i < message->object_requests()->size(); i++) { - ObjectID object_id = - ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str()); - ObjectRequest object_request({object_id, message->object_requests()->Get(i)->type(), - ObjectStatus_Nonexistent}); - object_requests[object_id] = object_request; - } - return Status::OK(); -} - -Status SendWaitReply( - int sock, const ObjectRequestMap& object_requests, int num_ready_objects) { - flatbuffers::FlatBufferBuilder fbb; - - std::vector> object_replies; - for (const auto& entry : object_requests) { - const auto& object_request = entry.second; - object_replies.push_back(CreateObjectReply( - fbb, fbb.CreateString(object_request.object_id.binary()), object_request.status)); - } - - auto message = CreatePlasmaWaitReply( - fbb, fbb.CreateVector(object_replies.data(), num_ready_objects), num_ready_objects); - return PlasmaSend(sock, MessageType_PlasmaWaitReply, &fbb, message); -} - -Status ReadWaitReply( - uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects) { - DCHECK(data); - - auto message = flatbuffers::GetRoot(data); - *num_ready_objects = message->num_ready_objects(); - for (int i = 0; i < *num_ready_objects; i++) { - object_requests[i].object_id = - ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str()); - object_requests[i].status = message->object_requests()->Get(i)->status(); - } - return Status::OK(); -} - -// Subscribe messages. - -Status SendSubscribeRequest(int sock) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaSubscribeRequest(fbb); - return PlasmaSend(sock, MessageType_PlasmaSubscribeRequest, &fbb, message); -} - -// Data messages. 
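// A minimal server-side sketch (not part of the original sources) of handling
// the data request parsed by the pair below; `payload` stands for the bytes
// returned by PlasmaReceive. ReadDataRequest strdup()s the address, so the
// caller owns the string and must free() it.
Status HandleDataRequest(uint8_t* payload) {
  ObjectID id;
  char* address = nullptr;
  int port = 0;
  RETURN_NOT_OK(ReadDataRequest(payload, &id, &address, &port));
  // ... initiate the transfer of `id` to address:port here ...
  free(address);  // allocated by ReadDataRequest via strdup()
  return Status::OK();
}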
- -Status SendDataRequest(int sock, ObjectID object_id, const char* address, int port) { - flatbuffers::FlatBufferBuilder fbb; - auto addr = fbb.CreateString(address, strlen(address)); - auto message = - CreatePlasmaDataRequest(fbb, fbb.CreateString(object_id.binary()), addr, port); - return PlasmaSend(sock, MessageType_PlasmaDataRequest, &fbb, message); -} - -Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - DCHECK(message->object_id()->size() == sizeof(ObjectID)); - *object_id = ObjectID::from_binary(message->object_id()->str()); - *address = strdup(message->address()->c_str()); - *port = message->port(); - return Status::OK(); -} - -Status SendDataReply( - int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size) { - flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaDataReply( - fbb, fbb.CreateString(object_id.binary()), object_size, metadata_size); - return PlasmaSend(sock, MessageType_PlasmaDataReply, &fbb, message); -} - -Status ReadDataReply( - uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size) { - DCHECK(data); - auto message = flatbuffers::GetRoot(data); - *object_id = ObjectID::from_binary(message->object_id()->str()); - *object_size = (int64_t)message->object_size(); - *metadata_size = (int64_t)message->metadata_size(); - return Status::OK(); -} diff --git a/cpp/src/plasma/protocol.h b/cpp/src/plasma/protocol.h deleted file mode 100644 index 5d9d13675144f..0000000000000 --- a/cpp/src/plasma/protocol.h +++ /dev/null @@ -1,170 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_PROTOCOL_H -#define PLASMA_PROTOCOL_H - -#include - -#include "arrow/status.h" -#include "format/plasma_generated.h" -#include "plasma/plasma.h" - -using arrow::Status; - -/* Plasma receive message. */ - -Status PlasmaReceive(int sock, int64_t message_type, std::vector* buffer); - -/* Plasma Create message functions. */ - -Status SendCreateRequest( - int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size); - -Status ReadCreateRequest( - uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size); - -Status SendCreateReply(int sock, ObjectID object_id, PlasmaObject* object, int error); - -Status ReadCreateReply(uint8_t* data, ObjectID* object_id, PlasmaObject* object); - -/* Plasma Seal message functions. */ - -Status SendSealRequest(int sock, ObjectID object_id, unsigned char* digest); - -Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest); - -Status SendSealReply(int sock, ObjectID object_id, int error); - -Status ReadSealReply(uint8_t* data, ObjectID* object_id); - -/* Plasma Get message functions. 
*/ - -Status SendGetRequest( - int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms); - -Status ReadGetRequest( - uint8_t* data, std::vector& object_ids, int64_t* timeout_ms); - -Status SendGetReply(int sock, ObjectID object_ids[], - std::unordered_map& plasma_objects, - int64_t num_objects); - -Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[], - int64_t num_objects); - -/* Plasma Release message functions. */ - -Status SendReleaseRequest(int sock, ObjectID object_id); - -Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id); - -Status SendReleaseReply(int sock, ObjectID object_id, int error); - -Status ReadReleaseReply(uint8_t* data, ObjectID* object_id); - -/* Plasma Delete message functions. */ - -Status SendDeleteRequest(int sock, ObjectID object_id); - -Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id); - -Status SendDeleteReply(int sock, ObjectID object_id, int error); - -Status ReadDeleteReply(uint8_t* data, ObjectID* object_id); - -/* Satus messages. */ - -Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objects); - -Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects); - -Status SendStatusReply( - int sock, ObjectID object_ids[], int object_status[], int64_t num_objects); - -int64_t ReadStatusReply_num_objects(uint8_t* data); - -Status ReadStatusReply( - uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects); - -/* Plasma Constains message functions. */ - -Status SendContainsRequest(int sock, ObjectID object_id); - -Status ReadContainsRequest(uint8_t* data, ObjectID* object_id); - -Status SendContainsReply(int sock, ObjectID object_id, bool has_object); - -Status ReadContainsReply(uint8_t* data, ObjectID* object_id, bool* has_object); - -/* Plasma Connect message functions. */ - -Status SendConnectRequest(int sock); - -Status ReadConnectRequest(uint8_t* data); - -Status SendConnectReply(int sock, int64_t memory_capacity); - -Status ReadConnectReply(uint8_t* data, int64_t* memory_capacity); - -/* Plasma Evict message functions (no reply so far). */ - -Status SendEvictRequest(int sock, int64_t num_bytes); - -Status ReadEvictRequest(uint8_t* data, int64_t* num_bytes); - -Status SendEvictReply(int sock, int64_t num_bytes); - -Status ReadEvictReply(uint8_t* data, int64_t& num_bytes); - -/* Plasma Fetch Remote message functions. */ - -Status SendFetchRequest(int sock, const ObjectID* object_ids, int64_t num_objects); - -Status ReadFetchRequest(uint8_t* data, std::vector& object_ids); - -/* Plasma Wait message functions. */ - -Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests, - int num_ready_objects, int64_t timeout_ms); - -Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests, - int64_t* timeout_ms, int* num_ready_objects); - -Status SendWaitReply( - int sock, const ObjectRequestMap& object_requests, int num_ready_objects); - -Status ReadWaitReply( - uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects); - -/* Plasma Subscribe message functions. */ - -Status SendSubscribeRequest(int sock); - -/* Data messages. 
*/ - -Status SendDataRequest(int sock, ObjectID object_id, const char* address, int port); - -Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port); - -Status SendDataReply( - int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size); - -Status ReadDataReply( - uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size); - -#endif /* PLASMA_PROTOCOL */ diff --git a/cpp/src/plasma/store.cc b/cpp/src/plasma/store.cc deleted file mode 100644 index 9394e3de310b2..0000000000000 --- a/cpp/src/plasma/store.cc +++ /dev/null @@ -1,683 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// PLASMA STORE: This is a simple object store server process -// -// It accepts incoming client connections on a unix domain socket -// (name passed in via the -s option of the executable) and uses a -// single thread to serve the clients. Each client establishes a -// connection and can create objects, wait for objects and seal -// objects through that connection. -// -// It keeps a hash table that maps object_ids (which are 20 byte long, -// just enough to store and SHA1 hash) to memory mapped files. - -#include "plasma/store.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "format/common_generated.h" -#include "plasma/common.h" -#include "plasma/fling.h" -#include "plasma/io.h" -#include "plasma/malloc.h" - -extern "C" { -void* dlmalloc(size_t bytes); -void* dlmemalign(size_t alignment, size_t bytes); -void dlfree(void* mem); -size_t dlmalloc_set_footprint_limit(size_t bytes); -} - -struct GetRequest { - GetRequest(Client* client, const std::vector& object_ids); - /// The client that called get. - Client* client; - /// The ID of the timer that will time out and cause this wait to return to - /// the client if it hasn't already returned. - int64_t timer; - /// The object IDs involved in this request. This is used in the reply. - std::vector object_ids; - /// The object information for the objects in this request. This is used in - /// the reply. - std::unordered_map objects; - /// The minimum number of objects to wait for in this request. - int64_t num_objects_to_wait_for; - /// The number of object requests in this wait request that are already - /// satisfied. 
- int64_t num_satisfied; -}; - -GetRequest::GetRequest(Client* client, const std::vector& object_ids) - : client(client), - timer(-1), - object_ids(object_ids.begin(), object_ids.end()), - objects(object_ids.size()), - num_satisfied(0) { - std::unordered_set unique_ids( - object_ids.begin(), object_ids.end()); - num_objects_to_wait_for = unique_ids.size(); -} - -Client::Client(int fd) : fd(fd) {} - -PlasmaStore::PlasmaStore(EventLoop* loop, int64_t system_memory) - : loop_(loop), eviction_policy_(&store_info_) { - store_info_.memory_capacity = system_memory; -} - -// TODO(pcm): Get rid of this destructor by using RAII to clean up data. -PlasmaStore::~PlasmaStore() { - for (const auto& element : pending_notifications_) { - auto object_notifications = element.second.object_notifications; - for (size_t i = 0; i < object_notifications.size(); ++i) { - uint8_t* notification = reinterpret_cast(object_notifications.at(i)); - uint8_t* data = notification; - // TODO(pcm): Get rid of this delete. - delete[] data; - } - } -} - -// If this client is not already using the object, add the client to the -// object's list of clients, otherwise do nothing. -void PlasmaStore::add_client_to_object_clients(ObjectTableEntry* entry, Client* client) { - // Check if this client is already using the object. - if (entry->clients.find(client) != entry->clients.end()) { return; } - // If there are no other clients using this object, notify the eviction policy - // that the object is being used. - if (entry->clients.size() == 0) { - // Tell the eviction policy that this object is being used. - std::vector objects_to_evict; - eviction_policy_.begin_object_access(entry->object_id, &objects_to_evict); - delete_objects(objects_to_evict); - } - // Add the client pointer to the list of clients using this object. - entry->clients.insert(client); -} - -// Create a new object buffer in the hash table. -int PlasmaStore::create_object(const ObjectID& object_id, int64_t data_size, - int64_t metadata_size, Client* client, PlasmaObject* result) { - ARROW_LOG(DEBUG) << "creating object " << object_id.hex(); - if (store_info_.objects.count(object_id) != 0) { - // There is already an object with the same ID in the Plasma Store, so - // ignore this requst. - return PlasmaError_ObjectExists; - } - // Try to evict objects until there is enough space. - uint8_t* pointer; - do { - // Allocate space for the new object. We use dlmemalign instead of dlmalloc - // in order to align the allocated region to a 64-byte boundary. This is not - // strictly necessary, but it is an optimization that could speed up the - // computation of a hash of the data (see compute_object_hash_parallel in - // plasma_client.cc). Note that even though this pointer is 64-byte aligned, - // it is not guaranteed that the corresponding pointer in the client will be - // 64-byte aligned, but in practice it often will be. - pointer = - reinterpret_cast(dlmemalign(BLOCK_SIZE, data_size + metadata_size)); - if (pointer == NULL) { - // Tell the eviction policy how much space we need to create this object. - std::vector objects_to_evict; - bool success = - eviction_policy_.require_space(data_size + metadata_size, &objects_to_evict); - delete_objects(objects_to_evict); - // Return an error to the client if not enough space could be freed to - // create the object. 
- if (!success) { return PlasmaError_OutOfMemory; } - } - } while (pointer == NULL); - int fd; - int64_t map_size; - ptrdiff_t offset; - get_malloc_mapinfo(pointer, &fd, &map_size, &offset); - assert(fd != -1); - - auto entry = std::unique_ptr(new ObjectTableEntry()); - entry->object_id = object_id; - entry->info.object_id = object_id.binary(); - entry->info.data_size = data_size; - entry->info.metadata_size = metadata_size; - entry->pointer = pointer; - // TODO(pcm): Set the other fields. - entry->fd = fd; - entry->map_size = map_size; - entry->offset = offset; - entry->state = PLASMA_CREATED; - - store_info_.objects[object_id] = std::move(entry); - result->handle.store_fd = fd; - result->handle.mmap_size = map_size; - result->data_offset = offset; - result->metadata_offset = offset + data_size; - result->data_size = data_size; - result->metadata_size = metadata_size; - // Notify the eviction policy that this object was created. This must be done - // immediately before the call to add_client_to_object_clients so that the - // eviction policy does not have an opportunity to evict the object. - eviction_policy_.object_created(object_id); - // Record that this client is using this object. - add_client_to_object_clients(store_info_.objects[object_id].get(), client); - return PlasmaError_OK; -} - -void PlasmaObject_init(PlasmaObject* object, ObjectTableEntry* entry) { - DCHECK(object != NULL); - DCHECK(entry != NULL); - DCHECK(entry->state == PLASMA_SEALED); - object->handle.store_fd = entry->fd; - object->handle.mmap_size = entry->map_size; - object->data_offset = entry->offset; - object->metadata_offset = entry->offset + entry->info.data_size; - object->data_size = entry->info.data_size; - object->metadata_size = entry->info.metadata_size; -} - -void PlasmaStore::return_from_get(GetRequest* get_req) { - // Send the get reply to the client. - Status s = SendGetReply(get_req->client->fd, &get_req->object_ids[0], get_req->objects, - get_req->object_ids.size()); - warn_if_sigpipe(s.ok() ? 0 : -1, get_req->client->fd); - // If we successfully sent the get reply message to the client, then also send - // the file descriptors. - if (s.ok()) { - // Send all of the file descriptors for the present objects. - for (const auto& object_id : get_req->object_ids) { - PlasmaObject& object = get_req->objects[object_id]; - // We use the data size to indicate whether the object is present or not. - if (object.data_size != -1) { - int error_code = send_fd(get_req->client->fd, object.handle.store_fd); - // If we failed to send the file descriptor, loop until we have sent it - // successfully. TODO(rkn): This is problematic for two reasons. First - // of all, sending the file descriptor should just succeed without any - // errors, but sometimes I see a "Message too long" error number. - // Second, looping like this allows a client to potentially block the - // plasma store event loop which should never happen. - while (error_code < 0) { - if (errno == EMSGSIZE) { - ARROW_LOG(WARNING) << "Failed to send file descriptor, retrying."; - error_code = send_fd(get_req->client->fd, object.handle.store_fd); - continue; - } - warn_if_sigpipe(error_code, get_req->client->fd); - break; - } - } - } - } - - // Remove the get request from each of the relevant object_get_requests hash - // tables if it is present there. It should only be present there if the get - // request timed out. - for (ObjectID& object_id : get_req->object_ids) { - auto& get_requests = object_get_requests_[object_id]; - // Erase get_req from the vector. 
- auto it = std::find(get_requests.begin(), get_requests.end(), get_req); - if (it != get_requests.end()) { get_requests.erase(it); } - } - // Remove the get request. - if (get_req->timer != -1) { ARROW_CHECK(loop_->remove_timer(get_req->timer) == AE_OK); } - delete get_req; -} - -void PlasmaStore::update_object_get_requests(const ObjectID& object_id) { - std::vector& get_requests = object_get_requests_[object_id]; - size_t index = 0; - size_t num_requests = get_requests.size(); - for (size_t i = 0; i < num_requests; ++i) { - GetRequest* get_req = get_requests[index]; - auto entry = get_object_table_entry(&store_info_, object_id); - ARROW_CHECK(entry != NULL); - - PlasmaObject_init(&get_req->objects[object_id], entry); - get_req->num_satisfied += 1; - // Record the fact that this client will be using this object and will - // be responsible for releasing this object. - add_client_to_object_clients(entry, get_req->client); - - // If this get request is done, reply to the client. - if (get_req->num_satisfied == get_req->num_objects_to_wait_for) { - return_from_get(get_req); - } else { - // The call to return_from_get will remove the current element in the - // array, so we only increment the counter in the else branch. - index += 1; - } - } - - DCHECK(index == get_requests.size()); - // Remove the array of get requests for this object, since no one should be - // waiting for this object anymore. - object_get_requests_.erase(object_id); -} - -void PlasmaStore::process_get_request( - Client* client, const std::vector& object_ids, int64_t timeout_ms) { - // Create a get request for this object. - GetRequest* get_req = new GetRequest(client, object_ids); - - for (auto object_id : object_ids) { - // Check if this object is already present locally. If so, record that the - // object is being used and mark it as accounted for. - auto entry = get_object_table_entry(&store_info_, object_id); - if (entry && entry->state == PLASMA_SEALED) { - // Update the get request to take into account the present object. - PlasmaObject_init(&get_req->objects[object_id], entry); - get_req->num_satisfied += 1; - // If necessary, record that this client is using this object. In the case - // where entry == NULL, this will be called from seal_object. - add_client_to_object_clients(entry, client); - } else { - // Add a placeholder plasma object to the get request to indicate that the - // object is not present. This will be parsed by the client. We set the - // data size to -1 to indicate that the object is not present. - get_req->objects[object_id].data_size = -1; - // Add the get request to the relevant data structures. - object_get_requests_[object_id].push_back(get_req); - } - } - - // If all of the objects are present already or if the timeout is 0, return to - // the client. - if (get_req->num_satisfied == get_req->num_objects_to_wait_for || timeout_ms == 0) { - return_from_get(get_req); - } else if (timeout_ms != -1) { - // Set a timer that will cause the get request to return to the client. Note - // that a timeout of -1 is used to indicate that no timer should be set. 
- get_req->timer = loop_->add_timer(timeout_ms, [this, get_req](int64_t timer_id) { - return_from_get(get_req); - return kEventLoopTimerDone; - }); - } -} - -int PlasmaStore::remove_client_from_object_clients( - ObjectTableEntry* entry, Client* client) { - auto it = entry->clients.find(client); - if (it != entry->clients.end()) { - entry->clients.erase(it); - // If no more clients are using this object, notify the eviction policy - // that the object is no longer being used. - if (entry->clients.size() == 0) { - // Tell the eviction policy that this object is no longer being used. - std::vector objects_to_evict; - eviction_policy_.end_object_access(entry->object_id, &objects_to_evict); - delete_objects(objects_to_evict); - } - // Return 1 to indicate that the client was removed. - return 1; - } else { - // Return 0 to indicate that the client was not removed. - return 0; - } -} - -void PlasmaStore::release_object(const ObjectID& object_id, Client* client) { - auto entry = get_object_table_entry(&store_info_, object_id); - ARROW_CHECK(entry != NULL); - // Remove the client from the object's array of clients. - ARROW_CHECK(remove_client_from_object_clients(entry, client) == 1); -} - -// Check if an object is present. -int PlasmaStore::contains_object(const ObjectID& object_id) { - auto entry = get_object_table_entry(&store_info_, object_id); - return entry && (entry->state == PLASMA_SEALED) ? OBJECT_FOUND : OBJECT_NOT_FOUND; -} - -// Seal an object that has been created in the hash table. -void PlasmaStore::seal_object(const ObjectID& object_id, unsigned char digest[]) { - ARROW_LOG(DEBUG) << "sealing object " << object_id.hex(); - auto entry = get_object_table_entry(&store_info_, object_id); - ARROW_CHECK(entry != NULL); - ARROW_CHECK(entry->state == PLASMA_CREATED); - // Set the state of object to SEALED. - entry->state = PLASMA_SEALED; - // Set the object digest. - entry->info.digest = std::string(reinterpret_cast(&digest[0]), kDigestSize); - // Inform all subscribers that a new object has been sealed. - push_notification(&entry->info); - - // Update all get requests that involve this object. - update_object_get_requests(object_id); -} - -void PlasmaStore::delete_objects(const std::vector& object_ids) { - for (const auto& object_id : object_ids) { - ARROW_LOG(DEBUG) << "deleting object " << object_id.hex(); - auto entry = get_object_table_entry(&store_info_, object_id); - // TODO(rkn): This should probably not fail, but should instead throw an - // error. Maybe we should also support deleting objects that have been - // created but not sealed. - ARROW_CHECK(entry != NULL) << "To delete an object it must be in the object table."; - ARROW_CHECK(entry->state == PLASMA_SEALED) - << "To delete an object it must have been sealed."; - ARROW_CHECK(entry->clients.size() == 0) - << "To delete an object, there must be no clients currently using it."; - dlfree(entry->pointer); - store_info_.objects.erase(object_id); - // Inform all subscribers that the object has been deleted. - ObjectInfoT notification; - notification.object_id = object_id.binary(); - notification.is_deletion = true; - push_notification(¬ification); - } -} - -void PlasmaStore::connect_client(int listener_sock) { - int client_fd = AcceptClient(listener_sock); - // This is freed in disconnect_client. - Client* client = new Client(client_fd); - // Add a callback to handle events on this socket. - // TODO(pcm): Check return value. 
- loop_->add_file_event(client_fd, kEventLoopRead, [this, client](int events) { - Status s = process_message(client); - if (!s.ok()) { ARROW_LOG(FATAL) << "Failed to process file event: " << s; } - }); - ARROW_LOG(DEBUG) << "New connection with fd " << client_fd; -} - -void PlasmaStore::disconnect_client(Client* client) { - ARROW_CHECK(client != NULL); - ARROW_CHECK(client->fd > 0); - loop_->remove_file_event(client->fd); - // Close the socket. - close(client->fd); - ARROW_LOG(INFO) << "Disconnecting client on fd " << client->fd; - // If this client was using any objects, remove it from the appropriate - // lists. - for (const auto& entry : store_info_.objects) { - remove_client_from_object_clients(entry.second.get(), client); - } - // Note, the store may still attempt to send a message to the disconnected - // client (for example, when an object ID that the client was waiting for - // is ready). In these cases, the attempt to send the message will fail, but - // the store should just ignore the failure. - delete client; -} - -/// Send notifications about sealed objects to the subscribers. This is called -/// in seal_object. If the socket's send buffer is full, the notification will -/// be -/// buffered, and this will be called again when the send buffer has room. -/// -/// @param client The client to send the notification to. -/// @return Void. -void PlasmaStore::send_notifications(int client_fd) { - auto it = pending_notifications_.find(client_fd); - - int num_processed = 0; - bool closed = false; - // Loop over the array of pending notifications and send as many of them as - // possible. - for (size_t i = 0; i < it->second.object_notifications.size(); ++i) { - uint8_t* notification = - reinterpret_cast(it->second.object_notifications.at(i)); - // Decode the length, which is the first bytes of the message. - int64_t size = *(reinterpret_cast(notification)); - - // Attempt to send a notification about this object ID. - ssize_t nbytes = send(client_fd, notification, sizeof(int64_t) + size, 0); - if (nbytes >= 0) { - ARROW_CHECK(nbytes == static_cast(sizeof(int64_t)) + size); - } else if (nbytes == -1 && - (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { - ARROW_LOG(DEBUG) << "The socket's send buffer is full, so we are caching this " - "notification and will send it later."; - // Add a callback to the event loop to send queued notifications whenever - // there is room in the socket's send buffer. Callbacks can be added - // more than once here and will be overwritten. The callback is removed - // at the end of the method. - // TODO(pcm): Introduce status codes and check in case the file descriptor - // is added twice. - loop_->add_file_event(client_fd, kEventLoopWrite, - [this, client_fd](int events) { send_notifications(client_fd); }); - break; - } else { - ARROW_LOG(WARNING) << "Failed to send notification to client on fd " << client_fd; - if (errno == EPIPE) { - closed = true; - break; - } - } - num_processed += 1; - // The corresponding malloc happened in create_object_info_buffer - // within push_notification. - delete[] notification; - } - // Remove the sent notifications from the array. - it->second.object_notifications.erase(it->second.object_notifications.begin(), - it->second.object_notifications.begin() + num_processed); - - // Stop sending notifications if the pipe was broken. - if (closed) { - close(client_fd); - pending_notifications_.erase(client_fd); - } - - // If we have sent all notifications, remove the fd from the event loop. 
- if (it->second.object_notifications.empty()) { loop_->remove_file_event(client_fd); } -} - -void PlasmaStore::push_notification(ObjectInfoT* object_info) { - for (auto& element : pending_notifications_) { - uint8_t* notification = create_object_info_buffer(object_info); - element.second.object_notifications.push_back(notification); - send_notifications(element.first); - // The notification gets freed in send_notifications when the notification - // is sent over the socket. - } -} - -// Subscribe to notifications about sealed objects. -void PlasmaStore::subscribe_to_updates(Client* client) { - ARROW_LOG(DEBUG) << "subscribing to updates on fd " << client->fd; - // TODO(rkn): The store could block here if the client doesn't send a file - // descriptor. - int fd = recv_fd(client->fd); - if (fd < 0) { - // This may mean that the client died before sending the file descriptor. - ARROW_LOG(WARNING) << "Failed to receive file descriptor from client on fd " - << client->fd << "."; - return; - } - - // Create a new array to buffer notifications that can't be sent to the - // subscriber yet because the socket send buffer is full. TODO(rkn): the queue - // never gets freed. - // TODO(pcm): Is the following neccessary? - pending_notifications_[fd]; - - // Push notifications to the new subscriber about existing objects. - for (const auto& entry : store_info_.objects) { - push_notification(&entry.second->info); - } - send_notifications(fd); -} - -Status PlasmaStore::process_message(Client* client) { - int64_t type; - Status s = ReadMessage(client->fd, &type, &input_buffer_); - ARROW_CHECK(s.ok() || s.IsIOError()); - - uint8_t* input = input_buffer_.data(); - ObjectID object_id; - PlasmaObject object; - // TODO(pcm): Get rid of the following. - memset(&object, 0, sizeof(object)); - - // Process the different types of requests. - switch (type) { - case MessageType_PlasmaCreateRequest: { - int64_t data_size; - int64_t metadata_size; - RETURN_NOT_OK(ReadCreateRequest(input, &object_id, &data_size, &metadata_size)); - int error_code = - create_object(object_id, data_size, metadata_size, client, &object); - HANDLE_SIGPIPE( - SendCreateReply(client->fd, object_id, &object, error_code), client->fd); - if (error_code == PlasmaError_OK) { - warn_if_sigpipe(send_fd(client->fd, object.handle.store_fd), client->fd); - } - } break; - case MessageType_PlasmaGetRequest: { - std::vector object_ids_to_get; - int64_t timeout_ms; - RETURN_NOT_OK(ReadGetRequest(input, object_ids_to_get, &timeout_ms)); - process_get_request(client, object_ids_to_get, timeout_ms); - } break; - case MessageType_PlasmaReleaseRequest: - RETURN_NOT_OK(ReadReleaseRequest(input, &object_id)); - release_object(object_id, client); - break; - case MessageType_PlasmaContainsRequest: - RETURN_NOT_OK(ReadContainsRequest(input, &object_id)); - if (contains_object(object_id) == OBJECT_FOUND) { - HANDLE_SIGPIPE(SendContainsReply(client->fd, object_id, 1), client->fd); - } else { - HANDLE_SIGPIPE(SendContainsReply(client->fd, object_id, 0), client->fd); - } - break; - case MessageType_PlasmaSealRequest: { - unsigned char digest[kDigestSize]; - RETURN_NOT_OK(ReadSealRequest(input, &object_id, &digest[0])); - seal_object(object_id, &digest[0]); - } break; - case MessageType_PlasmaEvictRequest: { - // This code path should only be used for testing. 
- int64_t num_bytes; - RETURN_NOT_OK(ReadEvictRequest(input, &num_bytes)); - std::vector objects_to_evict; - int64_t num_bytes_evicted = - eviction_policy_.choose_objects_to_evict(num_bytes, &objects_to_evict); - delete_objects(objects_to_evict); - HANDLE_SIGPIPE(SendEvictReply(client->fd, num_bytes_evicted), client->fd); - } break; - case MessageType_PlasmaSubscribeRequest: - subscribe_to_updates(client); - break; - case MessageType_PlasmaConnectRequest: { - HANDLE_SIGPIPE( - SendConnectReply(client->fd, store_info_.memory_capacity), client->fd); - } break; - case DISCONNECT_CLIENT: - ARROW_LOG(DEBUG) << "Disconnecting client on fd " << client->fd; - disconnect_client(client); - break; - default: - // This code should be unreachable. - ARROW_CHECK(0); - } - return Status::OK(); -} - -// Report "success" to valgrind. -void signal_handler(int signal) { - if (signal == SIGTERM) { exit(0); } -} - -void start_server(char* socket_name, int64_t system_memory) { - // Ignore SIGPIPE signals. If we don't do this, then when we attempt to write - // to a client that has already died, the store could die. - signal(SIGPIPE, SIG_IGN); - // Create the event loop. - EventLoop loop; - PlasmaStore store(&loop, system_memory); - int socket = bind_ipc_sock(socket_name, true); - ARROW_CHECK(socket >= 0); - // TODO(pcm): Check return value. - loop.add_file_event(socket, kEventLoopRead, - [&store, socket](int events) { store.connect_client(socket); }); - loop.run(); -} - -int main(int argc, char* argv[]) { - signal(SIGTERM, signal_handler); - char* socket_name = NULL; - int64_t system_memory = -1; - int c; - while ((c = getopt(argc, argv, "s:m:")) != -1) { - switch (c) { - case 's': - socket_name = optarg; - break; - case 'm': { - char extra; - int scanned = sscanf(optarg, "%" SCNd64 "%c", &system_memory, &extra); - ARROW_CHECK(scanned == 1); - ARROW_LOG(INFO) << "Allowing the Plasma store to use up to " - << static_cast(system_memory) / 1000000000 - << "GB of memory."; - break; - } - default: - exit(-1); - } - } - if (!socket_name) { - ARROW_LOG(FATAL) << "please specify socket for incoming connections with -s switch"; - } - if (system_memory == -1) { - ARROW_LOG(FATAL) << "please specify the amount of system memory with -m switch"; - } -#ifdef __linux__ - // On Linux, check that the amount of memory available in /dev/shm is large - // enough to accommodate the request. If it isn't, then fail. - int shm_fd = open("/dev/shm", O_RDONLY); - struct statvfs shm_vfs_stats; - fstatvfs(shm_fd, &shm_vfs_stats); - // The value shm_vfs_stats.f_bsize is the block size, and the value - // shm_vfs_stats.f_bavail is the number of available blocks. - int64_t shm_mem_avail = shm_vfs_stats.f_bsize * shm_vfs_stats.f_bavail; - close(shm_fd); - if (system_memory > shm_mem_avail) { - ARROW_LOG(FATAL) << "System memory request exceeds memory available in /dev/shm. The " - "request is for " - << system_memory << " bytes, and the amount available is " - << shm_mem_avail - << " bytes. You may be able to free up space by deleting files in " - "/dev/shm. If you are inside a Docker container, you may need to " - "pass " - "an argument with the flag '--shm-size' to 'docker run'."; - } -#endif - // Make it so dlmalloc fails if we try to request more memory than is - // available. 
- dlmalloc_set_footprint_limit((size_t)system_memory); - ARROW_LOG(DEBUG) << "starting server listening on " << socket_name; - start_server(socket_name, system_memory); -} diff --git a/cpp/src/plasma/store.h b/cpp/src/plasma/store.h deleted file mode 100644 index 8bd94265410f6..0000000000000 --- a/cpp/src/plasma/store.h +++ /dev/null @@ -1,169 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_STORE_H -#define PLASMA_STORE_H - -#include -#include - -#include "plasma/common.h" -#include "plasma/events.h" -#include "plasma/eviction_policy.h" -#include "plasma/plasma.h" -#include "plasma/protocol.h" - -struct GetRequest; - -struct NotificationQueue { - /// The object notifications for clients. We notify the client about the - /// objects in the order that the objects were sealed or deleted. - std::deque object_notifications; -}; - -/// Contains all information that is associated with a Plasma store client. -struct Client { - explicit Client(int fd); - - /// The file descriptor used to communicate with the client. - int fd; -}; - -class PlasmaStore { - public: - PlasmaStore(EventLoop* loop, int64_t system_memory); - - ~PlasmaStore(); - - /// Create a new object. The client must do a call to release_object to tell - /// the store when it is done with the object. - /// - /// @param object_id Object ID of the object to be created. - /// @param data_size Size in bytes of the object to be created. - /// @param metadata_size Size in bytes of the object metadata. - /// @return One of the following error codes: - /// - PlasmaError_OK, if the object was created successfully. - /// - PlasmaError_ObjectExists, if an object with this ID is already - /// present in the store. In this case, the client should not call - /// plasma_release. - /// - PlasmaError_OutOfMemory, if the store is out of memory and - /// cannot create the object. In this case, the client should not call - /// plasma_release. - int create_object(const ObjectID& object_id, int64_t data_size, int64_t metadata_size, - Client* client, PlasmaObject* result); - - /// Delete objects that have been created in the hash table. This should only - /// be called on objects that are returned by the eviction policy to evict. - /// - /// @param object_ids Object IDs of the objects to be deleted. - /// @return Void. - void delete_objects(const std::vector& object_ids); - - /// Process a get request from a client. This method assumes that we will - /// eventually have these objects sealed. If one of the objects has not yet - /// been sealed, the client that requested the object will be notified when it - /// is sealed. - /// - /// For each object, the client must do a call to release_object to tell the - /// store when it is done with the object. 
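Editor's note: the documentation below stresses that every successful create/get must eventually be paired with a release call. A generic, self-contained scope guard is one way a client-side wrapper could make that pairing automatic; the guard type and the client.Release call in the usage comment are illustrative assumptions, not part of the removed API:

#include <utility>

// Runs the given callable when it goes out of scope (C++11).
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : f_(std::move(f)), active_(true) {}
  ScopeGuard(ScopeGuard&& other) : f_(std::move(other.f_)), active_(other.active_) {
    other.active_ = false;
  }
  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;
  ~ScopeGuard() {
    if (active_) f_();
  }

 private:
  F f_;
  bool active_;
};

template <typename F>
ScopeGuard<F> make_guard(F f) {
  return ScopeGuard<F>(std::move(f));
}

// Hypothetical usage: release the object no matter how the scope is left.
//   auto release = make_guard([&] { client.Release(object_id); });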
- /// - /// @param client The client making this request. - /// @param object_ids Object IDs of the objects to be gotten. - /// @param timeout_ms The timeout for the get request in milliseconds. - /// @return Void. - void process_get_request( - Client* client, const std::vector& object_ids, int64_t timeout_ms); - - /// Seal an object. The object is now immutable and can be accessed with get. - /// - /// @param object_id Object ID of the object to be sealed. - /// @param digest The digest of the object. This is used to tell if two - /// objects - /// with the same object ID are the same. - /// @return Void. - void seal_object(const ObjectID& object_id, unsigned char digest[]); - - /// Check if the plasma store contains an object: - /// - /// @param object_id Object ID that will be checked. - /// @return OBJECT_FOUND if the object is in the store, OBJECT_NOT_FOUND if - /// not - int contains_object(const ObjectID& object_id); - - /// Record the fact that a particular client is no longer using an object. - /// - /// @param object_id The object ID of the object that is being released. - /// @param client The client making this request. - /// @param Void. - void release_object(const ObjectID& object_id, Client* client); - - /// Subscribe a file descriptor to updates about new sealed objects. - /// - /// @param client The client making this request. - /// @return Void. - void subscribe_to_updates(Client* client); - - /// Connect a new client to the PlasmaStore. - /// - /// @param listener_sock The socket that is listening to incoming connections. - /// @return Void. - void connect_client(int listener_sock); - - /// Disconnect a client from the PlasmaStore. - /// - /// @param client The client that is disconnected. - /// @return Void. - void disconnect_client(Client* client); - - void send_notifications(int client_fd); - - Status process_message(Client* client); - - private: - void push_notification(ObjectInfoT* object_notification); - - void add_client_to_object_clients(ObjectTableEntry* entry, Client* client); - - void return_from_get(GetRequest* get_req); - - void update_object_get_requests(const ObjectID& object_id); - - int remove_client_from_object_clients(ObjectTableEntry* entry, Client* client); - - /// Event loop of the plasma store. - EventLoop* loop_; - /// The plasma store information, including the object tables, that is exposed - /// to the eviction policy. - PlasmaStoreInfo store_info_; - /// The state that is managed by the eviction policy. - EvictionPolicy eviction_policy_; - /// Input buffer. This is allocated only once to avoid mallocs for every - /// call to process_message. - std::vector input_buffer_; - /// A hash table mapping object IDs to a vector of the get requests that are - /// waiting for the object to arrive. - std::unordered_map, UniqueIDHasher> - object_get_requests_; - /// The pending notifications that have not been sent to subscribers because - /// the socket send buffers were full. This is a hash table from client file - /// descriptor to an array of object_ids to send to that client. - /// TODO(pcm): Consider putting this into the Client data structure and - /// reorganize the code slightly. 
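Editor's note: object_get_requests_ above keys an unordered_map by ObjectID through UniqueIDHasher. A stand-alone sketch of that pattern, using made-up stand-ins (FixedId, FixedIdHasher; the 20-byte width is chosen only for illustration):

#include <array>
#include <cstdint>
#include <cstring>
#include <unordered_map>
#include <vector>

struct GetRequest;  // Forward declaration, as in the header above.

// A fixed-width binary ID usable as an unordered_map key.
struct FixedId {
  std::array<uint8_t, 20> bytes{};
  bool operator==(const FixedId& other) const { return bytes == other.bytes; }
};

struct FixedIdHasher {
  size_t operator()(const FixedId& id) const {
    // The IDs are effectively random bytes, so a prefix already hashes well.
    size_t value;
    std::memcpy(&value, id.bytes.data(), sizeof(value));
    return value;
  }
};

// Shape of a table mapping object IDs to the get requests waiting on them.
using GetRequestTable =
    std::unordered_map<FixedId, std::vector<GetRequest*>, FixedIdHasher>;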
- std::unordered_map pending_notifications_; -}; - -#endif // PLASMA_STORE_H diff --git a/cpp/src/plasma/test/client_tests.cc b/cpp/src/plasma/test/client_tests.cc deleted file mode 100644 index 29b5b135144c3..0000000000000 --- a/cpp/src/plasma/test/client_tests.cc +++ /dev/null @@ -1,132 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "gtest/gtest.h" - -#include -#include -#include -#include -#include -#include - -#include "plasma/client.h" -#include "plasma/common.h" -#include "plasma/plasma.h" -#include "plasma/protocol.h" - -std::string g_test_executable; // NOLINT - -class TestPlasmaStore : public ::testing::Test { - public: - // TODO(pcm): At the moment, stdout of the test gets mixed up with - // stdout of the object store. Consider changing that. - void SetUp() { - std::string plasma_directory = - g_test_executable.substr(0, g_test_executable.find_last_of("/")); - std::string plasma_command = - plasma_directory + - "/plasma_store -m 1000000000 -s /tmp/store 1> /dev/null 2> /dev/null &"; - system(plasma_command.c_str()); - ARROW_CHECK_OK(client_.Connect("/tmp/store", "", PLASMA_DEFAULT_RELEASE_DELAY)); - } - virtual void Finish() { - ARROW_CHECK_OK(client_.Disconnect()); - system("killall plasma_store &"); - } - - protected: - PlasmaClient client_; -}; - -TEST_F(TestPlasmaStore, ContainsTest) { - ObjectID object_id = ObjectID::from_random(); - - // Test for object non-existence. - bool has_object; - ARROW_CHECK_OK(client_.Contains(object_id, &has_object)); - ASSERT_EQ(has_object, false); - - // Test for the object being in local Plasma store. - // First create object. - int64_t data_size = 100; - uint8_t metadata[] = {5}; - int64_t metadata_size = sizeof(metadata); - uint8_t* data; - ARROW_CHECK_OK(client_.Create(object_id, data_size, metadata, metadata_size, &data)); - ARROW_CHECK_OK(client_.Seal(object_id)); - // Avoid race condition of Plasma Manager waiting for notification. - ObjectBuffer object_buffer; - ARROW_CHECK_OK(client_.Get(&object_id, 1, -1, &object_buffer)); - ARROW_CHECK_OK(client_.Contains(object_id, &has_object)); - ASSERT_EQ(has_object, true); -} - -TEST_F(TestPlasmaStore, GetTest) { - ObjectID object_id = ObjectID::from_random(); - ObjectBuffer object_buffer; - - // Test for object non-existence. - ARROW_CHECK_OK(client_.Get(&object_id, 1, 0, &object_buffer)); - ASSERT_EQ(object_buffer.data_size, -1); - - // Test for the object being in local Plasma store. - // First create object. 
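Editor's note: in the TestPlasmaStore fixture above, Finish() appears never to be invoked; googletest only calls SetUp() and TearDown(), so the plasma_store process started in SetUp() is not stopped by the framework. A minimal sketch of the usual fixture pattern, with a made-up process name:

#include <cstdlib>

#include "gtest/gtest.h"

class ExternalProcessTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Start the process under test (illustrative command only).
    std::system("./my_server 1>/dev/null 2>&1 &");
  }
  void TearDown() override {
    // Runs after every test body, including when assertions fail.
    std::system("killall my_server");
  }
};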
- int64_t data_size = 4; - uint8_t metadata[] = {5}; - int64_t metadata_size = sizeof(metadata); - uint8_t* data; - ARROW_CHECK_OK(client_.Create(object_id, data_size, metadata, metadata_size, &data)); - for (int64_t i = 0; i < data_size; i++) { - data[i] = static_cast(i % 4); - } - ARROW_CHECK_OK(client_.Seal(object_id)); - - ARROW_CHECK_OK(client_.Get(&object_id, 1, -1, &object_buffer)); - for (int64_t i = 0; i < data_size; i++) { - ASSERT_EQ(data[i], object_buffer.data[i]); - } -} - -TEST_F(TestPlasmaStore, MultipleGetTest) { - ObjectID object_id1 = ObjectID::from_random(); - ObjectID object_id2 = ObjectID::from_random(); - ObjectID object_ids[2] = {object_id1, object_id2}; - ObjectBuffer object_buffer[2]; - - int64_t data_size = 4; - uint8_t metadata[] = {5}; - int64_t metadata_size = sizeof(metadata); - uint8_t* data; - ARROW_CHECK_OK(client_.Create(object_id1, data_size, metadata, metadata_size, &data)); - data[0] = 1; - ARROW_CHECK_OK(client_.Seal(object_id1)); - - ARROW_CHECK_OK(client_.Create(object_id2, data_size, metadata, metadata_size, &data)); - data[0] = 2; - ARROW_CHECK_OK(client_.Seal(object_id2)); - - ARROW_CHECK_OK(client_.Get(object_ids, 2, -1, object_buffer)); - ASSERT_EQ(object_buffer[0].data[0], 1); - ASSERT_EQ(object_buffer[1].data[0], 2); -} - -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - g_test_executable = std::string(argv[0]); - return RUN_ALL_TESTS(); -} diff --git a/cpp/src/plasma/test/run_tests.sh b/cpp/src/plasma/test/run_tests.sh deleted file mode 100644 index 958bd08398e23..0000000000000 --- a/cpp/src/plasma/test/run_tests.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Cause the script to exit if a single command fails. -set -e - -./src/plasma/plasma_store -s /tmp/plasma_store_socket_1 -m 0 & -sleep 1 -./src/plasma/manager_tests -killall plasma_store -./src/plasma/serialization_tests - -# Start the Redis shards. -./src/common/thirdparty/redis/src/redis-server --loglevel warning --loadmodule ./src/common/redis_module/libray_redis_module.so --port 6379 & -redis_pid1=$! -./src/common/thirdparty/redis/src/redis-server --loglevel warning --loadmodule ./src/common/redis_module/libray_redis_module.so --port 6380 & -redis_pid2=$! -sleep 1 - -# Flush the redis server -./src/common/thirdparty/redis/src/redis-cli flushall -# Register the shard location with the primary shard. -./src/common/thirdparty/redis/src/redis-cli set NumRedisShards 1 -./src/common/thirdparty/redis/src/redis-cli rpush RedisShards 127.0.0.1:6380 -sleep 1 -./src/plasma/plasma_store -s /tmp/store1 -m 1000000000 & -plasma1_pid=$! -./src/plasma/plasma_manager -m /tmp/manager1 -s /tmp/store1 -h 127.0.0.1 -p 11111 -r 127.0.0.1:6379 & -plasma2_pid=$! 
-./src/plasma/plasma_store -s /tmp/store2 -m 1000000000 & -plasma3_pid=$! -./src/plasma/plasma_manager -m /tmp/manager2 -s /tmp/store2 -h 127.0.0.1 -p 22222 -r 127.0.0.1:6379 & -plasma4_pid=$! -sleep 1 - -./src/plasma/client_tests - -kill $plasma4_pid -kill $plasma3_pid -kill $plasma2_pid -kill $plasma1_pid -kill $redis_pid1 -wait $redis_pid1 -kill $redis_pid2 -wait $redis_pid2 diff --git a/cpp/src/plasma/test/run_valgrind.sh b/cpp/src/plasma/test/run_valgrind.sh deleted file mode 100644 index 0472194128679..0000000000000 --- a/cpp/src/plasma/test/run_valgrind.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Cause the script to exit if a single command fails. -set -e - -./src/plasma/plasma_store -s /tmp/plasma_store_socket_1 -m 0 & -sleep 1 -valgrind --leak-check=full --error-exitcode=1 ./src/plasma/manager_tests -killall plasma_store -valgrind --leak-check=full --error-exitcode=1 ./src/plasma/serialization_tests diff --git a/cpp/src/plasma/test/serialization_tests.cc b/cpp/src/plasma/test/serialization_tests.cc deleted file mode 100644 index 325cead06e770..0000000000000 --- a/cpp/src/plasma/test/serialization_tests.cc +++ /dev/null @@ -1,388 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "gtest/gtest.h" - -#include -#include - -#include "plasma/common.h" -#include "plasma/io.h" -#include "plasma/plasma.h" -#include "plasma/protocol.h" - -/** - * Create a temporary file. Needs to be closed by the caller. - * - * @return File descriptor of the file. - */ -int create_temp_file(void) { - static char temp[] = "/tmp/tempfileXXXXXX"; - char file_name[32]; - strncpy(file_name, temp, 32); - return mkstemp(file_name); -} - -/** - * Seek to the beginning of a file and read a message from it. - * - * @param fd File descriptor of the file. - * @param message type Message type that we expect in the file. - * - * @return Pointer to the content of the message. Needs to be freed by the - * caller. 
- */ -std::vector read_message_from_file(int fd, int message_type) { - /* Go to the beginning of the file. */ - lseek(fd, 0, SEEK_SET); - int64_t type; - std::vector data; - ARROW_CHECK_OK(ReadMessage(fd, &type, &data)); - ARROW_CHECK(type == message_type); - return data; -} - -PlasmaObject random_plasma_object(void) { - unsigned int seed = static_cast(time(NULL)); - int random = rand_r(&seed); - PlasmaObject object; - memset(&object, 0, sizeof(object)); - object.handle.store_fd = random + 7; - object.handle.mmap_size = random + 42; - object.data_offset = random + 1; - object.metadata_offset = random + 2; - object.data_size = random + 3; - object.metadata_size = random + 4; - return object; -} - -TEST(PlasmaSerialization, CreateRequest) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - int64_t data_size1 = 42; - int64_t metadata_size1 = 11; - ARROW_CHECK_OK(SendCreateRequest(fd, object_id1, data_size1, metadata_size1)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaCreateRequest); - ObjectID object_id2; - int64_t data_size2; - int64_t metadata_size2; - ARROW_CHECK_OK( - ReadCreateRequest(data.data(), &object_id2, &data_size2, &metadata_size2)); - ASSERT_EQ(data_size1, data_size2); - ASSERT_EQ(metadata_size1, metadata_size2); - ASSERT_EQ(object_id1, object_id2); - close(fd); -} - -TEST(PlasmaSerialization, CreateReply) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - PlasmaObject object1 = random_plasma_object(); - ARROW_CHECK_OK(SendCreateReply(fd, object_id1, &object1, 0)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaCreateReply); - ObjectID object_id2; - PlasmaObject object2; - memset(&object2, 0, sizeof(object2)); - ARROW_CHECK_OK(ReadCreateReply(data.data(), &object_id2, &object2)); - ASSERT_EQ(object_id1, object_id2); - ASSERT_EQ(memcmp(&object1, &object2, sizeof(object1)), 0); - close(fd); -} - -TEST(PlasmaSerialization, SealRequest) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - unsigned char digest1[kDigestSize]; - memset(&digest1[0], 7, kDigestSize); - ARROW_CHECK_OK(SendSealRequest(fd, object_id1, &digest1[0])); - std::vector data = read_message_from_file(fd, MessageType_PlasmaSealRequest); - ObjectID object_id2; - unsigned char digest2[kDigestSize]; - ARROW_CHECK_OK(ReadSealRequest(data.data(), &object_id2, &digest2[0])); - ASSERT_EQ(object_id1, object_id2); - ASSERT_EQ(memcmp(&digest1[0], &digest2[0], kDigestSize), 0); - close(fd); -} - -TEST(PlasmaSerialization, SealReply) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - ARROW_CHECK_OK(SendSealReply(fd, object_id1, PlasmaError_ObjectExists)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaSealReply); - ObjectID object_id2; - Status s = ReadSealReply(data.data(), &object_id2); - ASSERT_EQ(object_id1, object_id2); - ASSERT_TRUE(s.IsPlasmaObjectExists()); - close(fd); -} - -TEST(PlasmaSerialization, GetRequest) { - int fd = create_temp_file(); - ObjectID object_ids[2]; - object_ids[0] = ObjectID::from_random(); - object_ids[1] = ObjectID::from_random(); - int64_t timeout_ms = 1234; - ARROW_CHECK_OK(SendGetRequest(fd, object_ids, 2, timeout_ms)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaGetRequest); - std::vector object_ids_return; - int64_t timeout_ms_return; - ARROW_CHECK_OK(ReadGetRequest(data.data(), object_ids_return, &timeout_ms_return)); - ASSERT_EQ(object_ids[0], object_ids_return[0]); - 
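Editor's note: the serialization tests repeat one pattern: send a message into a temp file, rewind, read it back, and compare fields. A compact stand-alone sketch of that round trip (the helper name is made up; unlike create_temp_file above, it also unlinks the temp file so nothing is left behind):

#include <unistd.h>

#include <cstdlib>
#include <vector>

std::vector<unsigned char> round_trip(const std::vector<unsigned char>& payload) {
  char name[] = "/tmp/roundtripXXXXXX";
  int fd = mkstemp(name);  // Create and open a unique temporary file.
  if (fd < 0) {
    return {};
  }
  unlink(name);  // Drop the name; the open fd keeps the file alive.
  if (write(fd, payload.data(), payload.size()) < 0) {
    close(fd);
    return {};
  }
  lseek(fd, 0, SEEK_SET);  // Rewind before reading the payload back.
  std::vector<unsigned char> out(payload.size());
  if (read(fd, out.data(), out.size()) < 0) {
    out.clear();
  }
  close(fd);
  return out;
}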
ASSERT_EQ(object_ids[1], object_ids_return[1]); - ASSERT_EQ(timeout_ms, timeout_ms_return); - close(fd); -} - -TEST(PlasmaSerialization, GetReply) { - int fd = create_temp_file(); - ObjectID object_ids[2]; - object_ids[0] = ObjectID::from_random(); - object_ids[1] = ObjectID::from_random(); - std::unordered_map plasma_objects; - plasma_objects[object_ids[0]] = random_plasma_object(); - plasma_objects[object_ids[1]] = random_plasma_object(); - ARROW_CHECK_OK(SendGetReply(fd, object_ids, plasma_objects, 2)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaGetReply); - ObjectID object_ids_return[2]; - PlasmaObject plasma_objects_return[2]; - memset(&plasma_objects_return, 0, sizeof(plasma_objects_return)); - ARROW_CHECK_OK( - ReadGetReply(data.data(), object_ids_return, &plasma_objects_return[0], 2)); - ASSERT_EQ(object_ids[0], object_ids_return[0]); - ASSERT_EQ(object_ids[1], object_ids_return[1]); - ASSERT_EQ(memcmp(&plasma_objects[object_ids[0]], &plasma_objects_return[0], - sizeof(PlasmaObject)), - 0); - ASSERT_EQ(memcmp(&plasma_objects[object_ids[1]], &plasma_objects_return[1], - sizeof(PlasmaObject)), - 0); - close(fd); -} - -TEST(PlasmaSerialization, ReleaseRequest) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - ARROW_CHECK_OK(SendReleaseRequest(fd, object_id1)); - std::vector data = - read_message_from_file(fd, MessageType_PlasmaReleaseRequest); - ObjectID object_id2; - ARROW_CHECK_OK(ReadReleaseRequest(data.data(), &object_id2)); - ASSERT_EQ(object_id1, object_id2); - close(fd); -} - -TEST(PlasmaSerialization, ReleaseReply) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - ARROW_CHECK_OK(SendReleaseReply(fd, object_id1, PlasmaError_ObjectExists)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaReleaseReply); - ObjectID object_id2; - Status s = ReadReleaseReply(data.data(), &object_id2); - ASSERT_EQ(object_id1, object_id2); - ASSERT_TRUE(s.IsPlasmaObjectExists()); - close(fd); -} - -TEST(PlasmaSerialization, DeleteRequest) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - ARROW_CHECK_OK(SendDeleteRequest(fd, object_id1)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaDeleteRequest); - ObjectID object_id2; - ARROW_CHECK_OK(ReadDeleteRequest(data.data(), &object_id2)); - ASSERT_EQ(object_id1, object_id2); - close(fd); -} - -TEST(PlasmaSerialization, DeleteReply) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - int error1 = PlasmaError_ObjectExists; - ARROW_CHECK_OK(SendDeleteReply(fd, object_id1, error1)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaDeleteReply); - ObjectID object_id2; - Status s = ReadDeleteReply(data.data(), &object_id2); - ASSERT_EQ(object_id1, object_id2); - ASSERT_TRUE(s.IsPlasmaObjectExists()); - close(fd); -} - -TEST(PlasmaSerialization, StatusRequest) { - int fd = create_temp_file(); - int64_t num_objects = 2; - ObjectID object_ids[num_objects]; - object_ids[0] = ObjectID::from_random(); - object_ids[1] = ObjectID::from_random(); - ARROW_CHECK_OK(SendStatusRequest(fd, object_ids, num_objects)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaStatusRequest); - ObjectID object_ids_read[num_objects]; - ARROW_CHECK_OK(ReadStatusRequest(data.data(), object_ids_read, num_objects)); - ASSERT_EQ(object_ids[0], object_ids_read[0]); - ASSERT_EQ(object_ids[1], object_ids_read[1]); - close(fd); -} - -TEST(PlasmaSerialization, StatusReply) { 
- int fd = create_temp_file(); - ObjectID object_ids[2]; - object_ids[0] = ObjectID::from_random(); - object_ids[1] = ObjectID::from_random(); - int object_statuses[2] = {42, 43}; - ARROW_CHECK_OK(SendStatusReply(fd, object_ids, object_statuses, 2)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaStatusReply); - int64_t num_objects = ReadStatusReply_num_objects(data.data()); - ObjectID object_ids_read[num_objects]; - int object_statuses_read[num_objects]; - ARROW_CHECK_OK( - ReadStatusReply(data.data(), object_ids_read, object_statuses_read, num_objects)); - ASSERT_EQ(object_ids[0], object_ids_read[0]); - ASSERT_EQ(object_ids[1], object_ids_read[1]); - ASSERT_EQ(object_statuses[0], object_statuses_read[0]); - ASSERT_EQ(object_statuses[1], object_statuses_read[1]); - close(fd); -} - -TEST(PlasmaSerialization, EvictRequest) { - int fd = create_temp_file(); - int64_t num_bytes = 111; - ARROW_CHECK_OK(SendEvictRequest(fd, num_bytes)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaEvictRequest); - int64_t num_bytes_received; - ARROW_CHECK_OK(ReadEvictRequest(data.data(), &num_bytes_received)); - ASSERT_EQ(num_bytes, num_bytes_received); - close(fd); -} - -TEST(PlasmaSerialization, EvictReply) { - int fd = create_temp_file(); - int64_t num_bytes = 111; - ARROW_CHECK_OK(SendEvictReply(fd, num_bytes)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaEvictReply); - int64_t num_bytes_received; - ARROW_CHECK_OK(ReadEvictReply(data.data(), num_bytes_received)); - ASSERT_EQ(num_bytes, num_bytes_received); - close(fd); -} - -TEST(PlasmaSerialization, FetchRequest) { - int fd = create_temp_file(); - ObjectID object_ids[2]; - object_ids[0] = ObjectID::from_random(); - object_ids[1] = ObjectID::from_random(); - ARROW_CHECK_OK(SendFetchRequest(fd, object_ids, 2)); - std::vector data = read_message_from_file(fd, MessageType_PlasmaFetchRequest); - std::vector object_ids_read; - ARROW_CHECK_OK(ReadFetchRequest(data.data(), object_ids_read)); - ASSERT_EQ(object_ids[0], object_ids_read[0]); - ASSERT_EQ(object_ids[1], object_ids_read[1]); - close(fd); -} - -TEST(PlasmaSerialization, WaitRequest) { - int fd = create_temp_file(); - const int num_objects_in = 2; - ObjectRequest object_requests_in[num_objects_in] = { - ObjectRequest({ObjectID::from_random(), PLASMA_QUERY_ANYWHERE, 0}), - ObjectRequest({ObjectID::from_random(), PLASMA_QUERY_LOCAL, 0})}; - const int num_ready_objects_in = 1; - int64_t timeout_ms = 1000; - - ARROW_CHECK_OK(SendWaitRequest( - fd, &object_requests_in[0], num_objects_in, num_ready_objects_in, timeout_ms)); - /* Read message back. 
*/ - std::vector data = read_message_from_file(fd, MessageType_PlasmaWaitRequest); - int num_ready_objects_out; - int64_t timeout_ms_read; - ObjectRequestMap object_requests_out; - ARROW_CHECK_OK(ReadWaitRequest( - data.data(), object_requests_out, &timeout_ms_read, &num_ready_objects_out)); - ASSERT_EQ(num_objects_in, object_requests_out.size()); - ASSERT_EQ(num_ready_objects_out, num_ready_objects_in); - for (int i = 0; i < num_objects_in; i++) { - const ObjectID& object_id = object_requests_in[i].object_id; - ASSERT_EQ(1, object_requests_out.count(object_id)); - const auto& entry = object_requests_out.find(object_id); - ASSERT_TRUE(entry != object_requests_out.end()); - ASSERT_EQ(entry->second.object_id, object_requests_in[i].object_id); - ASSERT_EQ(entry->second.type, object_requests_in[i].type); - } - close(fd); -} - -TEST(PlasmaSerialization, WaitReply) { - int fd = create_temp_file(); - const int num_objects_in = 2; - /* Create a map with two ObjectRequests in it. */ - ObjectRequestMap objects_in(num_objects_in); - ObjectID id1 = ObjectID::from_random(); - objects_in[id1] = ObjectRequest({id1, 0, ObjectStatus_Local}); - ObjectID id2 = ObjectID::from_random(); - objects_in[id2] = ObjectRequest({id2, 0, ObjectStatus_Nonexistent}); - - ARROW_CHECK_OK(SendWaitReply(fd, objects_in, num_objects_in)); - /* Read message back. */ - std::vector data = read_message_from_file(fd, MessageType_PlasmaWaitReply); - ObjectRequest objects_out[2]; - int num_objects_out; - ARROW_CHECK_OK(ReadWaitReply(data.data(), &objects_out[0], &num_objects_out)); - ASSERT_EQ(num_objects_in, num_objects_out); - for (int i = 0; i < num_objects_out; i++) { - /* Each object request must appear exactly once. */ - ASSERT_EQ(objects_in.count(objects_out[i].object_id), 1); - const auto& entry = objects_in.find(objects_out[i].object_id); - ASSERT_TRUE(entry != objects_in.end()); - ASSERT_EQ(entry->second.object_id, objects_out[i].object_id); - ASSERT_EQ(entry->second.status, objects_out[i].status); - } - close(fd); -} - -TEST(PlasmaSerialization, DataRequest) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - const char* address1 = "address1"; - int port1 = 12345; - ARROW_CHECK_OK(SendDataRequest(fd, object_id1, address1, port1)); - /* Reading message back. */ - std::vector data = read_message_from_file(fd, MessageType_PlasmaDataRequest); - ObjectID object_id2; - char* address2; - int port2; - ARROW_CHECK_OK(ReadDataRequest(data.data(), &object_id2, &address2, &port2)); - ASSERT_EQ(object_id1, object_id2); - ASSERT_EQ(strcmp(address1, address2), 0); - ASSERT_EQ(port1, port2); - free(address2); - close(fd); -} - -TEST(PlasmaSerialization, DataReply) { - int fd = create_temp_file(); - ObjectID object_id1 = ObjectID::from_random(); - int64_t object_size1 = 146; - int64_t metadata_size1 = 198; - ARROW_CHECK_OK(SendDataReply(fd, object_id1, object_size1, metadata_size1)); - /* Reading message back. 
*/ - std::vector data = read_message_from_file(fd, MessageType_PlasmaDataReply); - ObjectID object_id2; - int64_t object_size2; - int64_t metadata_size2; - ARROW_CHECK_OK(ReadDataReply(data.data(), &object_id2, &object_size2, &metadata_size2)); - ASSERT_EQ(object_id1, object_id2); - ASSERT_EQ(object_size1, object_size2); - ASSERT_EQ(metadata_size1, metadata_size2); -} diff --git a/cpp/src/plasma/thirdparty/ae/ae.c b/cpp/src/plasma/thirdparty/ae/ae.c deleted file mode 100644 index e66808a81466d..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/ae.c +++ /dev/null @@ -1,465 +0,0 @@ -/* A simple event-driven programming library. Originally I wrote this code - * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated - * it in form of a library for easy reuse. - * - * Copyright (c) 2006-2010, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ae.h" -#include "zmalloc.h" -#include "config.h" - -/* Include the best multiplexing layer supported by this system. - * The following should be ordered by performances, descending. */ -#ifdef HAVE_EVPORT -#include "ae_evport.c" -#else - #ifdef HAVE_EPOLL - #include "ae_epoll.c" - #else - #ifdef HAVE_KQUEUE - #include "ae_kqueue.c" - #else - #include "ae_select.c" - #endif - #endif -#endif - -aeEventLoop *aeCreateEventLoop(int setsize) { - aeEventLoop *eventLoop; - int i; - - if ((eventLoop = zmalloc(sizeof(*eventLoop))) == NULL) goto err; - eventLoop->events = zmalloc(sizeof(aeFileEvent)*setsize); - eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize); - if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err; - eventLoop->setsize = setsize; - eventLoop->lastTime = time(NULL); - eventLoop->timeEventHead = NULL; - eventLoop->timeEventNextId = 0; - eventLoop->stop = 0; - eventLoop->maxfd = -1; - eventLoop->beforesleep = NULL; - if (aeApiCreate(eventLoop) == -1) goto err; - /* Events with mask == AE_NONE are not set. 
So let's initialize the - * vector with it. */ - for (i = 0; i < setsize; i++) - eventLoop->events[i].mask = AE_NONE; - return eventLoop; - -err: - if (eventLoop) { - zfree(eventLoop->events); - zfree(eventLoop->fired); - zfree(eventLoop); - } - return NULL; -} - -/* Return the current set size. */ -int aeGetSetSize(aeEventLoop *eventLoop) { - return eventLoop->setsize; -} - -/* Resize the maximum set size of the event loop. - * If the requested set size is smaller than the current set size, but - * there is already a file descriptor in use that is >= the requested - * set size minus one, AE_ERR is returned and the operation is not - * performed at all. - * - * Otherwise AE_OK is returned and the operation is successful. */ -int aeResizeSetSize(aeEventLoop *eventLoop, int setsize) { - int i; - - if (setsize == eventLoop->setsize) return AE_OK; - if (eventLoop->maxfd >= setsize) return AE_ERR; - if (aeApiResize(eventLoop,setsize) == -1) return AE_ERR; - - eventLoop->events = zrealloc(eventLoop->events,sizeof(aeFileEvent)*setsize); - eventLoop->fired = zrealloc(eventLoop->fired,sizeof(aeFiredEvent)*setsize); - eventLoop->setsize = setsize; - - /* Make sure that if we created new slots, they are initialized with - * an AE_NONE mask. */ - for (i = eventLoop->maxfd+1; i < setsize; i++) - eventLoop->events[i].mask = AE_NONE; - return AE_OK; -} - -void aeDeleteEventLoop(aeEventLoop *eventLoop) { - aeApiFree(eventLoop); - zfree(eventLoop->events); - zfree(eventLoop->fired); - zfree(eventLoop); -} - -void aeStop(aeEventLoop *eventLoop) { - eventLoop->stop = 1; -} - -int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, - aeFileProc *proc, void *clientData) -{ - if (fd >= eventLoop->setsize) { - errno = ERANGE; - return AE_ERR; - } - aeFileEvent *fe = &eventLoop->events[fd]; - - if (aeApiAddEvent(eventLoop, fd, mask) == -1) - return AE_ERR; - fe->mask |= mask; - if (mask & AE_READABLE) fe->rfileProc = proc; - if (mask & AE_WRITABLE) fe->wfileProc = proc; - fe->clientData = clientData; - if (fd > eventLoop->maxfd) - eventLoop->maxfd = fd; - return AE_OK; -} - -void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask) -{ - if (fd >= eventLoop->setsize) return; - aeFileEvent *fe = &eventLoop->events[fd]; - if (fe->mask == AE_NONE) return; - - aeApiDelEvent(eventLoop, fd, mask); - fe->mask = fe->mask & (~mask); - if (fd == eventLoop->maxfd && fe->mask == AE_NONE) { - /* Update the max fd */ - int j; - - for (j = eventLoop->maxfd-1; j >= 0; j--) - if (eventLoop->events[j].mask != AE_NONE) break; - eventLoop->maxfd = j; - } -} - -int aeGetFileEvents(aeEventLoop *eventLoop, int fd) { - if (fd >= eventLoop->setsize) return 0; - aeFileEvent *fe = &eventLoop->events[fd]; - - return fe->mask; -} - -static void aeGetTime(long *seconds, long *milliseconds) -{ - struct timeval tv; - - gettimeofday(&tv, NULL); - *seconds = tv.tv_sec; - *milliseconds = tv.tv_usec/1000; -} - -static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) { - long cur_sec, cur_ms, when_sec, when_ms; - - aeGetTime(&cur_sec, &cur_ms); - when_sec = cur_sec + milliseconds/1000; - when_ms = cur_ms + milliseconds%1000; - if (when_ms >= 1000) { - when_sec ++; - when_ms -= 1000; - } - *sec = when_sec; - *ms = when_ms; -} - -long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, - aeTimeProc *proc, void *clientData, - aeEventFinalizerProc *finalizerProc) -{ - long long id = eventLoop->timeEventNextId++; - aeTimeEvent *te; - - te = zmalloc(sizeof(*te)); - if (te == NULL) return AE_ERR; - 
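Editor's note: the prototypes declared by the removed ae.h (shown later in this patch) make for a very small API. A hypothetical stand-alone usage sketch, assuming the bundled header is on the include path and ae.c is built as a C object; it registers one readable-fd callback and one periodic timer:

extern "C" {
#include "ae.h"
}

#include <cstdio>

static void on_readable(aeEventLoop* loop, int fd, void* clientData, int mask) {
  (void)loop;
  (void)clientData;
  (void)mask;
  std::printf("fd %d is readable\n", fd);
}

static int every_second(aeEventLoop* loop, long long id, void* clientData) {
  (void)loop;
  (void)id;
  (void)clientData;
  std::printf("tick\n");
  return 1000;  // Fire again in 1000 ms; returning AE_NOMORE would delete the timer.
}

int main() {
  aeEventLoop* loop = aeCreateEventLoop(64);
  aeCreateFileEvent(loop, /*fd=*/0, AE_READABLE, on_readable, nullptr);
  aeCreateTimeEvent(loop, 1000, every_second, nullptr, nullptr);
  aeMain(loop);  // Runs until aeStop() is called from a callback.
  aeDeleteEventLoop(loop);
  return 0;
}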
te->id = id; - aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms); - te->timeProc = proc; - te->finalizerProc = finalizerProc; - te->clientData = clientData; - te->next = eventLoop->timeEventHead; - eventLoop->timeEventHead = te; - return id; -} - -int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id) -{ - aeTimeEvent *te = eventLoop->timeEventHead; - while(te) { - if (te->id == id) { - te->id = AE_DELETED_EVENT_ID; - return AE_OK; - } - te = te->next; - } - return AE_ERR; /* NO event with the specified ID found */ -} - -/* Search the first timer to fire. - * This operation is useful to know how many time the select can be - * put in sleep without to delay any event. - * If there are no timers NULL is returned. - * - * Note that's O(N) since time events are unsorted. - * Possible optimizations (not needed by Redis so far, but...): - * 1) Insert the event in order, so that the nearest is just the head. - * Much better but still insertion or deletion of timers is O(N). - * 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)). - */ -static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) -{ - aeTimeEvent *te = eventLoop->timeEventHead; - aeTimeEvent *nearest = NULL; - - while(te) { - if (!nearest || te->when_sec < nearest->when_sec || - (te->when_sec == nearest->when_sec && - te->when_ms < nearest->when_ms)) - nearest = te; - te = te->next; - } - return nearest; -} - -/* Process time events */ -static int processTimeEvents(aeEventLoop *eventLoop) { - int processed = 0; - aeTimeEvent *te, *prev; - long long maxId; - time_t now = time(NULL); - - /* If the system clock is moved to the future, and then set back to the - * right value, time events may be delayed in a random way. Often this - * means that scheduled operations will not be performed soon enough. - * - * Here we try to detect system clock skews, and force all the time - * events to be processed ASAP when this happens: the idea is that - * processing events earlier is less dangerous than delaying them - * indefinitely, and practice suggests it is. */ - if (now < eventLoop->lastTime) { - te = eventLoop->timeEventHead; - while(te) { - te->when_sec = 0; - te = te->next; - } - } - eventLoop->lastTime = now; - - prev = NULL; - te = eventLoop->timeEventHead; - maxId = eventLoop->timeEventNextId-1; - while(te) { - long now_sec, now_ms; - long long id; - - /* Remove events scheduled for deletion. */ - if (te->id == AE_DELETED_EVENT_ID) { - aeTimeEvent *next = te->next; - if (prev == NULL) - eventLoop->timeEventHead = te->next; - else - prev->next = te->next; - if (te->finalizerProc) - te->finalizerProc(eventLoop, te->clientData); - zfree(te); - te = next; - continue; - } - - /* Make sure we don't process time events created by time events in - * this iteration. Note that this check is currently useless: we always - * add new timers on the head, however if we change the implementation - * detail, this check may be useful again: we keep it here for future - * defense. 
*/ - if (te->id > maxId) { - te = te->next; - continue; - } - aeGetTime(&now_sec, &now_ms); - if (now_sec > te->when_sec || - (now_sec == te->when_sec && now_ms >= te->when_ms)) - { - int retval; - - id = te->id; - retval = te->timeProc(eventLoop, id, te->clientData); - processed++; - if (retval != AE_NOMORE) { - aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms); - } else { - te->id = AE_DELETED_EVENT_ID; - } - } - prev = te; - te = te->next; - } - return processed; -} - -/* Process every pending time event, then every pending file event - * (that may be registered by time event callbacks just processed). - * Without special flags the function sleeps until some file event - * fires, or when the next time event occurs (if any). - * - * If flags is 0, the function does nothing and returns. - * if flags has AE_ALL_EVENTS set, all the kind of events are processed. - * if flags has AE_FILE_EVENTS set, file events are processed. - * if flags has AE_TIME_EVENTS set, time events are processed. - * if flags has AE_DONT_WAIT set the function returns ASAP until all - * the events that's possible to process without to wait are processed. - * - * The function returns the number of events processed. */ -int aeProcessEvents(aeEventLoop *eventLoop, int flags) -{ - int processed = 0, numevents; - - /* Nothing to do? return ASAP */ - if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0; - - /* Note that we want call select() even if there are no - * file events to process as long as we want to process time - * events, in order to sleep until the next time event is ready - * to fire. */ - if (eventLoop->maxfd != -1 || - ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) { - int j; - aeTimeEvent *shortest = NULL; - struct timeval tv, *tvp; - - if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT)) - shortest = aeSearchNearestTimer(eventLoop); - if (shortest) { - long now_sec, now_ms; - - aeGetTime(&now_sec, &now_ms); - tvp = &tv; - - /* How many milliseconds we need to wait for the next - * time event to fire? */ - long long ms = - (shortest->when_sec - now_sec)*1000 + - shortest->when_ms - now_ms; - - if (ms > 0) { - tvp->tv_sec = ms/1000; - tvp->tv_usec = (ms % 1000)*1000; - } else { - tvp->tv_sec = 0; - tvp->tv_usec = 0; - } - } else { - /* If we have to check for events but need to return - * ASAP because of AE_DONT_WAIT we need to set the timeout - * to zero */ - if (flags & AE_DONT_WAIT) { - tv.tv_sec = tv.tv_usec = 0; - tvp = &tv; - } else { - /* Otherwise we can block */ - tvp = NULL; /* wait forever */ - } - } - - numevents = aeApiPoll(eventLoop, tvp); - for (j = 0; j < numevents; j++) { - aeFileEvent *fe = &eventLoop->events[eventLoop->fired[j].fd]; - int mask = eventLoop->fired[j].mask; - int fd = eventLoop->fired[j].fd; - int rfired = 0; - - /* note the fe->mask & mask & ... code: maybe an already processed - * event removed an element that fired and we still didn't - * processed, so we check if the event is still valid. 
*/ - if (fe->mask & mask & AE_READABLE) { - rfired = 1; - fe->rfileProc(eventLoop,fd,fe->clientData,mask); - } - if (fe->mask & mask & AE_WRITABLE) { - if (!rfired || fe->wfileProc != fe->rfileProc) - fe->wfileProc(eventLoop,fd,fe->clientData,mask); - } - processed++; - } - } - /* Check time events */ - if (flags & AE_TIME_EVENTS) - processed += processTimeEvents(eventLoop); - - return processed; /* return the number of processed file/time events */ -} - -/* Wait for milliseconds until the given file descriptor becomes - * writable/readable/exception */ -int aeWait(int fd, int mask, long long milliseconds) { - struct pollfd pfd; - int retmask = 0, retval; - - memset(&pfd, 0, sizeof(pfd)); - pfd.fd = fd; - if (mask & AE_READABLE) pfd.events |= POLLIN; - if (mask & AE_WRITABLE) pfd.events |= POLLOUT; - - if ((retval = poll(&pfd, 1, milliseconds))== 1) { - if (pfd.revents & POLLIN) retmask |= AE_READABLE; - if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE; - if (pfd.revents & POLLERR) retmask |= AE_WRITABLE; - if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE; - return retmask; - } else { - return retval; - } -} - -void aeMain(aeEventLoop *eventLoop) { - eventLoop->stop = 0; - while (!eventLoop->stop) { - if (eventLoop->beforesleep != NULL) - eventLoop->beforesleep(eventLoop); - aeProcessEvents(eventLoop, AE_ALL_EVENTS); - } -} - -char *aeGetApiName(void) { - return aeApiName(); -} - -void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep) { - eventLoop->beforesleep = beforesleep; -} diff --git a/cpp/src/plasma/thirdparty/ae/ae.h b/cpp/src/plasma/thirdparty/ae/ae.h deleted file mode 100644 index 827c4c9e4e59e..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/ae.h +++ /dev/null @@ -1,123 +0,0 @@ -/* A simple event-driven programming library. Originally I wrote this code - * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated - * it in form of a library for easy reuse. - * - * Copyright (c) 2006-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __AE_H__ -#define __AE_H__ - -#include - -#define AE_OK 0 -#define AE_ERR -1 - -#define AE_NONE 0 -#define AE_READABLE 1 -#define AE_WRITABLE 2 - -#define AE_FILE_EVENTS 1 -#define AE_TIME_EVENTS 2 -#define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS) -#define AE_DONT_WAIT 4 - -#define AE_NOMORE -1 -#define AE_DELETED_EVENT_ID -1 - -/* Macros */ -#define AE_NOTUSED(V) ((void) V) - -struct aeEventLoop; - -/* Types and data structures */ -typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); -typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData); -typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData); -typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop); - -/* File event structure */ -typedef struct aeFileEvent { - int mask; /* one of AE_(READABLE|WRITABLE) */ - aeFileProc *rfileProc; - aeFileProc *wfileProc; - void *clientData; -} aeFileEvent; - -/* Time event structure */ -typedef struct aeTimeEvent { - long long id; /* time event identifier. */ - long when_sec; /* seconds */ - long when_ms; /* milliseconds */ - aeTimeProc *timeProc; - aeEventFinalizerProc *finalizerProc; - void *clientData; - struct aeTimeEvent *next; -} aeTimeEvent; - -/* A fired event */ -typedef struct aeFiredEvent { - int fd; - int mask; -} aeFiredEvent; - -/* State of an event based program */ -typedef struct aeEventLoop { - int maxfd; /* highest file descriptor currently registered */ - int setsize; /* max number of file descriptors tracked */ - long long timeEventNextId; - time_t lastTime; /* Used to detect system clock skew */ - aeFileEvent *events; /* Registered events */ - aeFiredEvent *fired; /* Fired events */ - aeTimeEvent *timeEventHead; - int stop; - void *apidata; /* This is used for polling API specific data */ - aeBeforeSleepProc *beforesleep; -} aeEventLoop; - -/* Prototypes */ -aeEventLoop *aeCreateEventLoop(int setsize); -void aeDeleteEventLoop(aeEventLoop *eventLoop); -void aeStop(aeEventLoop *eventLoop); -int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, - aeFileProc *proc, void *clientData); -void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask); -int aeGetFileEvents(aeEventLoop *eventLoop, int fd); -long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, - aeTimeProc *proc, void *clientData, - aeEventFinalizerProc *finalizerProc); -int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id); -int aeProcessEvents(aeEventLoop *eventLoop, int flags); -int aeWait(int fd, int mask, long long milliseconds); -void aeMain(aeEventLoop *eventLoop); -char *aeGetApiName(void); -void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep); -int aeGetSetSize(aeEventLoop *eventLoop); -int aeResizeSetSize(aeEventLoop *eventLoop, int setsize); - -#endif diff --git a/cpp/src/plasma/thirdparty/ae/ae_epoll.c b/cpp/src/plasma/thirdparty/ae/ae_epoll.c deleted file mode 100644 index 410aac70dc5af..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/ae_epoll.c +++ /dev/null @@ -1,135 +0,0 @@ -/* Linux epoll(2) based ae.c module - * - * Copyright (c) 2009-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - -#include - -typedef struct aeApiState { - int epfd; - struct epoll_event *events; -} aeApiState; - -static int aeApiCreate(aeEventLoop *eventLoop) { - aeApiState *state = zmalloc(sizeof(aeApiState)); - - if (!state) return -1; - state->events = zmalloc(sizeof(struct epoll_event)*eventLoop->setsize); - if (!state->events) { - zfree(state); - return -1; - } - state->epfd = epoll_create(1024); /* 1024 is just a hint for the kernel */ - if (state->epfd == -1) { - zfree(state->events); - zfree(state); - return -1; - } - eventLoop->apidata = state; - return 0; -} - -static int aeApiResize(aeEventLoop *eventLoop, int setsize) { - aeApiState *state = eventLoop->apidata; - - state->events = zrealloc(state->events, sizeof(struct epoll_event)*setsize); - return 0; -} - -static void aeApiFree(aeEventLoop *eventLoop) { - aeApiState *state = eventLoop->apidata; - - close(state->epfd); - zfree(state->events); - zfree(state); -} - -static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - struct epoll_event ee = {0}; /* avoid valgrind warning */ - /* If the fd was already monitored for some event, we need a MOD - * operation. Otherwise we need an ADD operation. */ - int op = eventLoop->events[fd].mask == AE_NONE ? - EPOLL_CTL_ADD : EPOLL_CTL_MOD; - - ee.events = 0; - mask |= eventLoop->events[fd].mask; /* Merge old events */ - if (mask & AE_READABLE) ee.events |= EPOLLIN; - if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; - ee.data.fd = fd; - if (epoll_ctl(state->epfd,op,fd,&ee) == -1) return -1; - return 0; -} - -static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int delmask) { - aeApiState *state = eventLoop->apidata; - struct epoll_event ee = {0}; /* avoid valgrind warning */ - int mask = eventLoop->events[fd].mask & (~delmask); - - ee.events = 0; - if (mask & AE_READABLE) ee.events |= EPOLLIN; - if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; - ee.data.fd = fd; - if (mask != AE_NONE) { - epoll_ctl(state->epfd,EPOLL_CTL_MOD,fd,&ee); - } else { - /* Note, Kernel < 2.6.9 requires a non null event pointer even for - * EPOLL_CTL_DEL. 
*/ - epoll_ctl(state->epfd,EPOLL_CTL_DEL,fd,&ee); - } -} - -static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { - aeApiState *state = eventLoop->apidata; - int retval, numevents = 0; - - retval = epoll_wait(state->epfd,state->events,eventLoop->setsize, - tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1); - if (retval > 0) { - int j; - - numevents = retval; - for (j = 0; j < numevents; j++) { - int mask = 0; - struct epoll_event *e = state->events+j; - - if (e->events & EPOLLIN) mask |= AE_READABLE; - if (e->events & EPOLLOUT) mask |= AE_WRITABLE; - if (e->events & EPOLLERR) mask |= AE_WRITABLE; - if (e->events & EPOLLHUP) mask |= AE_WRITABLE; - eventLoop->fired[j].fd = e->data.fd; - eventLoop->fired[j].mask = mask; - } - } - return numevents; -} - -static char *aeApiName(void) { - return "epoll"; -} diff --git a/cpp/src/plasma/thirdparty/ae/ae_evport.c b/cpp/src/plasma/thirdparty/ae/ae_evport.c deleted file mode 100644 index 5c317becb6f7d..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/ae_evport.c +++ /dev/null @@ -1,320 +0,0 @@ -/* ae.c module for illumos event ports. - * - * Copyright (c) 2012, Joyent, Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - -#include -#include -#include -#include - -#include -#include - -#include - -static int evport_debug = 0; - -/* - * This file implements the ae API using event ports, present on Solaris-based - * systems since Solaris 10. Using the event port interface, we associate file - * descriptors with the port. Each association also includes the set of poll(2) - * events that the consumer is interested in (e.g., POLLIN and POLLOUT). - * - * There's one tricky piece to this implementation: when we return events via - * aeApiPoll, the corresponding file descriptors become dissociated from the - * port. This is necessary because poll events are level-triggered, so if the - * fd didn't become dissociated, it would immediately fire another event since - * the underlying state hasn't changed yet. 
We must re-associate the file - * descriptor, but only after we know that our caller has actually read from it. - * The ae API does not tell us exactly when that happens, but we do know that - * it must happen by the time aeApiPoll is called again. Our solution is to - * keep track of the last fds returned by aeApiPoll and re-associate them next - * time aeApiPoll is invoked. - * - * To summarize, in this module, each fd association is EITHER (a) represented - * only via the in-kernel association OR (b) represented by pending_fds and - * pending_masks. (b) is only true for the last fds we returned from aeApiPoll, - * and only until we enter aeApiPoll again (at which point we restore the - * in-kernel association). - */ -#define MAX_EVENT_BATCHSZ 512 - -typedef struct aeApiState { - int portfd; /* event port */ - int npending; /* # of pending fds */ - int pending_fds[MAX_EVENT_BATCHSZ]; /* pending fds */ - int pending_masks[MAX_EVENT_BATCHSZ]; /* pending fds' masks */ -} aeApiState; - -static int aeApiCreate(aeEventLoop *eventLoop) { - int i; - aeApiState *state = zmalloc(sizeof(aeApiState)); - if (!state) return -1; - - state->portfd = port_create(); - if (state->portfd == -1) { - zfree(state); - return -1; - } - - state->npending = 0; - - for (i = 0; i < MAX_EVENT_BATCHSZ; i++) { - state->pending_fds[i] = -1; - state->pending_masks[i] = AE_NONE; - } - - eventLoop->apidata = state; - return 0; -} - -static int aeApiResize(aeEventLoop *eventLoop, int setsize) { - /* Nothing to resize here. */ - return 0; -} - -static void aeApiFree(aeEventLoop *eventLoop) { - aeApiState *state = eventLoop->apidata; - - close(state->portfd); - zfree(state); -} - -static int aeApiLookupPending(aeApiState *state, int fd) { - int i; - - for (i = 0; i < state->npending; i++) { - if (state->pending_fds[i] == fd) - return (i); - } - - return (-1); -} - -/* - * Helper function to invoke port_associate for the given fd and mask. - */ -static int aeApiAssociate(const char *where, int portfd, int fd, int mask) { - int events = 0; - int rv, err; - - if (mask & AE_READABLE) - events |= POLLIN; - if (mask & AE_WRITABLE) - events |= POLLOUT; - - if (evport_debug) - fprintf(stderr, "%s: port_associate(%d, 0x%x) = ", where, fd, events); - - rv = port_associate(portfd, PORT_SOURCE_FD, fd, events, - (void *)(uintptr_t)mask); - err = errno; - - if (evport_debug) - fprintf(stderr, "%d (%s)\n", rv, rv == 0 ? "no error" : strerror(err)); - - if (rv == -1) { - fprintf(stderr, "%s: port_associate: %s\n", where, strerror(err)); - - if (err == EAGAIN) - fprintf(stderr, "aeApiAssociate: event port limit exceeded."); - } - - return rv; -} - -static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - int fullmask, pfd; - - if (evport_debug) - fprintf(stderr, "aeApiAddEvent: fd %d mask 0x%x\n", fd, mask); - - /* - * Since port_associate's "events" argument replaces any existing events, we - * must be sure to include whatever events are already associated when - * we call port_associate() again. - */ - fullmask = mask | eventLoop->events[fd].mask; - pfd = aeApiLookupPending(state, fd); - - if (pfd != -1) { - /* - * This fd was recently returned from aeApiPoll. It should be safe to - * assume that the consumer has processed that poll event, but we play - * it safer by simply updating pending_mask. The fd will be - * re-associated as usual when aeApiPoll is called again. 
- */ - if (evport_debug) - fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd); - state->pending_masks[pfd] |= fullmask; - return 0; - } - - return (aeApiAssociate("aeApiAddEvent", state->portfd, fd, fullmask)); -} - -static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - int fullmask, pfd; - - if (evport_debug) - fprintf(stderr, "del fd %d mask 0x%x\n", fd, mask); - - pfd = aeApiLookupPending(state, fd); - - if (pfd != -1) { - if (evport_debug) - fprintf(stderr, "deleting event from pending fd %d\n", fd); - - /* - * This fd was just returned from aeApiPoll, so it's not currently - * associated with the port. All we need to do is update - * pending_mask appropriately. - */ - state->pending_masks[pfd] &= ~mask; - - if (state->pending_masks[pfd] == AE_NONE) - state->pending_fds[pfd] = -1; - - return; - } - - /* - * The fd is currently associated with the port. Like with the add case - * above, we must look at the full mask for the file descriptor before - * updating that association. We don't have a good way of knowing what the - * events are without looking into the eventLoop state directly. We rely on - * the fact that our caller has already updated the mask in the eventLoop. - */ - - fullmask = eventLoop->events[fd].mask; - if (fullmask == AE_NONE) { - /* - * We're removing *all* events, so use port_dissociate to remove the - * association completely. Failure here indicates a bug. - */ - if (evport_debug) - fprintf(stderr, "aeApiDelEvent: port_dissociate(%d)\n", fd); - - if (port_dissociate(state->portfd, PORT_SOURCE_FD, fd) != 0) { - perror("aeApiDelEvent: port_dissociate"); - abort(); /* will not return */ - } - } else if (aeApiAssociate("aeApiDelEvent", state->portfd, fd, - fullmask) != 0) { - /* - * ENOMEM is a potentially transient condition, but the kernel won't - * generally return it unless things are really bad. EAGAIN indicates - * we've reached an resource limit, for which it doesn't make sense to - * retry (counter-intuitively). All other errors indicate a bug. In any - * of these cases, the best we can do is to abort. - */ - abort(); /* will not return */ - } -} - -static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { - aeApiState *state = eventLoop->apidata; - struct timespec timeout, *tsp; - int mask, i; - uint_t nevents; - port_event_t event[MAX_EVENT_BATCHSZ]; - - /* - * If we've returned fd events before, we must re-associate them with the - * port now, before calling port_get(). See the block comment at the top of - * this file for an explanation of why. - */ - for (i = 0; i < state->npending; i++) { - if (state->pending_fds[i] == -1) - /* This fd has since been deleted. */ - continue; - - if (aeApiAssociate("aeApiPoll", state->portfd, - state->pending_fds[i], state->pending_masks[i]) != 0) { - /* See aeApiDelEvent for why this case is fatal. */ - abort(); - } - - state->pending_masks[i] = AE_NONE; - state->pending_fds[i] = -1; - } - - state->npending = 0; - - if (tvp != NULL) { - timeout.tv_sec = tvp->tv_sec; - timeout.tv_nsec = tvp->tv_usec * 1000; - tsp = &timeout; - } else { - tsp = NULL; - } - - /* - * port_getn can return with errno == ETIME having returned some events (!). - * So if we get ETIME, we check nevents, too. - */ - nevents = 1; - if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents, - tsp) == -1 && (errno != ETIME || nevents == 0)) { - if (errno == ETIME || errno == EINTR) - return 0; - - /* Any other error indicates a bug. 
*/ - perror("aeApiPoll: port_get"); - abort(); - } - - state->npending = nevents; - - for (i = 0; i < nevents; i++) { - mask = 0; - if (event[i].portev_events & POLLIN) - mask |= AE_READABLE; - if (event[i].portev_events & POLLOUT) - mask |= AE_WRITABLE; - - eventLoop->fired[i].fd = event[i].portev_object; - eventLoop->fired[i].mask = mask; - - if (evport_debug) - fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n", - (int)event[i].portev_object, mask); - - state->pending_fds[i] = event[i].portev_object; - state->pending_masks[i] = (uintptr_t)event[i].portev_user; - } - - return nevents; -} - -static char *aeApiName(void) { - return "evport"; -} diff --git a/cpp/src/plasma/thirdparty/ae/ae_kqueue.c b/cpp/src/plasma/thirdparty/ae/ae_kqueue.c deleted file mode 100644 index 6796f4ceb5939..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/ae_kqueue.c +++ /dev/null @@ -1,138 +0,0 @@ -/* Kqueue(2)-based ae.c module - * - * Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - - -#include -#include -#include - -typedef struct aeApiState { - int kqfd; - struct kevent *events; -} aeApiState; - -static int aeApiCreate(aeEventLoop *eventLoop) { - aeApiState *state = zmalloc(sizeof(aeApiState)); - - if (!state) return -1; - state->events = zmalloc(sizeof(struct kevent)*eventLoop->setsize); - if (!state->events) { - zfree(state); - return -1; - } - state->kqfd = kqueue(); - if (state->kqfd == -1) { - zfree(state->events); - zfree(state); - return -1; - } - eventLoop->apidata = state; - return 0; -} - -static int aeApiResize(aeEventLoop *eventLoop, int setsize) { - aeApiState *state = eventLoop->apidata; - - state->events = zrealloc(state->events, sizeof(struct kevent)*setsize); - return 0; -} - -static void aeApiFree(aeEventLoop *eventLoop) { - aeApiState *state = eventLoop->apidata; - - close(state->kqfd); - zfree(state->events); - zfree(state); -} - -static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - struct kevent ke; - - if (mask & AE_READABLE) { - EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL); - if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; - } - if (mask & AE_WRITABLE) { - EV_SET(&ke, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); - if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; - } - return 0; -} - -static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - struct kevent ke; - - if (mask & AE_READABLE) { - EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); - kevent(state->kqfd, &ke, 1, NULL, 0, NULL); - } - if (mask & AE_WRITABLE) { - EV_SET(&ke, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); - kevent(state->kqfd, &ke, 1, NULL, 0, NULL); - } -} - -static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { - aeApiState *state = eventLoop->apidata; - int retval, numevents = 0; - - if (tvp != NULL) { - struct timespec timeout; - timeout.tv_sec = tvp->tv_sec; - timeout.tv_nsec = tvp->tv_usec * 1000; - retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, - &timeout); - } else { - retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, - NULL); - } - - if (retval > 0) { - int j; - - numevents = retval; - for(j = 0; j < numevents; j++) { - int mask = 0; - struct kevent *e = state->events+j; - - if (e->filter == EVFILT_READ) mask |= AE_READABLE; - if (e->filter == EVFILT_WRITE) mask |= AE_WRITABLE; - eventLoop->fired[j].fd = e->ident; - eventLoop->fired[j].mask = mask; - } - } - return numevents; -} - -static char *aeApiName(void) { - return "kqueue"; -} diff --git a/cpp/src/plasma/thirdparty/ae/ae_select.c b/cpp/src/plasma/thirdparty/ae/ae_select.c deleted file mode 100644 index c039a8ea3128d..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/ae_select.c +++ /dev/null @@ -1,106 +0,0 @@ -/* Select()-based ae.c module. - * - * Copyright (c) 2009-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - -#include -#include - -typedef struct aeApiState { - fd_set rfds, wfds; - /* We need to have a copy of the fd sets as it's not safe to reuse - * FD sets after select(). */ - fd_set _rfds, _wfds; -} aeApiState; - -static int aeApiCreate(aeEventLoop *eventLoop) { - aeApiState *state = zmalloc(sizeof(aeApiState)); - - if (!state) return -1; - FD_ZERO(&state->rfds); - FD_ZERO(&state->wfds); - eventLoop->apidata = state; - return 0; -} - -static int aeApiResize(aeEventLoop *eventLoop, int setsize) { - /* Just ensure we have enough room in the fd_set type. */ - if (setsize >= FD_SETSIZE) return -1; - return 0; -} - -static void aeApiFree(aeEventLoop *eventLoop) { - zfree(eventLoop->apidata); -} - -static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - - if (mask & AE_READABLE) FD_SET(fd,&state->rfds); - if (mask & AE_WRITABLE) FD_SET(fd,&state->wfds); - return 0; -} - -static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { - aeApiState *state = eventLoop->apidata; - - if (mask & AE_READABLE) FD_CLR(fd,&state->rfds); - if (mask & AE_WRITABLE) FD_CLR(fd,&state->wfds); -} - -static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { - aeApiState *state = eventLoop->apidata; - int retval, j, numevents = 0; - - memcpy(&state->_rfds,&state->rfds,sizeof(fd_set)); - memcpy(&state->_wfds,&state->wfds,sizeof(fd_set)); - - retval = select(eventLoop->maxfd+1, - &state->_rfds,&state->_wfds,NULL,tvp); - if (retval > 0) { - for (j = 0; j <= eventLoop->maxfd; j++) { - int mask = 0; - aeFileEvent *fe = &eventLoop->events[j]; - - if (fe->mask == AE_NONE) continue; - if (fe->mask & AE_READABLE && FD_ISSET(j,&state->_rfds)) - mask |= AE_READABLE; - if (fe->mask & AE_WRITABLE && FD_ISSET(j,&state->_wfds)) - mask |= AE_WRITABLE; - eventLoop->fired[numevents].fd = j; - eventLoop->fired[numevents].mask = mask; - numevents++; - } - } - return numevents; -} - -static char *aeApiName(void) { - return "select"; -} diff --git a/cpp/src/plasma/thirdparty/ae/config.h b/cpp/src/plasma/thirdparty/ae/config.h deleted file mode 100644 index 4f8e1ea1bc38c..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/config.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2009-2012, Salvatore Sanfilippo - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __CONFIG_H -#define __CONFIG_H - -#ifdef __APPLE__ -#include -#endif - -/* Test for polling API */ -#ifdef __linux__ -#define HAVE_EPOLL 1 -#endif - -#if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__) -#define HAVE_KQUEUE 1 -#endif - -#ifdef __sun -#include -#ifdef _DTRACE_VERSION -#define HAVE_EVPORT 1 -#endif -#endif - - -#endif diff --git a/cpp/src/plasma/thirdparty/ae/zmalloc.h b/cpp/src/plasma/thirdparty/ae/zmalloc.h deleted file mode 100644 index 6c27dd4e5c3d3..0000000000000 --- a/cpp/src/plasma/thirdparty/ae/zmalloc.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2009-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _ZMALLOC_H -#define _ZMALLOC_H - -#ifndef zmalloc -#define zmalloc malloc -#endif - -#ifndef zfree -#define zfree free -#endif - -#ifndef zrealloc -#define zrealloc realloc -#endif - -#endif /* _ZMALLOC_H */ diff --git a/cpp/src/plasma/thirdparty/dlmalloc.c b/cpp/src/plasma/thirdparty/dlmalloc.c deleted file mode 100644 index 84ccbd28fc4ec..0000000000000 --- a/cpp/src/plasma/thirdparty/dlmalloc.c +++ /dev/null @@ -1,6281 +0,0 @@ -/* - This is a version (aka dlmalloc) of malloc/free/realloc written by - Doug Lea and released to the public domain, as explained at - http://creativecommons.org/publicdomain/zero/1.0/ Send questions, - comments, complaints, performance data, etc to dl@cs.oswego.edu - -* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea - Note: There may be an updated version of this malloc obtainable at - ftp://gee.cs.oswego.edu/pub/misc/malloc.c - Check before installing! - -* Quickstart - - This library is all in one file to simplify the most common usage: - ftp it, compile it (-O3), and link it into another program. All of - the compile-time options default to reasonable values for use on - most platforms. You might later want to step through various - compile-time and dynamic tuning options. - - For convenience, an include file for code using this malloc is at: - ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h - You don't really need this .h file unless you call functions not - defined in your system include files. The .h file contains only the - excerpts from this file needed for using this malloc on ANSI C/C++ - systems, so long as you haven't changed compile-time options about - naming and tuning parameters. If you do, then you can create your - own malloc.h that does include all settings by cutting at the point - indicated below. Note that you may already by default be using a C - library containing a malloc that is based on some version of this - malloc (for example in linux). You might still want to use the one - in this file to customize settings or to avoid overheads associated - with library versions. - -* Vital statistics: - - Supported pointer/size_t representation: 4 or 8 bytes - size_t MUST be an unsigned type of the same width as - pointers. (If you are using an ancient system that declares - size_t as a signed type, or need it to be a different width - than pointers, you can use a previous release of this malloc - (e.g. 2.7.2) supporting these.) - - Alignment: 8 bytes (minimum) - This suffices for nearly all current machines and C compilers. - However, you can define MALLOC_ALIGNMENT to be wider than this - if necessary (up to 128bytes), at the expense of using more space. - - Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) - 8 or 16 bytes (if 8byte sizes) - Each malloced chunk has a hidden word of overhead holding size - and status information, and additional cross-check word - if FOOTERS is defined. 
- - Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) - 8-byte ptrs: 32 bytes (including overhead) - - Even a request for zero bytes (i.e., malloc(0)) returns a - pointer to something of the minimum allocatable size. - The maximum overhead wastage (i.e., number of extra bytes - allocated than were requested in malloc) is less than or equal - to the minimum size, except for requests >= mmap_threshold that - are serviced via mmap(), where the worst case wastage is about - 32 bytes plus the remainder from a system page (the minimal - mmap unit); typically 4096 or 8192 bytes. - - Security: static-safe; optionally more or less - The "security" of malloc refers to the ability of malicious - code to accentuate the effects of errors (for example, freeing - space that is not currently malloc'ed or overwriting past the - ends of chunks) in code that calls malloc. This malloc - guarantees not to modify any memory locations below the base of - heap, i.e., static variables, even in the presence of usage - errors. The routines additionally detect most improper frees - and reallocs. All this holds as long as the static bookkeeping - for malloc itself is not corrupted by some other means. This - is only one aspect of security -- these checks do not, and - cannot, detect all possible programming errors. - - If FOOTERS is defined nonzero, then each allocated chunk - carries an additional check word to verify that it was malloced - from its space. These check words are the same within each - execution of a program using malloc, but differ across - executions, so externally crafted fake chunks cannot be - freed. This improves security by rejecting frees/reallocs that - could corrupt heap memory, in addition to the checks preventing - writes to statics that are always on. This may further improve - security at the expense of time and space overhead. (Note that - FOOTERS may also be worth using with MSPACES.) - - By default detected errors cause the program to abort (calling - "abort()"). You can override this to instead proceed past - errors by defining PROCEED_ON_ERROR. In this case, a bad free - has no effect, and a malloc that encounters a bad address - caused by user overwrites will ignore the bad address by - dropping pointers and indices to all known memory. This may - be appropriate for programs that should continue if at all - possible in the face of programming errors, although they may - run out of memory because dropped memory is never reclaimed. - - If you don't like either of these options, you can define - CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything - else. And if if you are sure that your program using malloc has - no errors or vulnerabilities, you can define INSECURE to 1, - which might (or might not) provide a small performance improvement. - - It is also possible to limit the maximum total allocatable - space, using malloc_set_footprint_limit. This is not - designed as a security feature in itself (calls to set limits - are not screened or privileged), but may be useful as one - aspect of a secure implementation. - - Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero - When USE_LOCKS is defined, each public call to malloc, free, - etc is surrounded with a lock. By default, this uses a plain - pthread mutex, win32 critical section, or a spin-lock if if - available for the platform and not disabled by setting - USE_SPIN_LOCKS=0. 
However, if USE_RECURSIVE_LOCKS is defined, - recursive versions are used instead (which are not required for - base functionality but may be needed in layered extensions). - Using a global lock is not especially fast, and can be a major - bottleneck. It is designed only to provide minimal protection - in concurrent environments, and to provide a basis for - extensions. If you are using malloc in a concurrent program, - consider instead using nedmalloc - (http://www.nedprod.com/programs/portable/nedmalloc/) or - ptmalloc (See http://www.malloc.de), which are derived from - versions of this malloc. - - System requirements: Any combination of MORECORE and/or MMAP/MUNMAP - This malloc can use unix sbrk or any emulation (invoked using - the CALL_MORECORE macro) and/or mmap/munmap or any emulation - (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system - memory. On most unix systems, it tends to work best if both - MORECORE and MMAP are enabled. On Win32, it uses emulations - based on VirtualAlloc. It also uses common C library functions - like memset. - - Compliance: I believe it is compliant with the Single Unix Specification - (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably - others as well. - -* Overview of algorithms - - This is not the fastest, most space-conserving, most portable, or - most tunable malloc ever written. However it is among the fastest - while also being among the most space-conserving, portable and - tunable. Consistent balance across these factors results in a good - general-purpose allocator for malloc-intensive programs. - - In most ways, this malloc is a best-fit allocator. Generally, it - chooses the best-fitting existing chunk for a request, with ties - broken in approximately least-recently-used order. (This strategy - normally maintains low fragmentation.) However, for requests less - than 256bytes, it deviates from best-fit when there is not an - exactly fitting available chunk by preferring to use space adjacent - to that used for the previous small request, as well as by breaking - ties in approximately most-recently-used order. (These enhance - locality of series of small allocations.) And for very large requests - (>= 256Kb by default), it relies on system memory mapping - facilities, if supported. (This helps avoid carrying around and - possibly fragmenting memory used only for large chunks.) - - All operations (except malloc_stats and mallinfo) have execution - times that are bounded by a constant factor of the number of bits in - a size_t, not counting any clearing in calloc or copying in realloc, - or actions surrounding MORECORE and MMAP that have times - proportional to the number of non-contiguous regions returned by - system allocation routines, which is often just 1. In real-time - applications, you can optionally suppress segment traversals using - NO_SEGMENT_TRAVERSAL, which assures bounded execution even when - system allocators return non-contiguous spaces, at the typical - expense of carrying around more memory and increased fragmentation. - - The implementation is not very modular and seriously overuses - macros. Perhaps someday all C compilers will do as good a job - inlining modular code as can now be done by brute-force expansion, - but now, enough of them seem not to. - - Some compilers issue a lot of warnings about code that is - dead/unreachable only on some platforms, and also about intentional - uses of negation on unsigned types. All known cases of each can be - ignored. 
- - For a longer but out of date high-level description, see - http://gee.cs.oswego.edu/dl/html/malloc.html - -* MSPACES - If MSPACES is defined, then in addition to malloc, free, etc., - this file also defines mspace_malloc, mspace_free, etc. These - are versions of malloc routines that take an "mspace" argument - obtained using create_mspace, to control all internal bookkeeping. - If ONLY_MSPACES is defined, only these versions are compiled. - So if you would like to use this allocator for only some allocations, - and your system malloc for others, you can compile with - ONLY_MSPACES and then do something like... - static mspace mymspace = create_mspace(0,0); // for example - #define mymalloc(bytes) mspace_malloc(mymspace, bytes) - - (Note: If you only need one instance of an mspace, you can instead - use "USE_DL_PREFIX" to relabel the global malloc.) - - You can similarly create thread-local allocators by storing - mspaces as thread-locals. For example: - static __thread mspace tlms = 0; - void* tlmalloc(size_t bytes) { - if (tlms == 0) tlms = create_mspace(0, 0); - return mspace_malloc(tlms, bytes); - } - void tlfree(void* mem) { mspace_free(tlms, mem); } - - Unless FOOTERS is defined, each mspace is completely independent. - You cannot allocate from one and free to another (although - conformance is only weakly checked, so usage errors are not always - caught). If FOOTERS is defined, then each chunk carries around a tag - indicating its originating mspace, and frees are directed to their - originating spaces. Normally, this requires use of locks. - - ------------------------- Compile-time options --------------------------- - -Be careful in setting #define values for numerical constants of type -size_t. On some systems, literal values are not automatically extended -to size_t precision unless they are explicitly casted. You can also -use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. - -WIN32 default: defined if _WIN32 defined - Defining WIN32 sets up defaults for MS environment and compilers. - Otherwise defaults are for unix. Beware that there seem to be some - cases where this malloc might not be a pure drop-in replacement for - Win32 malloc: Random-looking failures from Win32 GDI API's (eg; - SetDIBits()) may be due to bugs in some video driver implementations - when pixel buffers are malloc()ed, and the region spans more than - one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) - default granularity, pixel buffers may straddle virtual allocation - regions more often than when using the Microsoft allocator. You can - avoid this by using VirtualAlloc() and VirtualFree() for all pixel - buffers rather than using malloc(). If this is not possible, - recompile this malloc with a larger DEFAULT_GRANULARITY. Note: - in cases where MSC and gcc (cygwin) are known to differ on WIN32, - conditions use _MSC_VER to distinguish them. - -DLMALLOC_EXPORT default: extern - Defines how public APIs are declared. If you want to export via a - Windows DLL, you might define this as - #define DLMALLOC_EXPORT extern __declspec(dllexport) - If you want a POSIX ELF shared object, you might use - #define DLMALLOC_EXPORT extern __attribute__((visibility("default"))) - -MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *)) - Controls the minimum alignment for malloc'ed chunks. It must be a - power of two and at least 8, even on machines for which smaller - alignments would suffice. It may be defined as larger than this - though. 
Note however that code and data structures are optimized for - the case of 8-byte alignment. - -MSPACES default: 0 (false) - If true, compile in support for independent allocation spaces. - This is only supported if HAVE_MMAP is true. - -ONLY_MSPACES default: 0 (false) - If true, only compile in mspace versions, not regular versions. - -USE_LOCKS default: 0 (false) - Causes each call to each public routine to be surrounded with - pthread or WIN32 mutex lock/unlock. (If set true, this can be - overridden on a per-mspace basis for mspace versions.) If set to a - non-zero value other than 1, locks are used, but their - implementation is left out, so lock functions must be supplied manually, - as described below. - -USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available - If true, uses custom spin locks for locking. This is currently - supported only gcc >= 4.1, older gccs on x86 platforms, and recent - MS compilers. Otherwise, posix locks or win32 critical sections are - used. - -USE_RECURSIVE_LOCKS default: not defined - If defined nonzero, uses recursive (aka reentrant) locks, otherwise - uses plain mutexes. This is not required for malloc proper, but may - be needed for layered allocators such as nedmalloc. - -LOCK_AT_FORK default: not defined - If defined nonzero, performs pthread_atfork upon initialization - to initialize child lock while holding parent lock. The implementation - assumes that pthread locks (not custom locks) are being used. In other - cases, you may need to customize the implementation. - -FOOTERS default: 0 - If true, provide extra checking and dispatching by placing - information in the footers of allocated chunks. This adds - space and time overhead. - -INSECURE default: 0 - If true, omit checks for usage errors and heap space overwrites. - -USE_DL_PREFIX default: NOT defined - Causes compiler to prefix all public routines with the string 'dl'. - This can be useful when you only want to use this malloc in one part - of a program, using your regular system malloc elsewhere. - -MALLOC_INSPECT_ALL default: NOT defined - If defined, compiles malloc_inspect_all and mspace_inspect_all, that - perform traversal of all heap space. Unless access to these - functions is otherwise restricted, you probably do not want to - include them in secure implementations. - -ABORT default: defined as abort() - Defines how to abort on failed checks. On most systems, a failed - check cannot die with an "assert" or even print an informative - message, because the underlying print routines in turn call malloc, - which will fail again. Generally, the best policy is to simply call - abort(). It's not very useful to do more than this because many - errors due to overwriting will show up as address faults (null, odd - addresses etc) rather than malloc-triggered checks, so will also - abort. Also, most compilers know that abort() does not return, so - can better optimize code conditionally calling it. - -PROCEED_ON_ERROR default: defined as 0 (false) - Controls whether detected bad addresses cause them to bypassed - rather than aborting. If set, detected bad arguments to free and - realloc are ignored. And all bookkeeping information is zeroed out - upon a detected overwrite of freed heap space, thus losing the - ability to ever return it from malloc again, but enabling the - application to proceed. If PROCEED_ON_ERROR is defined, the - static variable malloc_corruption_error_count is compiled in - and can be examined to see if errors have occurred. 
This option - generates slower code than the default abort policy. - -DEBUG default: NOT defined - The DEBUG setting is mainly intended for people trying to modify - this code or diagnose problems when porting to new platforms. - However, it may also be able to better isolate user errors than just - using runtime checks. The assertions in the check routines spell - out in more detail the assumptions and invariants underlying the - algorithms. The checking is fairly extensive, and will slow down - execution noticeably. Calling malloc_stats or mallinfo with DEBUG - set will attempt to check every non-mmapped allocated and free chunk - in the course of computing the summaries. - -ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) - Debugging assertion failures can be nearly impossible if your - version of the assert macro causes malloc to be called, which will - lead to a cascade of further failures, blowing the runtime stack. - ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), - which will usually make debugging easier. - -MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 - The action to take before "return 0" when malloc fails to be able to - return memory because there is none available. - -HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES - True if this system supports sbrk or an emulation of it. - -MORECORE default: sbrk - The name of the sbrk-style system routine to call to obtain more - memory. See below for guidance on writing custom MORECORE - functions. The type of the argument to sbrk/MORECORE varies across - systems. It cannot be size_t, because it supports negative - arguments, so it is normally the signed type of the same width as - size_t (sometimes declared as "intptr_t"). It doesn't much matter - though. Internally, we only call it with arguments less than half - the max value of a size_t, which should work across all reasonable - possibilities, although sometimes generating compiler warnings. - -MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE - If true, take advantage of fact that consecutive calls to MORECORE - with positive arguments always return contiguous increasing - addresses. This is true of unix sbrk. It does not hurt too much to - set it true anyway, since malloc copes with non-contiguities. - Setting it false when definitely non-contiguous saves time - and possibly wasted space it would take to discover this though. - -MORECORE_CANNOT_TRIM default: NOT defined - True if MORECORE cannot release space back to the system when given - negative arguments. This is generally necessary only if you are - using a hand-crafted MORECORE function that cannot handle negative - arguments. - -NO_SEGMENT_TRAVERSAL default: 0 - If non-zero, suppresses traversals of memory segments - returned by either MORECORE or CALL_MMAP. This disables - merging of segments that are contiguous, and selectively - releasing them to the OS if unused, but bounds execution times. - -HAVE_MMAP default: 1 (true) - True if this system supports mmap or an emulation of it. If so, and - HAVE_MORECORE is not true, MMAP is used for all system - allocation. If set and HAVE_MORECORE is true as well, MMAP is - primarily used to directly allocate very large blocks. It is also - used as a backup strategy in cases where MORECORE fails to provide - space from system. Note: A single call to MUNMAP is assumed to be - able to unmap memory that may have be allocated using multiple calls - to MMAP, so long as they are adjacent. 
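  As a concrete sketch, an mmap-only configuration (no sbrk at all) can be
  expressed with just the options documented above; the same macros may be
  passed as -D compiler flags instead of #defines when building dlmalloc.c:

      #define HAVE_MORECORE 0   /* never call sbrk/MORECORE */
      #define HAVE_MMAP     1   /* serve all system requests via CALL_MMAP */
      #define USE_LOCKS     1   /* serialize the public entry points */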
- -HAVE_MREMAP default: 1 on linux, else 0 - If true realloc() uses mremap() to re-allocate large blocks and - extend or shrink allocation spaces. - -MMAP_CLEARS default: 1 except on WINCE. - True if mmap clears memory so calloc doesn't need to. This is true - for standard unix mmap using /dev/zero and on WIN32 except for WINCE. - -USE_BUILTIN_FFS default: 0 (i.e., not used) - Causes malloc to use the builtin ffs() function to compute indices. - Some compilers may recognize and intrinsify ffs to be faster than the - supplied C version. Also, the case of x86 using gcc is special-cased - to an asm instruction, so is already as fast as it can be, and so - this setting has no effect. Similarly for Win32 under recent MS compilers. - (On most x86s, the asm version is only slightly faster than the C version.) - -malloc_getpagesize default: derive from system includes, or 4096. - The system page size. To the extent possible, this malloc manages - memory from the system in page-size units. This may be (and - usually is) a function rather than a constant. This is ignored - if WIN32, where page size is determined using getSystemInfo during - initialization. - -USE_DEV_RANDOM default: 0 (i.e., not used) - Causes malloc to use /dev/random to initialize secure magic seed for - stamping footers. Otherwise, the current time is used. - -NO_MALLINFO default: 0 - If defined, don't compile "mallinfo". This can be a simple way - of dealing with mismatches between system declarations and - those in this file. - -MALLINFO_FIELD_TYPE default: size_t - The type of the fields in the mallinfo struct. This was originally - defined as "int" in SVID etc, but is more usefully defined as - size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set - -NO_MALLOC_STATS default: 0 - If defined, don't compile "malloc_stats". This avoids calls to - fprintf and bringing in stdio dependencies you might not want. - -REALLOC_ZERO_BYTES_FREES default: not defined - This should be set if a call to realloc with zero bytes should - be the same as a call to free. Some people think it should. Otherwise, - since this malloc returns a unique pointer for malloc(0), so does - realloc(p, 0). - -LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H -LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H -LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H default: NOT defined unless on WIN32 - Define these if your system does not have these header files. - You might need to manually insert some of the declarations they provide. - -DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, - system_info.dwAllocationGranularity in WIN32, - otherwise 64K. - Also settable using mallopt(M_GRANULARITY, x) - The unit for allocating and deallocating memory from the system. On - most systems with contiguous MORECORE, there is no reason to - make this more than a page. However, systems with MMAP tend to - either require or encourage larger granularities. You can increase - this value to prevent system allocation functions to be called so - often, especially if they are slow. The value must be at least one - page and must be a power of two. Setting to 0 causes initialization - to either page size or win32 region size. (Note: In previous - versions of malloc, the equivalent of this option was called - "TOP_PAD") - -DEFAULT_TRIM_THRESHOLD default: 2MB - Also settable using mallopt(M_TRIM_THRESHOLD, x) - The maximum amount of unused top-most memory to keep before - releasing via malloc_trim in free(). 
Automatic trimming is mainly - useful in long-lived programs using contiguous MORECORE. Because - trimming via sbrk can be slow on some systems, and can sometimes be - wasteful (in cases where programs immediately afterward allocate - more large chunks) the value should be high enough so that your - overall system performance would improve by releasing this much - memory. As a rough guide, you might set to a value close to the - average size of a process (program) running on your system. - Releasing this much memory would allow such a process to run in - memory. Generally, it is worth tuning trim thresholds when a - program undergoes phases where several large chunks are allocated - and released in ways that can reuse each other's storage, perhaps - mixed with phases where there are no such chunks at all. The trim - value must be greater than page size to have any useful effect. To - disable trimming completely, you can set to MAX_SIZE_T. Note that the trick - some people use of mallocing a huge space and then freeing it at - program startup, in an attempt to reserve system memory, doesn't - have the intended effect under automatic trimming, since that memory - will immediately be returned to the system. - -DEFAULT_MMAP_THRESHOLD default: 256K - Also settable using mallopt(M_MMAP_THRESHOLD, x) - The request size threshold for using MMAP to directly service a - request. Requests of at least this size that cannot be allocated - using already-existing space will be serviced via mmap. (If enough - normal freed space already exists it is used instead.) Using mmap - segregates relatively large chunks of memory so that they can be - individually obtained and released from the host system. A request - serviced through mmap is never reused by any other request (at least - not directly; the system may just so happen to remap successive - requests to the same locations). Segregating space in this way has - the benefits that: Mmapped space can always be individually released - back to the system, which helps keep the system level memory demands - of a long-lived program low. Also, mapped memory doesn't become - `locked' between other chunks, as can happen with normally allocated - chunks, which means that even trimming via malloc_trim would not - release them. However, it has the disadvantage that the space - cannot be reclaimed, consolidated, and then used to service later - requests, as happens with normal chunks. The advantages of mmap - nearly always outweigh disadvantages for "large" chunks, but the - value of "large" may vary across systems. The default is an - empirically derived value that works well in most systems. You can - disable mmap by setting to MAX_SIZE_T. - -MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP - The number of consolidated frees between checks to release - unused segments when freeing. When using non-contiguous segments, - especially with multiple mspaces, checking only for topmost space - doesn't always suffice to trigger trimming. To compensate for this, - free() will, with a period of MAX_RELEASE_CHECK_RATE (or the - current number of segments, if greater) try to release unused - segments to the OS when freeing chunks that result in - consolidation. The best value for this parameter is a compromise - between slowing down frees with relatively costly checks that - rarely trigger versus holding on to unused memory. To effectively - disable, set to MAX_SIZE_T. This may lead to a very slight speed - improvement at the expense of carrying around more memory. 
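  As a sketch, the granularity, trim, and mmap thresholds documented above can
  also be adjusted at run time through mallopt (dlmallopt when USE_DL_PREFIX
  is defined); each call returns 1 on success and 0 otherwise:

      mallopt(M_GRANULARITY,    128 * 1024);       /* system allocation unit    */
      mallopt(M_TRIM_THRESHOLD, 4 * 1024 * 1024);  /* keep more top-most memory */
      mallopt(M_MMAP_THRESHOLD, 512 * 1024);       /* mmap only larger requests */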
-*/ - -/* Version identifier to allow people to support multiple versions */ -#ifndef DLMALLOC_VERSION -#define DLMALLOC_VERSION 20806 -#endif /* DLMALLOC_VERSION */ - -#ifndef DLMALLOC_EXPORT -#define DLMALLOC_EXPORT extern -#endif - -#ifndef WIN32 -#ifdef _WIN32 -#define WIN32 1 -#endif /* _WIN32 */ -#ifdef _WIN32_WCE -#define LACKS_FCNTL_H -#define WIN32 1 -#endif /* _WIN32_WCE */ -#endif /* WIN32 */ -#ifdef WIN32 -#define WIN32_LEAN_AND_MEAN -#include -#include -#define HAVE_MMAP 1 -#define HAVE_MORECORE 0 -#define LACKS_UNISTD_H -#define LACKS_SYS_PARAM_H -#define LACKS_SYS_MMAN_H -#define LACKS_STRING_H -#define LACKS_STRINGS_H -#define LACKS_SYS_TYPES_H -#define LACKS_ERRNO_H -#define LACKS_SCHED_H -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef MMAP_CLEARS -#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ -#define MMAP_CLEARS 0 -#else -#define MMAP_CLEARS 1 -#endif /* _WIN32_WCE */ -#endif /*MMAP_CLEARS */ -#endif /* WIN32 */ - -#if defined(DARWIN) || defined(_DARWIN) -/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ -#ifndef HAVE_MORECORE -#define HAVE_MORECORE 0 -#define HAVE_MMAP 1 -/* OSX allocators provide 16 byte alignment */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)16U) -#endif -#endif /* HAVE_MORECORE */ -#endif /* DARWIN */ - -#ifndef LACKS_SYS_TYPES_H -#include /* For size_t */ -#endif /* LACKS_SYS_TYPES_H */ - -/* The maximum possible size_t value has all bits set */ -#define MAX_SIZE_T (~(size_t)0) - -#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ -#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ - (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) -#endif /* USE_LOCKS */ - -#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ -#if ((defined(__GNUC__) && \ - ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ - defined(__i386__) || defined(__x86_64__))) || \ - (defined(_MSC_VER) && _MSC_VER>=1310)) -#ifndef USE_SPIN_LOCKS -#define USE_SPIN_LOCKS 1 -#endif /* USE_SPIN_LOCKS */ -#elif USE_SPIN_LOCKS -#error "USE_SPIN_LOCKS defined without implementation" -#endif /* ... locks available... 
*/ -#elif !defined(USE_SPIN_LOCKS) -#define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ - -#ifndef ONLY_MSPACES -#define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ -#ifndef MSPACES -#if ONLY_MSPACES -#define MSPACES 1 -#else /* ONLY_MSPACES */ -#define MSPACES 0 -#endif /* ONLY_MSPACES */ -#endif /* MSPACES */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ -#ifndef FOOTERS -#define FOOTERS 0 -#endif /* FOOTERS */ -#ifndef ABORT -#define ABORT abort() -#endif /* ABORT */ -#ifndef ABORT_ON_ASSERT_FAILURE -#define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ -#ifndef PROCEED_ON_ERROR -#define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ - -#ifndef INSECURE -#define INSECURE 0 -#endif /* INSECURE */ -#ifndef MALLOC_INSPECT_ALL -#define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ -#ifndef HAVE_MMAP -#define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ -#ifndef MMAP_CLEARS -#define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ -#ifndef HAVE_MREMAP -#ifdef linux -#define HAVE_MREMAP 1 -#define _GNU_SOURCE /* Turns on mremap() definition */ -#else /* linux */ -#define HAVE_MREMAP 0 -#endif /* linux */ -#endif /* HAVE_MREMAP */ -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef HAVE_MORECORE -#if ONLY_MSPACES -#define HAVE_MORECORE 0 -#else /* ONLY_MSPACES */ -#define HAVE_MORECORE 1 -#endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ -#if !HAVE_MORECORE -#define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ -#define MORECORE_DEFAULT sbrk -#ifndef MORECORE_CONTIGUOUS -#define MORECORE_CONTIGUOUS 1 -#endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ -#ifndef DEFAULT_GRANULARITY -#if (MORECORE_CONTIGUOUS || defined(WIN32)) -#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ -#else /* MORECORE_CONTIGUOUS */ -#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) -#endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ -#ifndef DEFAULT_TRIM_THRESHOLD -#ifndef MORECORE_CANNOT_TRIM -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#else /* MORECORE_CANNOT_TRIM */ -#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T -#endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ -#ifndef DEFAULT_MMAP_THRESHOLD -#if HAVE_MMAP -#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) -#else /* HAVE_MMAP */ -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ -#ifndef MAX_RELEASE_CHECK_RATE -#if HAVE_MMAP -#define MAX_RELEASE_CHECK_RATE 4095 -#else -#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ -#ifndef USE_BUILTIN_FFS -#define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ -#ifndef USE_DEV_RANDOM -#define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ -#ifndef NO_MALLINFO -#define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ -#ifndef MALLINFO_FIELD_TYPE -#define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ -#ifndef NO_MALLOC_STATS -#define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ -#ifndef NO_SEGMENT_TRAVERSAL -#define NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ - -/* - mallopt tuning options. SVID/XPG defines four standard parameter - numbers for mallopt, normally defined in malloc.h. None of these - are used in this malloc, so setting them has no effect. But this - malloc does support the following options. 
-*/ - -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) - -/* ------------------------ Mallinfo declarations ------------------------ */ - -#if !NO_MALLINFO -/* - This version of malloc supports the standard SVID/XPG mallinfo - routine that returns a struct containing usage properties and - statistics. It should work on any system that has a - /usr/include/malloc.h defining struct mallinfo. The main - declaration needed is the mallinfo struct that is returned (by-copy) - by mallinfo(). The malloinfo struct contains a bunch of fields that - are not even meaningful in this version of malloc. These fields are - are instead filled by mallinfo() with other numbers that might be of - interest. - - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a - /usr/include/malloc.h file that includes a declaration of struct - mallinfo. If so, it is included; else a compliant version is - declared below. These must be precisely the same for mallinfo() to - work. The original SVID version of this struct, defined on most - systems with mallinfo, declares all fields as ints. But some others - define as unsigned long. If your system defines the fields using a - type of different width than listed here, you MUST #include your - system version and #define HAVE_USR_INCLUDE_MALLOC_H. -*/ - -/* #define HAVE_USR_INCLUDE_MALLOC_H */ - -#ifdef HAVE_USR_INCLUDE_MALLOC_H -#include "/usr/include/malloc.h" -#else /* HAVE_USR_INCLUDE_MALLOC_H */ -#ifndef STRUCT_MALLINFO_DECLARED -/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ -#define _STRUCT_MALLINFO -#define STRUCT_MALLINFO_DECLARED 1 -struct mallinfo { - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ -}; -#endif /* STRUCT_MALLINFO_DECLARED */ -#endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ - -/* - Try to persuade compilers to inline. The most critical functions for - inlining are defined as macros, so these aren't used for them. 
-*/ - -#ifndef FORCEINLINE - #if defined(__GNUC__) -#define FORCEINLINE __inline __attribute__ ((always_inline)) - #elif defined(_MSC_VER) - #define FORCEINLINE __forceinline - #endif -#endif -#ifndef NOINLINE - #if defined(__GNUC__) - #define NOINLINE __attribute__ ((noinline)) - #elif defined(_MSC_VER) - #define NOINLINE __declspec(noinline) - #else - #define NOINLINE - #endif -#endif - -#ifdef __cplusplus -extern "C" { -#ifndef FORCEINLINE - #define FORCEINLINE inline -#endif -#endif /* __cplusplus */ -#ifndef FORCEINLINE - #define FORCEINLINE -#endif - -#if !ONLY_MSPACES - -/* ------------------- Declarations of public routines ------------------- */ - -#ifndef USE_DL_PREFIX -#define dlcalloc calloc -#define dlfree free -#define dlmalloc malloc -#define dlmemalign memalign -#define dlposix_memalign posix_memalign -#define dlrealloc realloc -#define dlrealloc_in_place realloc_in_place -#define dlvalloc valloc -#define dlpvalloc pvalloc -#define dlmallinfo mallinfo -#define dlmallopt mallopt -#define dlmalloc_trim malloc_trim -#define dlmalloc_stats malloc_stats -#define dlmalloc_usable_size malloc_usable_size -#define dlmalloc_footprint malloc_footprint -#define dlmalloc_max_footprint malloc_max_footprint -#define dlmalloc_footprint_limit malloc_footprint_limit -#define dlmalloc_set_footprint_limit malloc_set_footprint_limit -#define dlmalloc_inspect_all malloc_inspect_all -#define dlindependent_calloc independent_calloc -#define dlindependent_comalloc independent_comalloc -#define dlbulk_free bulk_free -#endif /* USE_DL_PREFIX */ - -/* - malloc(size_t n) - Returns a pointer to a newly allocated chunk of at least n bytes, or - null if no space is available, in which case errno is set to ENOMEM - on ANSI C systems. - - If n is zero, malloc returns a minimum-sized chunk. (The minimum - size is 16 bytes on most 32bit systems, and 32 bytes on 64bit - systems.) Note that size_t is an unsigned type, so calls with - arguments that would be negative if signed are interpreted as - requests for huge amounts of space, which will often fail. The - maximum supported value of n differs across systems, but is in all - cases less than the maximum representable value of a size_t. -*/ -DLMALLOC_EXPORT void* dlmalloc(size_t); - -/* - free(void* p) - Releases the chunk of memory pointed to by p, that had been previously - allocated using malloc or a related routine such as realloc. - It has no effect if p is null. If p was not malloced or already - freed, free(p) will by default cause the current program to abort. -*/ -DLMALLOC_EXPORT void dlfree(void*); - -/* - calloc(size_t n_elements, size_t element_size); - Returns a pointer to n_elements * element_size bytes, with all locations - set to zero. -*/ -DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); - -/* - realloc(void* p, size_t n) - Returns a pointer to a chunk of size n that contains the same data - as does chunk p up to the minimum of (n, p's size) bytes, or null - if no space is available. - - The returned pointer may or may not be the same as p. The algorithm - prefers extending p in most cases when possible, otherwise it - employs the equivalent of a malloc-copy-free sequence. - - If p is null, realloc is equivalent to malloc. - - If space is not available, realloc returns null, errno is set (if on - ANSI) and p is NOT freed. - - if n is for fewer bytes than already held by p, the newly unused - space is lopped off and freed if possible. realloc with a size - argument of zero (re)allocates a minimum-sized chunk. 
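  A short sketch of these conventions, using the dl-prefixed names declared
  in this file:

      void* p = dlmalloc(100);
      void* q = dlrealloc(p, 200);   /* may move; the first 100 bytes are kept */
      if (q == 0)
        dlfree(p);                   /* on failure the old block is NOT freed  */
      else
        dlfree(q);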
- - The old unix realloc convention of allowing the last-free'd chunk - to be used as an argument to realloc is not supported. -*/ -DLMALLOC_EXPORT void* dlrealloc(void*, size_t); - -/* - realloc_in_place(void* p, size_t n) - Resizes the space allocated for p to size n, only if this can be - done without moving p (i.e., only if there is adjacent space - available if n is greater than p's current allocated size, or n is - less than or equal to p's size). This may be used instead of plain - realloc if an alternative allocation strategy is needed upon failure - to expand space; for example, reallocation of a buffer that must be - memory-aligned or cleared. You can use realloc_in_place to trigger - these alternatives only when needed. - - Returns p if successful; otherwise null. -*/ -DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); - -/* - memalign(size_t alignment, size_t n); - Returns a pointer to a newly allocated chunk of n bytes, aligned - in accord with the alignment argument. - - The alignment argument should be a power of two. If the argument is - not a power of two, the nearest greater power is used. - 8-byte alignment is guaranteed by normal malloc calls, so don't - bother calling memalign with an argument of 8 or less. - - Overreliance on memalign is a sure way to fragment space. -*/ -DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); - -/* - int posix_memalign(void** pp, size_t alignment, size_t n); - Allocates a chunk of n bytes, aligned in accord with the alignment - argument. Differs from memalign only in that it (1) assigns the - allocated memory to *pp rather than returning it, (2) fails and - returns EINVAL if the alignment is not a power of two (3) fails and - returns ENOMEM if memory cannot be allocated. -*/ -DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); - -/* - valloc(size_t n); - Equivalent to memalign(pagesize, n), where pagesize is the page - size of the system. If the pagesize is unknown, 4096 is used. -*/ -DLMALLOC_EXPORT void* dlvalloc(size_t); - -/* - mallopt(int parameter_number, int parameter_value) - Sets tunable parameters The format is to provide a - (parameter-number, parameter-value) pair. mallopt then sets the - corresponding parameter to the argument value if it can (i.e., so - long as the value is meaningful), and returns 1 if successful else - 0. To workaround the fact that mallopt is specified to use int, - not size_t parameters, the value -1 is specially treated as the - maximum unsigned size_t value. - - SVID/XPG/ANSI defines four standard param numbers for mallopt, - normally defined in malloc.h. None of these are use in this malloc, - so setting them has no effect. But this malloc also supports other - options in mallopt. See below for details. Briefly, supported - parameters are as follows (listed defaults are for "typical" - configurations). - - Symbol param # default allowed param values - M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables) - M_GRANULARITY -2 page size any power of 2 >= page size - M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) -*/ -DLMALLOC_EXPORT int dlmallopt(int, int); - -/* - malloc_footprint(); - Returns the number of bytes obtained from the system. The total - number of bytes allocated by malloc, realloc etc., is less than this - value. Unlike mallinfo, this function returns only a precomputed - result, so can be called frequently to monitor memory consumption. - Even if locks are otherwise defined, this function does not use them, - so results might not be up to date. 
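  For example, a minimal monitoring sketch (both calls return precomputed
  values, so polling them frequently is cheap; fprintf assumes <stdio.h>):

      size_t fp = dlmalloc_footprint();       /* bytes obtained from the system */
      size_t mx = dlmalloc_max_footprint();   /* peak value of that footprint   */
      fprintf(stderr, "footprint: %zu (max %zu)\n", fp, mx);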
-*/ -DLMALLOC_EXPORT size_t dlmalloc_footprint(void); - -/* - malloc_max_footprint(); - Returns the maximum number of bytes obtained from the system. This - value will be greater than current footprint if deallocated space - has been reclaimed by the system. The peak number of bytes allocated - by malloc, realloc etc., is less than this value. Unlike mallinfo, - this function returns only a precomputed result, so can be called - frequently to monitor memory consumption. Even if locks are - otherwise defined, this function does not use them, so results might - not be up to date. -*/ -DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void); - -/* - malloc_footprint_limit(); - Returns the number of bytes that the heap is allowed to obtain from - the system, returning the last value returned by - malloc_set_footprint_limit, or the maximum size_t value if - never set. The returned value reflects a permission. There is no - guarantee that this number of bytes can actually be obtained from - the system. -*/ -DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); - -/* - malloc_set_footprint_limit(); - Sets the maximum number of bytes to obtain from the system, causing - failure returns from malloc and related functions upon attempts to - exceed this value. The argument value may be subject to page - rounding to an enforceable limit; this actual value is returned. - Using an argument of the maximum possible size_t effectively - disables checks. If the argument is less than or equal to the - current malloc_footprint, then all future allocations that require - additional system memory will fail. However, invocation cannot - retroactively deallocate existing used memory. -*/ -DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); - -#if MALLOC_INSPECT_ALL -/* - malloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg); - Traverses the heap and calls the given handler for each managed - region, skipping all bytes that are (or may be) used for bookkeeping - purposes. Traversal does not include include chunks that have been - directly memory mapped. Each reported region begins at the start - address, and continues up to but not including the end address. The - first used_bytes of the region contain allocated data. If - used_bytes is zero, the region is unallocated. The handler is - invoked with the given callback argument. If locks are defined, they - are held during the entire traversal. It is a bad idea to invoke - other malloc functions from within the handler. - - For example, to count the number of in-use chunks with size greater - than 1000, you could write: - static int count = 0; - void count_chunks(void* start, void* end, size_t used, void* arg) { - if (used >= 1000) ++count; - } - then: - malloc_inspect_all(count_chunks, NULL); - - malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. -*/ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), - void* arg); - -#endif /* MALLOC_INSPECT_ALL */ - -#if !NO_MALLINFO -/* - mallinfo() - Returns (by copy) a struct containing various summary statistics: - - arena: current total non-mmapped bytes allocated from system - ordblks: the number of free chunks - smblks: always zero. - hblks: current number of mmapped regions - hblkhd: total bytes held in mmapped regions - usmblks: the maximum total allocated space. This will be greater - than current total if trimming has occurred. 
- fsmblks: always zero - uordblks: current total allocated space (normal or mmapped) - fordblks: total free space - keepcost: the maximum number of bytes that could ideally be released - back to system via malloc_trim. ("ideally" means that - it ignores page restrictions etc.) - - Because these fields are ints, but internal bookkeeping may - be kept as longs, the reported values may wrap around zero and - thus be inaccurate. -*/ -DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ - -/* - independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); - - independent_calloc is similar to calloc, but instead of returning a - single cleared space, it returns an array of pointers to n_elements - independent elements that can hold contents of size elem_size, each - of which starts out cleared, and can be independently freed, - realloc'ed etc. The elements are guaranteed to be adjacently - allocated (this is not guaranteed to occur with multiple callocs or - mallocs), which may also improve cache locality in some - applications. - - The "chunks" argument is optional (i.e., may be null, which is - probably the most typical usage). If it is null, the returned array - is itself dynamically allocated and should also be freed when it is - no longer needed. Otherwise, the chunks array must be of at least - n_elements in length. It is filled in with the pointers to the - chunks. - - In either case, independent_calloc returns this pointer array, or - null if the allocation failed. If n_elements is zero and "chunks" - is null, it returns a chunk representing an array with zero elements - (which should be freed if not wanted). - - Each element must be freed when it is no longer needed. This can be - done all at once using bulk_free. - - independent_calloc simplifies and speeds up implementations of many - kinds of pools. It may also be useful when constructing large data - structures that initially have a fixed number of fixed-sized nodes, - but the number is not known at compile time, and some of the nodes - may later need to be freed. For example: - - struct Node { int item; struct Node* next; }; - - struct Node* build_list() { - struct Node** pool; - int n = read_number_of_nodes_needed(); - if (n <= 0) return 0; - pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); - if (pool == 0) die(); - // organize into a linked list... - struct Node* first = pool[0]; - for (i = 0; i < n-1; ++i) - pool[i]->next = pool[i+1]; - free(pool); // Can now free the array (or not, if it is needed later) - return first; - } -*/ -DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); - -/* - independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); - - independent_comalloc allocates, all at once, a set of n_elements - chunks with sizes indicated in the "sizes" array. It returns - an array of pointers to these elements, each of which can be - independently freed, realloc'ed etc. The elements are guaranteed to - be adjacently allocated (this is not guaranteed to occur with - multiple callocs or mallocs), which may also improve cache locality - in some applications. - - The "chunks" argument is optional (i.e., may be null). If it is null - the returned array is itself dynamically allocated and should also - be freed when it is no longer needed. Otherwise, the chunks array - must be of at least n_elements in length. It is filled in with the - pointers to the chunks. 
- - In either case, independent_comalloc returns this pointer array, or - null if the allocation failed. If n_elements is zero and chunks is - null, it returns a chunk representing an array with zero elements - (which should be freed if not wanted). - - Each element must be freed when it is no longer needed. This can be - done all at once using bulk_free. - - independent_comallac differs from independent_calloc in that each - element may have a different size, and also that it does not - automatically clear elements. - - independent_comalloc can be used to speed up allocation in cases - where several structs or objects must always be allocated at the - same time. For example: - - struct Head { ... } - struct Foot { ... } - - void send_message(char* msg) { - int msglen = strlen(msg); - size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; - void* chunks[3]; - if (independent_comalloc(3, sizes, chunks) == 0) - die(); - struct Head* head = (struct Head*)(chunks[0]); - char* body = (char*)(chunks[1]); - struct Foot* foot = (struct Foot*)(chunks[2]); - // ... - } - - In general though, independent_comalloc is worth using only for - larger values of n_elements. For small values, you probably won't - detect enough difference from series of malloc calls to bother. - - Overuse of independent_comalloc can increase overall memory usage, - since it cannot reuse existing noncontiguous small chunks that - might be available for some of the elements. -*/ -DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); - -/* - bulk_free(void* array[], size_t n_elements) - Frees and clears (sets to null) each non-null pointer in the given - array. This is likely to be faster than freeing them one-by-one. - If footers are used, pointers that have been allocated in different - mspaces are not freed or cleared, and the count of all such pointers - is returned. For large arrays of pointers with poor locality, it - may be worthwhile to sort this array before calling bulk_free. -*/ -DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); - -/* - pvalloc(size_t n); - Equivalent to valloc(minimum-page-that-holds(n)), that is, - round up n to nearest pagesize. - */ -DLMALLOC_EXPORT void* dlpvalloc(size_t); - -/* - malloc_trim(size_t pad); - - If possible, gives memory back to the system (via negative arguments - to sbrk) if there is unused memory at the `high' end of the malloc - pool or in unused MMAP segments. You can call this after freeing - large blocks of memory to potentially reduce the system-level memory - requirements of a program. However, it cannot guarantee to reduce - memory. Under some allocation patterns, some large free blocks of - memory will be locked between two used chunks, so they cannot be - given back to the system. - - The `pad' argument to malloc_trim represents the amount of free - trailing space to leave untrimmed. If this argument is zero, only - the minimum amount of memory to maintain internal data structures - will be left. Non-zero arguments can be supplied to maintain enough - trailing space to service future expected allocations without having - to re-obtain memory from the system. - - Malloc_trim returns 1 if it actually released any memory, else 0. 
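A short sketch of how bulk_free and malloc_trim are typically combined, again assuming linkage against a USE_DL_PREFIX build of this file; the declarations restate the prototypes documented here:

#include <stddef.h>
#include <stdio.h>

extern void*  dlmalloc(size_t);
extern size_t dlbulk_free(void**, size_t);
extern int    dlmalloc_trim(size_t);

int main(void) {
  void* blocks[8];
  size_t i;
  for (i = 0; i < 8; ++i)
    blocks[i] = dlmalloc(64 * 1024);
  /* Frees and clears each non-null pointer; returns how many were skipped. */
  size_t skipped = dlbulk_free(blocks, 8);
  printf("skipped %zu, trim released memory: %d\n", skipped, dlmalloc_trim(0));
  return 0;
}
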
-*/ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); - -/* - malloc_stats(); - Prints on stderr the amount of space obtained from the system (both - via sbrk and mmap), the maximum amount (which may be more than - current if malloc_trim and/or munmap got called), and the current - number of bytes allocated via malloc (or realloc, etc) but not yet - freed. Note that this is the number of bytes allocated, not the - number requested. It will be larger than the number requested - because of alignment and bookkeeping overhead. Because it includes - alignment wastage as being in use, this figure may be greater than - zero even when no user-level chunks are allocated. - - The reported current and maximum system memory can be inaccurate if - a program makes other calls to system memory allocation functions - (normally sbrk) outside of malloc. - - malloc_stats prints only the most commonly interesting statistics. - More information can be obtained by calling mallinfo. -*/ -DLMALLOC_EXPORT void dlmalloc_stats(void); - -/* - malloc_usable_size(void* p); - - Returns the number of bytes you can actually use in - an allocated chunk, which may be more than you requested (although - often not) due to alignment and minimum size constraints. - You can use this many bytes without worrying about - overwriting other allocated objects. This is not a particularly great - programming practice. malloc_usable_size can be more useful in - debugging and assertions, for example: - - p = malloc(n); - assert(malloc_usable_size(p) >= 256); -*/ -size_t dlmalloc_usable_size(void*); - -#endif /* ONLY_MSPACES */ - -#if MSPACES - -/* - mspace is an opaque type representing an independent - region of space that supports mspace_malloc, etc. -*/ -typedef void* mspace; - -/* - create_mspace creates and returns a new independent space with the - given initial capacity, or, if 0, the default granularity size. It - returns null if there is no system memory available to create the - space. If argument locked is non-zero, the space uses a separate - lock to control access. The capacity of the space will grow - dynamically as needed to service mspace_malloc requests. You can - control the sizes of incremental increases of this space by - compiling with a different DEFAULT_GRANULARITY or dynamically - setting with mallopt(M_GRANULARITY, value). -*/ -DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked); - -/* - destroy_mspace destroys the given space, and attempts to return all - of its memory back to the system, returning the total number of - bytes freed. After destruction, the results of access to all memory - used by the space become undefined. -*/ -DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); - -/* - create_mspace_with_base uses the memory supplied as the initial base - of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this - space is used for bookkeeping, so the capacity must be at least this - large. (Otherwise 0 is returned.) When this initial space is - exhausted, additional memory will be obtained from the system. - Destroying this space will deallocate all additionally allocated - space (if possible) but not the initial base. -*/ -DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); - -/* - mspace_track_large_chunks controls whether requests for large chunks - are allocated in their own untracked mmapped regions, separate from - others in this mspace. By default large chunks are not tracked, - which reduces fragmentation. 
However, such chunks are not - necessarily released to the system upon destroy_mspace. Enabling - tracking by setting to true may increase fragmentation, but avoids - leakage when relying on destroy_mspace to release all memory - allocated using this space. The function returns the previous - setting. -*/ -DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); - - -/* - mspace_malloc behaves as malloc, but operates within - the given space. -*/ -DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); - -/* - mspace_free behaves as free, but operates within - the given space. - - If compiled with FOOTERS==1, mspace_free is not actually needed. - free may be called instead of mspace_free because freed chunks from - any space are handled by their originating spaces. -*/ -DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); - -/* - mspace_realloc behaves as realloc, but operates within - the given space. - - If compiled with FOOTERS==1, mspace_realloc is not actually - needed. realloc may be called instead of mspace_realloc because - realloced chunks from any space are handled by their originating - spaces. -*/ -DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); - -/* - mspace_calloc behaves as calloc, but operates within - the given space. -*/ -DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); - -/* - mspace_memalign behaves as memalign, but operates within - the given space. -*/ -DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); - -/* - mspace_independent_calloc behaves as independent_calloc, but - operates within the given space. -*/ -DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]); - -/* - mspace_independent_comalloc behaves as independent_comalloc, but - operates within the given space. -*/ -DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]); - -/* - mspace_footprint() returns the number of bytes obtained from the - system for this space. -*/ -DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); - -/* - mspace_max_footprint() returns the peak number of bytes obtained from the - system for this space. -*/ -DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); - - -#if !NO_MALLINFO -/* - mspace_mallinfo behaves as mallinfo, but reports properties of - the given space. -*/ -DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); -#endif /* NO_MALLINFO */ - -/* - malloc_usable_size(void* p) behaves the same as malloc_usable_size; -*/ -DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); - -/* - mspace_malloc_stats behaves as malloc_stats, but reports - properties of the given space. -*/ -DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp); - -/* - mspace_trim behaves as malloc_trim, but - operates within the given space. -*/ -DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); - -/* - An alias for mallopt. -*/ -DLMALLOC_EXPORT int mspace_mallopt(int, int); - -#endif /* MSPACES */ - -#ifdef __cplusplus -} /* end of extern "C" */ -#endif /* __cplusplus */ - -/* - ======================================================================== - To make a fully customizable malloc.h header file, cut everything - above this line, put into file malloc.h, edit to suit, and #include it - on the next line, as well as in programs that use this malloc. 
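Taken together, the mspace entry points above support a self-contained arena pattern. A minimal sketch, assuming this file is compiled with MSPACES enabled and the program links against it (the typedef and extern declarations restate the ones above):

#include <stddef.h>
#include <stdio.h>

typedef void* mspace;
extern mspace create_mspace(size_t capacity, int locked);
extern void*  mspace_malloc(mspace msp, size_t bytes);
extern void   mspace_free(mspace msp, void* mem);
extern size_t mspace_footprint(mspace msp);
extern size_t destroy_mspace(mspace msp);

int main(void) {
  mspace arena = create_mspace(0, 1);        /* default capacity, with locking */
  if (arena == NULL) return 1;
  void* a = mspace_malloc(arena, 1 << 20);   /* served from this space only */
  void* b = mspace_malloc(arena, 256);
  mspace_free(arena, a);                     /* returns a to the arena */
  printf("arena footprint: %zu bytes\n", mspace_footprint(arena));
  (void)b;                                   /* still reachable; reclaimed below */
  printf("released %zu bytes\n", destroy_mspace(arena));
  return 0;
}
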
- ========================================================================
-*/
-
-/* #include "malloc.h" */
-
-/*------------------------------ internal #includes ---------------------- */
-
-#ifdef _MSC_VER
-#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
-#endif /* _MSC_VER */
-#if !NO_MALLOC_STATS
-#include <stdio.h> /* for printing in malloc_stats */
-#endif /* NO_MALLOC_STATS */
-#ifndef LACKS_ERRNO_H
-#include <errno.h> /* for MALLOC_FAILURE_ACTION */
-#endif /* LACKS_ERRNO_H */
-#ifdef DEBUG
-#if ABORT_ON_ASSERT_FAILURE
-#undef assert
-#define assert(x) if(!(x)) ABORT
-#else /* ABORT_ON_ASSERT_FAILURE */
-#include <assert.h>
-#endif /* ABORT_ON_ASSERT_FAILURE */
-#else /* DEBUG */
-#ifndef assert
-#define assert(x)
-#endif
-#define DEBUG 0
-#endif /* DEBUG */
-#if !defined(WIN32) && !defined(LACKS_TIME_H)
-#include <time.h> /* for magic initialization */
-#endif /* WIN32 */
-#ifndef LACKS_STDLIB_H
-#include <stdlib.h> /* for abort() */
-#endif /* LACKS_STDLIB_H */
-#ifndef LACKS_STRING_H
-#include <string.h> /* for memset etc */
-#endif /* LACKS_STRING_H */
-#if USE_BUILTIN_FFS
-#ifndef LACKS_STRINGS_H
-#include <strings.h> /* for ffs */
-#endif /* LACKS_STRINGS_H */
-#endif /* USE_BUILTIN_FFS */
-#if HAVE_MMAP
-#ifndef LACKS_SYS_MMAN_H
-/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
-#if (defined(linux) && !defined(__USE_GNU))
-#define __USE_GNU 1
-#include <sys/mman.h> /* for mmap */
-#undef __USE_GNU
-#else
-#include <sys/mman.h> /* for mmap */
-#endif /* linux */
-#endif /* LACKS_SYS_MMAN_H */
-#ifndef LACKS_FCNTL_H
-#include <fcntl.h>
-#endif /* LACKS_FCNTL_H */
-#endif /* HAVE_MMAP */
-#ifndef LACKS_UNISTD_H
-#include <unistd.h> /* for sbrk, sysconf */
-#else /* LACKS_UNISTD_H */
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
-extern void* sbrk(ptrdiff_t);
-#endif /* FreeBSD etc */
-#endif /* LACKS_UNISTD_H */
-
-/* Declarations for locking */
-#if USE_LOCKS
-#ifndef WIN32
-#if defined (__SVR4) && defined (__sun) /* solaris */
-#include <thread.h>
-#elif !defined(LACKS_SCHED_H)
-#include <sched.h>
-#endif /* solaris or LACKS_SCHED_H */
-#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
-#include <pthread.h>
-#endif /* USE_RECURSIVE_LOCKS ...
*/ -#elif defined(_MSC_VER) -#ifndef _M_AMD64 -/* These are already defined on AMD64 builds */ -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); -LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* _M_AMD64 */ -#pragma intrinsic (_InterlockedCompareExchange) -#pragma intrinsic (_InterlockedExchange) -#define interlockedcompareexchange _InterlockedCompareExchange -#define interlockedexchange _InterlockedExchange -#elif defined(WIN32) && defined(__GNUC__) -#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) -#define interlockedexchange __sync_lock_test_and_set -#endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ - -#ifndef LOCK_AT_FORK -#define LOCK_AT_FORK 0 -#endif - -/* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER>=1300 -#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -unsigned char _BitScanForward(unsigned long *index, unsigned long mask); -unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#define BitScanForward _BitScanForward -#define BitScanReverse _BitScanReverse -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) -#endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ - -#ifndef WIN32 -#ifndef malloc_getpagesize -# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ -# ifndef _SC_PAGE_SIZE -# define _SC_PAGE_SIZE _SC_PAGESIZE -# endif -# endif -# ifdef _SC_PAGE_SIZE -# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) -# else -# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) - extern size_t getpagesize(); -# define malloc_getpagesize getpagesize() -# else -# ifdef WIN32 /* use supplied emulation of getpagesize */ -# define malloc_getpagesize getpagesize() -# else -# ifndef LACKS_SYS_PARAM_H -# include -# endif -# ifdef EXEC_PAGESIZE -# define malloc_getpagesize EXEC_PAGESIZE -# else -# ifdef NBPG -# ifndef CLSIZE -# define malloc_getpagesize NBPG -# else -# define malloc_getpagesize (NBPG * CLSIZE) -# endif -# else -# ifdef NBPC -# define malloc_getpagesize NBPC -# else -# ifdef PAGESIZE -# define malloc_getpagesize PAGESIZE -# else /* just guess */ -# define malloc_getpagesize ((size_t)4096U) -# endif -# endif -# endif -# endif -# endif -# endif -# endif -#endif -#endif - -/* ------------------- size_t and alignment properties -------------------- */ - -/* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) - -/* Some constants coerced to size_t */ -/* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) - -/* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) - -/* True if address a has acceptable alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) - -/* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - 
((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) - -/* -------------------------- MMAP preliminaries ------------------------- */ - -/* - If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and - checks to fail so compiler optimizer can delete code rather than - using so many "#if"s. -*/ - - -/* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void*)(MAX_SIZE_T)) -#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ - -#if HAVE_MMAP - -#ifndef WIN32 -#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANON */ -#ifdef MAP_ANONYMOUS -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) -#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) -#else /* MAP_ANONYMOUS */ -/* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. -*/ -#define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ - (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) -#endif /* MAP_ANONYMOUS */ - -#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) - -#else /* WIN32 */ - -/* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void* win32mmap(size_t size) { - void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - return (ptr != 0)? ptr: MFAIL; -} - -/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void* win32direct_mmap(size_t size) { - void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - return (ptr != 0)? 
ptr: MFAIL; -} - -/* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void* ptr, size_t size) { - MEMORY_BASIC_INFORMATION minfo; - char* cptr = (char*)ptr; - while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; - if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || - minfo.State != MEM_COMMIT || minfo.RegionSize > size) - return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; - cptr += minfo.RegionSize; - size -= minfo.RegionSize; - } - return 0; -} - -#define MMAP_DEFAULT(s) win32mmap(s) -#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) -#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) -#endif /* WIN32 */ -#endif /* HAVE_MMAP */ - -#if HAVE_MREMAP -#ifndef WIN32 -#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) -#endif /* WIN32 */ -#endif /* HAVE_MREMAP */ - -/** - * Define CALL_MORECORE - */ -#if HAVE_MORECORE - #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ - -/** - * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP - */ -#if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) - - #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ - #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ - #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ - -/** - * Define CALL_MREMAP - */ -#if HAVE_MMAP && HAVE_MREMAP - #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ - -/* mstate bit set if continguous morecore disabled or failed */ -#define USE_NONCONTIGUOUS_BIT (4U) - -/* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) - - -/* --------------------------- Lock preliminaries ------------------------ */ - -/* - When locks are defined, there is one global lock, plus - one per-mspace lock. - - The global lock_ensures that mparams.magic and other unique - mparams values are initialized only once. It also protects - sequences of calls to MORECORE. In many cases sys_alloc requires - two calls, that should not be interleaved with calls by other - threads. This does not protect against direct calls to MORECORE - by other threads not using this lock, so there is still code to - cope the best we can on interference. - - Per-mspace locks surround calls to malloc, free, etc. - By default, locks are simple non-reentrant mutexes. - - Because lock-protected regions generally have bounded times, it is - OK to use the supplied simple spinlocks. 
Spinlocks are likely to - improve performance for lightly contended applications, but worsen - performance under heavy contention. - - If USE_LOCKS is > 1, the definitions of lock routines here are - bypassed, in which case you will need to define the type MLOCK_T, - and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK - and TRY_LOCK. You must also declare a - static MLOCK_T malloc_global_mutex = { initialization values };. - -*/ - -#if !USE_LOCKS -#define USE_LOCK_BIT (0U) -#define INITIAL_LOCK(l) (0) -#define DESTROY_LOCK(l) (0) -#define ACQUIRE_MALLOC_GLOBAL_LOCK() -#define RELEASE_MALLOC_GLOBAL_LOCK() - -#else -#if USE_LOCKS > 1 -/* ----------------------- User-defined locks ------------------------ */ -/* Define your own lock implementation here */ -/* #define INITIAL_LOCK(lk) ... */ -/* #define DESTROY_LOCK(lk) ... */ -/* #define ACQUIRE_LOCK(lk) ... */ -/* #define RELEASE_LOCK(lk) ... */ -/* #define TRY_LOCK(lk) ... */ -/* static MLOCK_T malloc_global_mutex = ... */ - -#elif USE_SPIN_LOCKS - -/* First, define CAS_LOCK and CLEAR_LOCK on ints */ -/* Note CAS_LOCK defined to return 0 on success */ - -#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) -#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) -#define CLEAR_LOCK(sl) __sync_lock_release(sl) - -#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) -/* Custom spin locks for older gcc on x86 */ -static FORCEINLINE int x86_cas_lock(int *sl) { - int ret; - int val = 1; - int cmp = 0; - __asm__ __volatile__ ("lock; cmpxchgl %1, %2" - : "=a" (ret) - : "r" (val), "m" (*(sl)), "0"(cmp) - : "memory", "cc"); - return ret; -} - -static FORCEINLINE void x86_clear_lock(int* sl) { - assert(*sl != 0); - int prev = 0; - int ret; - __asm__ __volatile__ ("lock; xchgl %0, %1" - : "=r" (ret) - : "m" (*(sl)), "0"(prev) - : "memory"); -} - -#define CAS_LOCK(sl) x86_cas_lock(sl) -#define CLEAR_LOCK(sl) x86_clear_lock(sl) - -#else /* Win32 MSC */ -#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1) -#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0) - -#endif /* ... gcc spins locks ... */ - -/* How to yield for a spin lock */ -#define SPINS_PER_YIELD 63 -#if defined(_MSC_VER) -#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ -#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) -#elif defined (__SVR4) && defined (__sun) /* solaris */ -#define SPIN_LOCK_YIELD thr_yield(); -#elif !defined(LACKS_SCHED_H) -#define SPIN_LOCK_YIELD sched_yield(); -#else -#define SPIN_LOCK_YIELD -#endif /* ... yield ... */ - -#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 -/* Plain spin locks use single word (embedded in malloc_states) */ -static int spin_acquire_lock(int *sl) { - int spins = 0; - while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } - } - return 0; -} - -#define MLOCK_T int -#define TRY_LOCK(sl) !CAS_LOCK(sl) -#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) -#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) -#define INITIAL_LOCK(sl) (*sl = 0) -#define DESTROY_LOCK(sl) (0) -static MLOCK_T malloc_global_mutex = 0; - -#else /* USE_RECURSIVE_LOCKS */ -/* types for lock owners */ -#ifdef WIN32 -#define THREAD_ID_T DWORD -#define CURRENT_THREAD GetCurrentThreadId() -#define EQ_OWNER(X,Y) ((X) == (Y)) -#else -/* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need to - somehow redefine these or not use spin locks. 
-*/ -#define THREAD_ID_T pthread_t -#define CURRENT_THREAD pthread_self() -#define EQ_OWNER(X,Y) pthread_equal(X, Y) -#endif - -struct malloc_recursive_lock { - int sl; - unsigned int c; - THREAD_ID_T threadid; -}; - -#define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; - -static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { - assert(lk->sl != 0); - if (--lk->c == 0) { - CLEAR_LOCK(&lk->sl); - } -} - -static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { - THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; - for (;;) { - if (*((volatile int *)(&lk->sl)) == 0) { - if (!CAS_LOCK(&lk->sl)) { - lk->threadid = mythreadid; - lk->c = 1; - return 0; - } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { - ++lk->c; - return 0; - } - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } - } -} - -static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { - THREAD_ID_T mythreadid = CURRENT_THREAD; - if (*((volatile int *)(&lk->sl)) == 0) { - if (!CAS_LOCK(&lk->sl)) { - lk->threadid = mythreadid; - lk->c = 1; - return 1; - } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { - ++lk->c; - return 1; - } - return 0; -} - -#define RELEASE_LOCK(lk) recursive_release_lock(lk) -#define TRY_LOCK(lk) recursive_try_lock(lk) -#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) -#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) -#define DESTROY_LOCK(lk) (0) -#endif /* USE_RECURSIVE_LOCKS */ - -#elif defined(WIN32) /* Win32 critical sections */ -#define MLOCK_T CRITICAL_SECTION -#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) -#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) -#define TRY_LOCK(lk) TryEnterCriticalSection(lk) -#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) -#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) -#define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; -static volatile LONG malloc_global_mutex_status; - -/* Use spin loop to initialize global lock */ -static void init_malloc_global_mutex() { - for (;;) { - long stat = malloc_global_mutex_status; - if (stat > 0) - return; - /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && - interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { - InitializeCriticalSection(&malloc_global_mutex); - interlockedexchange(&malloc_global_mutex_status, (LONG)1); - return; - } - SleepEx(0, FALSE); - } -} - -#else /* pthreads-based locks */ -#define MLOCK_T pthread_mutex_t -#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) -#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) -#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) -#define INITIAL_LOCK(lk) pthread_init_lock(lk) -#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) - -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) -/* Cope with old-style linux recursive lock initialization by adding */ -/* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, - int __kind)); -#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP -#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) -#endif /* USE_RECURSIVE_LOCKS ... 
*/ - -static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; - -static int pthread_init_lock (MLOCK_T *lk) { - pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr)) return 1; -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 - if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; -#endif - if (pthread_mutex_init(lk, &attr)) return 1; - if (pthread_mutexattr_destroy(&attr)) return 1; - return 0; -} - -#endif /* ... lock types ... */ - -/* Common code for all lock types */ -#define USE_LOCK_BIT (2U) - -#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK -#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); -#endif - -#ifndef RELEASE_MALLOC_GLOBAL_LOCK -#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); -#endif - -#endif /* USE_LOCKS */ - -/* ----------------------- Chunk representations ------------------------ */ - -/* - (The following includes lightly edited explanations by Colin Plumb.) - - The malloc_chunk declaration below is misleading (but accurate and - necessary). It declares a "view" into memory allowing access to - necessary fields at known offsets from a given base. - - Chunks of memory are maintained using a `boundary tag' method as - originally described by Knuth. (See the paper by Paul Wilson - ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such - techniques.) Sizes of free chunks are stored both in the front of - each chunk and at the end. This makes consolidating fragmented - chunks into bigger chunks fast. The head fields also hold bits - representing whether chunks are free or in use. - - Here are some pictures to make it clearer. They are "exploded" to - show that the state of a chunk can be thought of as extending from - the high 31 bits of the head field of its header through the - prev_foot and PINUSE_BIT bit of the following chunk header. - - A chunk that's in use looks like: - - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of previous chunk (if P = 0) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| - | Size of this chunk 1| +-+ - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | | - +- -+ - | | - +- -+ - | : - +- size - sizeof(size_t) available payload bytes -+ - : | - chunk-> +- -+ - | | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| - | Size of next chunk (may or may not be in use) | +-+ - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - And if it's free, it looks like this: - - chunk-> +- -+ - | User payload (must be in use, or we would have merged!) 
| - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| - | Size of this chunk 0| +-+ - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Next pointer | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Prev pointer | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | : - +- size - sizeof(struct chunk) unused bytes -+ - : | - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of this chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| - | Size of next chunk (must be in use, or we would have merged)| +-+ - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | : - +- User payload -+ - : | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |0| - +-+ - Note that since we always merge adjacent free chunks, the chunks - adjacent to a free chunk must be in use. - - Given a pointer to a chunk (which can be derived trivially from the - payload pointer) we can, in O(1) time, find out whether the adjacent - chunks are free, and if so, unlink them from the lists that they - are on and merge them with the current chunk. - - Chunks always begin on even word boundaries, so the mem portion - (which is returned to the user) is also on an even word boundary, and - thus at least double-word aligned. - - The P (PINUSE_BIT) bit, stored in the unused low-order bit of the - chunk size (which is always a multiple of two words), is an in-use - bit for the *previous* chunk. If that bit is *clear*, then the - word before the current chunk size contains the previous chunk - size, and can be used to find the front of the previous chunk. - The very first chunk allocated always has this bit set, preventing - access to non-existent (or non-owned) memory. If pinuse is set for - any given chunk, then you CANNOT determine the size of the - previous chunk, and might even get a memory addressing fault when - trying to do so. - - The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of - the chunk size redundantly records whether the current chunk is - inuse (unless the chunk is mmapped). This redundancy enables usage - checks within free and realloc, and reduces indirection when freeing - and consolidating chunks. - - Each freshly allocated chunk must have both cinuse and pinuse set. - That is, each allocated chunk borders either a previously allocated - and still in-use chunk, or the base of its memory arena. This is - ensured by making all allocations from the `lowest' part of any - found chunk. Further, no free chunk physically borders another one, - so each free chunk is known to be preceded and followed by either - inuse chunks or the ends of memory. - - Note that the `foot' of the current chunk is actually represented - as the prev_foot of the NEXT chunk. This makes it easier to - deal with alignments etc but can be very confusing when trying - to extend or adapt this code. - - The exceptions to all this are - - 1. The special chunk `top' is the top-most available chunk (i.e., - the one bordering the end of available memory). It is treated - specially. Top is never included in any bin, is used only if - no other chunk is available, and is released back to the - system if it is very large (see M_TRIM_THRESHOLD). 
In effect, - the top chunk is treated as larger (and thus less well - fitting) than any other available chunk. The top chunk - doesn't update its trailing size field since there is no next - contiguous chunk that would have to index off it. However, - space is still allocated for it (TOP_FOOT_SIZE) to enable - separation or merging when space is extended. - - 3. Chunks allocated via mmap, have both cinuse and pinuse bits - cleared in their head fields. Because they are allocated - one-by-one, each must carry its own prev_foot field, which is - also used to hold the offset this chunk has within its mmapped - region, which is needed to preserve alignment. Each mmapped - chunk is trailed by the first two fields of a fake next-chunk - for sake of usage checks. - -*/ - -struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk* fd; /* double links -- used only if free. */ - struct malloc_chunk* bk; -}; - -typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk* mchunkptr; -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ - -/* ------------------- Chunks sizes and alignments ----------------------- */ - -#define MCHUNK_SIZE (sizeof(mchunk)) - -#if FOOTERS -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ -#define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ - -/* MMapped chunks need a second word of overhead ... */ -#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -/* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) - -/* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) -/* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) - -/* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) - -/* pad request bytes into a usable size */ -#define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* pad request, checking for minimum (but not maximum) */ -#define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) - - -/* ------------------ Operations on head and foot fields ----------------- */ - -/* - The head field of a chunk is or'ed with PINUSE_BIT when previous - adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in - use, unless mmapped, in which case both bits are cleared. - - FLAG4_BIT is not used by this malloc, but might be useful in extensions. 
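To make the size arithmetic and head-field encoding above concrete, a standalone sketch that redoes the calculation with local stand-in macros (assuming a 64-bit build, the default MALLOC_ALIGNMENT of 2*sizeof(void*) == 16, and FOOTERS disabled so CHUNK_OVERHEAD == sizeof(size_t)); the names below are illustrative, not the ones defined in this file:

#include <assert.h>
#include <stddef.h>

#define ALIGNMENT   ((size_t)16)
#define ALIGN_MASK  (ALIGNMENT - 1)
#define OVERHEAD    ((size_t)8)                                    /* CHUNK_OVERHEAD without FOOTERS */
#define MIN_CHUNK   ((4 * sizeof(size_t) + ALIGN_MASK) & ~ALIGN_MASK)  /* 32 on 64-bit */
#define PAD(req)    (((req) + OVERHEAD + ALIGN_MASK) & ~ALIGN_MASK)
#define REQ2SZ(req) (((req) < MIN_CHUNK - OVERHEAD - 1) ? MIN_CHUNK : PAD(req))

#define PBIT ((size_t)1)                     /* previous chunk in use */
#define CBIT ((size_t)2)                     /* this chunk in use */
#define SIZE_OF(head) ((head) & ~(size_t)7)  /* mask off PINUSE/CINUSE/FLAG4 */

int main(void) {
  assert(REQ2SZ(5)   == 32);                 /* tiny requests round up to MIN_CHUNK_SIZE */
  assert(REQ2SZ(100) == 112);                /* (100 + 8 + 15) & ~15 */
  size_t head = REQ2SZ(100) | PBIT | CBIT;   /* head word of an in-use chunk */
  assert(SIZE_OF(head) == 112 && (head & CBIT));
  return 0;
}
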
-*/ - -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) - -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) - -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) - -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) - -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) - -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) - -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) - -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) - -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) - -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) - -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) - -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) - -/* Return true if malloced space is not necessarily cleared */ -#if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ - -/* ---------------------- Overlaid data structures ----------------------- */ - -/* - When chunks are not in use, they are treated as nodes of either - lists or trees. - - "Small" chunks are stored in circular doubly-linked lists, and look - like this: - - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of previous chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - `head:' | Size of chunk, in bytes |P| - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Forward pointer to next chunk in list | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Back pointer to previous chunk in list | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Unused space (may be 0 bytes long) . - . . - . | -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - `foot:' | Size of chunk, in bytes | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - Larger chunks are kept in a form of bitwise digital trees (aka - tries) keyed on chunksizes. Because malloc_tree_chunks are only for - free chunks greater than 256 bytes, their size doesn't impose any - constraints on user chunk sizes. 
Each node looks like: - - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of previous chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - `head:' | Size of chunk, in bytes |P| - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Forward pointer to next chunk of same size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Back pointer to previous chunk of same size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Pointer to left child (child[0]) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Pointer to right child (child[1]) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Pointer to parent | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | bin index of this chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Unused space . - . | -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - `foot:' | Size of chunk, in bytes | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - Each tree holding treenodes is a tree of unique chunk sizes. Chunks - of the same size are arranged in a circularly-linked list, with only - the oldest chunk (the next to be used, in our FIFO ordering) - actually in the tree. (Tree members are distinguished by a non-null - parent pointer.) If a chunk with the same size an an existing node - is inserted, it is linked off the existing node using pointers that - work in the same way as fd/bk pointers of small chunks. - - Each tree contains a power of 2 sized range of chunk sizes (the - smallest is 0x100 <= x < 0x180), which is is divided in half at each - tree level, with the chunks in the smaller half of the range (0x100 - <= x < 0x140 for the top nose) in the left subtree and the larger - half (0x140 <= x < 0x180) in the right subtree. This is, of course, - done by inspecting individual bits. - - Using these rules, each node's left subtree contains all smaller - sizes than its right subtree. However, the node at the root of each - subtree has no particular ordering relationship to either. (The - dividing line between the subtree sizes is based on trie relation.) - If we remove the last chunk of a given size from the interior of the - tree, we need to replace it with a leaf node. The tree ordering - rules permit a node to be replaced by any leaf below it. - - The smallest chunk in a tree (a common operation in a best-fit - allocator) can be found by walking a path to the leftmost leaf in - the tree. Unlike a usual binary tree, where we follow left child - pointers until we reach a null, here we follow the right child - pointer any time the left one is null, until we reach a leaf with - both child pointers null. The smallest chunk in the tree will be - somewhere along that path. - - The worst case number of steps to add, find, or remove a node is - bounded by the number of bits differentiating chunks within - bins. Under current bin calculations, this ranges from 6 up to 21 - (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case - is of course much better. 
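To illustrate the bisection for the smallest tree bin (0x100 <= s < 0x180, as stated above), a small standalone sketch; subtree_at_level is an illustrative helper showing which bit selects the subtree at each level, not the index computation actually used in this file:

#include <assert.h>
#include <stddef.h>

static int subtree_at_level(size_t s, unsigned level) {
  return (int)((s >> (6 - level)) & 1);      /* level 0 tests the 0x40 bit */
}

int main(void) {
  assert(subtree_at_level(0x110, 0) == 0);   /* 0x100..0x13F -> left subtree */
  assert(subtree_at_level(0x150, 0) == 1);   /* 0x140..0x17F -> right subtree */
  assert(subtree_at_level(0x110, 1) == 0);   /* next level halves the range again */
  return 0;
}
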
-*/ - -struct malloc_tree_chunk { - /* The first four fields must be compatible with malloc_chunk */ - size_t prev_foot; - size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; - - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; - bindex_t index; -}; - -typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ - -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) - -/* ----------------------------- Segments -------------------------------- */ - -/* - Each malloc space may include non-contiguous segments, held in a - list headed by an embedded malloc_segment record representing the - top-most space. Segments also include flags holding properties of - the space. Large chunks that are directly allocated by mmap are not - included in this list. They are instead independently created and - destroyed without otherwise keeping track of them. - - Segment management mainly comes into play for spaces allocated by - MMAP. Any call to MMAP might or might not return memory that is - adjacent to an existing segment. MORECORE normally contiguously - extends the current space, so this space is almost always adjacent, - which is simpler and faster to deal with. (This is why MORECORE is - used preferentially to MMAP when both are available -- see - sys_alloc.) When allocating using MMAP, we don't use any of the - hinting mechanisms (inconsistently) supported in various - implementations of unix mmap, or distinguish reserving from - committing memory. Instead, we just ask for space, and exploit - contiguity when we get it. It is probably possible to do - better than this on some systems, but no general scheme seems - to be significantly better. - - Management entails a simpler variant of the consolidation scheme - used for chunks to reduce fragmentation -- new adjacent memory is - normally prepended or appended to an existing segment. However, - there are limitations compared to chunk consolidation that mostly - reflect the fact that segment processing is relatively infrequent - (occurring only when getting memory from system) and that we - don't expect to have huge numbers of segments: - - * Segments are not indexed, so traversal requires linear scans. (It - would be possible to index these, but is not worth the extra - overhead and complexity for most programs on most platforms.) - * New segments are only appended to old ones when holding top-most - memory; if they cannot be prepended to others, they are held in - different segments. - - Except for the top-most segment of an mstate, each segment record - is kept at the tail of its segment. Segments are added by pushing - segment records onto the list headed by &mstate.seg for the - containing mstate. - - Segment flags control allocation/merge/deallocation policies: - * If EXTERN_BIT set, then we did not allocate this segment, - and so should not try to deallocate or merge with others. - (This currently holds only for the initial segment passed - into create_mspace_with_base.) - * If USE_MMAP_BIT set, the segment may be merged with - other surrounding mmapped segments and trimmed/de-allocated - using munmap. - * If neither bit is set, then the segment was obtained using - MORECORE so can be merged with surrounding MORECORE'd segments - and deallocated/trimmed using MORECORE with negative arguments. 
-*/ - -struct malloc_segment { - char* base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment* next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ -}; - -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) - -typedef struct malloc_segment msegment; -typedef struct malloc_segment* msegmentptr; - -/* ---------------------------- malloc_state ----------------------------- */ - -/* - A malloc_state holds all of the bookkeeping for a space. - The main fields are: - - Top - The topmost chunk of the currently active segment. Its size is - cached in topsize. The actual size of topmost space is - topsize+TOP_FOOT_SIZE, which includes space reserved for adding - fenceposts and segment records if necessary when getting more - space from the system. The size at which to autotrim top is - cached from mparams in trim_check, except that it is disabled if - an autotrim fails. - - Designated victim (dv) - This is the preferred chunk for servicing small requests that - don't have exact fits. It is normally the chunk split off most - recently to service another small request. Its size is cached in - dvsize. The link fields of this chunk are not maintained since it - is not kept in a bin. - - SmallBins - An array of bin headers for free chunks. These bins hold chunks - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains - chunks of all the same size, spaced 8 bytes apart. To simplify - use in double-linked lists, each bin header acts as a malloc_chunk - pointing to the real first node, if it exists (else pointing to - itself). This avoids special-casing for headers. But to avoid - waste, we allocate only the fd/bk pointers of bins, and then use - repositioning tricks to treat these as the fields of a chunk. - - TreeBins - Treebins are pointers to the roots of trees holding a range of - sizes. There are 2 equally spaced treebins for each power of two - from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything - larger. - - Bin maps - There is one bit map for small bins ("smallmap") and one for - treebins ("treemap). Each bin sets its bit when non-empty, and - clears the bit when empty. Bit operations are then used to avoid - bin-by-bin searching -- nearly all "search" is done without ever - looking at bins that won't be selected. The bit maps - conservatively use 32 bits per map word, even if on 64bit system. - For a good description of some of the bit-based techniques used - here, see Henry S. Warren Jr's book "Hacker's Delight" (and - supplement at http://hackersdelight.org/). Many of these are - intended to reduce the branchiness of paths through malloc etc, as - well as to reduce the number of memory locations read or written. - - Segments - A list of segments headed by an embedded malloc_segment record - representing the initial space. - - Address check support - The least_addr field is the least address ever obtained from - MORECORE or MMAP. Attempted frees and reallocs of any address less - than this are trapped (unless INSECURE is defined). - - Magic tag - A cross-check field that should always hold same value as mparams.magic. - - Max allowed footprint - The maximum allowed bytes to allocate from system (zero means no limit) - - Flags - Bits recording whether to use MMAP, locks, or contiguous MORECORE - - Statistics - Each space keeps track of current and maximum system memory - obtained via MORECORE or MMAP. 
- - Trim support - Fields holding the amount of unused topmost memory that should trigger - trimming, and a counter to force periodic scanning to release unused - non-topmost segments. - - Locking - If USE_LOCKS is defined, the "mutex" lock is acquired and released - around every public call using this mspace. - - Extension support - A void* pointer and a size_t field that can be used to help implement - extensions to this malloc. -*/ - -/* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) - -struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char* least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* zero means no limit */ - flag_t mflags; -#if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void* extp; /* Unused but available for extensions */ - size_t exts; -}; - -typedef struct malloc_state* mstate; - -/* ------------- Global malloc_state and malloc_params ------------------- */ - -/* - malloc_params holds global properties, including those that can be - dynamically set using mallopt. There is a single instance, mparams, - initialized in init_mparams. Note that the non-zeroness of "magic" - also serves as an initialization flag. 
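A quick standalone check of the size-class constants defined above, assuming the values shown (SMALLBIN_SHIFT = 3, so smallbins are spaced 8 bytes apart, and MIN_LARGE_SIZE = 1 << TREEBIN_SHIFT = 256 bytes marks the treebin boundary):

#include <stdio.h>

int main(void) {
    unsigned shift = 3, tree_shift = 8;
    size_t min_large = (size_t)1 << tree_shift;   /* 256 */
    size_t sizes[] = { 16, 24, 248, 256 };
    for (int i = 0; i < 4; ++i) {
        size_t s = sizes[i];
        if (s < min_large)
            printf("%zu -> smallbin %zu\n", s, s >> shift);  /* 2, 3, 31 */
        else
            printf("%zu -> treebin\n", s);
    }
    return 0;
}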
-*/ - -struct malloc_params { - size_t magic; - size_t page_size; - size_t granularity; - size_t mmap_threshold; - size_t trim_threshold; - flag_t default_mflags; -}; - -static struct malloc_params mparams; - -/* Ensure mparams initialized */ -#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) - -#if !ONLY_MSPACES - -/* The global malloc_state used for all non-"mspace" calls */ -static struct malloc_state _gm_; -#define gm (&_gm_) -#define is_global(M) ((M) == &_gm_) - -#endif /* !ONLY_MSPACES */ - -#define is_initialized(M) ((M)->top != 0) - -/* -------------------------- system alloc setup ------------------------- */ - -/* Operations on mflags */ - -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) -#if USE_LOCKS -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) -#else -#define disable_lock(M) -#endif - -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) -#if HAVE_MMAP -#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) -#else -#define disable_mmap(M) -#endif - -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) - -#define set_lock(M,L)\ - ((M)->mflags = (L)?\ - ((M)->mflags | USE_LOCK_BIT) :\ - ((M)->mflags & ~USE_LOCK_BIT)) - -/* page-align a size */ -#define page_align(S)\ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) - -/* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (mparams.granularity - SIZE_T_ONE))\ - & ~(mparams.granularity - SIZE_T_ONE)) - - -/* For mmap, use granularity alignment on windows, else page-align */ -#ifdef WIN32 -#define mmap_align(S) granularity_align(S) -#else -#define mmap_align(S) page_align(S) -#endif - -/* For sys_alloc, enough padding to ensure can malloc request on success */ -#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) - -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) - -/* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { - msegmentptr sp = &m->seg; - for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; - } -} - -/* Return true if segment contains a segment link */ -static int has_segment_link(mstate m, msegmentptr ss) { - msegmentptr sp = &m->seg; - for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; - } -} - -#ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ - -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) - - -/* ------------------------------- Hooks -------------------------------- */ - -/* - PREACTION should be defined to return 0 on success, and nonzero on - failure. 
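The page_align and granularity_align macros above use the usual power-of-two round-up identity; a small self-contained check (the helper name is illustrative):

#include <stdio.h>

/* Round s up to a multiple of a; a must be a power of two. */
static size_t align_up(size_t s, size_t a) {
    return (s + (a - 1)) & ~(a - 1);
}

int main(void) {
    printf("%zu %zu %zu\n",
           align_up(1, 4096),      /* 4096 */
           align_up(4096, 4096),   /* 4096 */
           align_up(4097, 4096));  /* 8192 */
    return 0;
}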
If you are not using locking, you can redefine these to do - anything you like. -*/ - -#if USE_LOCKS -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ - -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ - -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ - -#endif /* USE_LOCKS */ - -/* - CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. - USAGE_ERROR_ACTION is triggered on detected bad frees and - reallocs. The argument p is an address that might have triggered the - fault. It is ignored by the two predefined actions, but might be - useful in custom actions that try to help diagnose errors. -*/ - -#if PROCEED_ON_ERROR - -/* A count of the number of corruption errors causing resets */ -int malloc_corruption_error_count; - -/* default corruption action */ -static void reset_on_error(mstate m); - -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) - -#else /* PROCEED_ON_ERROR */ - -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ - -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ - -#endif /* PROCEED_ON_ERROR */ - - -/* -------------------------- Debugging setup ---------------------------- */ - -#if ! DEBUG - -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) - -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) - -static void do_check_any_chunk(mstate m, mchunkptr p); -static void do_check_top_chunk(mstate m, mchunkptr p); -static void do_check_mmapped_chunk(mstate m, mchunkptr p); -static void do_check_inuse_chunk(mstate m, mchunkptr p); -static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); -static void do_check_tree(mstate m, tchunkptr t); -static void do_check_treebin(mstate m, bindex_t i); -static void do_check_smallbin(mstate m, bindex_t i); -static void do_check_malloc_state(mstate m); -static int bin_find(mstate m, mchunkptr x); -static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ - -/* ---------------------------- Indexing Bins ---------------------------- */ - -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I. 
Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, (DWORD) X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ - -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) - -/* Shift placing maximum resolved bit in a treebin at i as sign bit */ -#define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) - -/* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) - - -/* ------------------------ Operations on bin maps ----------------------- */ - -/* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) - -/* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) - -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) - -/* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) - -/* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | -(x<<1)) - -/* mask with all bits to left of or equal to least bit of x on */ -#define same_or_left_bits(x) ((x) | -(x)) - -/* index corresponding to given bit. 
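A standalone rendering of the clz-based branch of compute_tree_index above, assuming a GCC/Clang-style __builtin_clz and the TREEBIN_SHIFT = 8 / NTREEBINS = 32 constants defined earlier; it shows the "two bins per power of two" spacing:

#include <stdio.h>

static unsigned tree_index(size_t s) {
    unsigned x = (unsigned)(s >> 8);
    if (x == 0) return 0;
    if (x > 0xFFFF) return 31;
    unsigned k = 31U - (unsigned)__builtin_clz(x);       /* floor(log2(x)) */
    return (k << 1) + (unsigned)((s >> (k + 7)) & 1);
}

int main(void) {
    size_t sizes[] = { 256, 384, 512, 768, 1024 };
    for (int i = 0; i < 5; ++i)
        printf("%zu -> treebin %u\n", sizes[i], tree_index(sizes[i]));
    /* 256 -> 0, 384 -> 1, 512 -> 2, 768 -> 3, 1024 -> 4 */
    return 0;
}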
Use x86 asm if possible */ - -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = __builtin_ctz(X); \ - I = (bindex_t)J;\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = _bit_scan_forward (X); \ - I = (bindex_t)J;\ -} - -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - _BitScanForward((DWORD *) &J, X);\ - I = (bindex_t)J;\ -} - -#elif USE_BUILTIN_FFS -#define compute_bit2idx(X, I) I = ffs(X)-1 - -#else -#define compute_bit2idx(X, I)\ -{\ - unsigned int Y = X - 1;\ - unsigned int K = Y >> (16-4) & 16;\ - unsigned int N = K; Y >>= K;\ - N += K = Y >> (8-3) & 8; Y >>= K;\ - N += K = Y >> (4-2) & 4; Y >>= K;\ - N += K = Y >> (2-1) & 2; Y >>= K;\ - N += K = Y >> (1-0) & 1; Y >>= K;\ - I = (bindex_t)(N + Y);\ -} -#endif /* GNUC */ - - -/* ----------------------- Runtime Check Support ------------------------- */ - -/* - For security, the main invariant is that malloc/free/etc never - writes to a static address other than malloc_state, unless static - malloc_state itself has been corrupted, which cannot occur via - malloc (because of these checks). In essence this means that we - believe all pointers, sizes, maps etc held in malloc_state, but - check all of those linked or offsetted from other embedded data - structures. These checks are interspersed with main code in a way - that tends to minimize their run-time cost. - - When FOOTERS is defined, in addition to range checking, we also - verify footer fields of inuse chunks, which can be used guarantee - that the mstate controlling malloc/free is intact. This is a - streamlined version of the approach described by William Robertson - et al in "Run-time Detection of Heap-based Overflows" LISA'03 - http://www.usenix.org/events/lisa03/tech/robertson.html The footer - of an inuse chunk holds the xor of its mstate and a random seed, - that is checked upon calls to free() and realloc(). This is - (probabalistically) unguessable from outside the program, but can be - computed by any code successfully malloc'ing any chunk, so does not - itself provide protection against code that has already broken - security through some other means. Unlike Robertson et al, we - always dynamically check addresses of all offset chunks (previous, - next, etc). This turns out to be cheaper than relying on hashes. 
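A toy sketch of the FOOTERS cross-check just described (the address and seed values are made up): the footer stores the owning mstate xor'ed with the magic seed, so free/realloc can recover the mstate and compare it against mparams.magic.

#include <stdio.h>
#include <stdint.h>

int main(void) {
    uintptr_t mstate_addr = 0x7f1234560000u;      /* illustrative mstate address */
    uintptr_t magic       = 0x5a5a5a5a5a5af8u;    /* illustrative random seed    */
    uintptr_t footer = mstate_addr ^ magic;        /* what mark_inuse_foot stores */
    uintptr_t back   = footer ^ magic;             /* what get_mstate_for recovers */
    printf("%s\n", back == mstate_addr ? "magic ok" : "corrupted");
    return 0;
}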
-*/ - -#if !INSECURE -/* Check if address a is at least as high as any from MORECORE or MMAP */ -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) -/* Check if address of next chunk n is higher than base chunk p */ -#define ok_next(p, n) ((char*)(p) < (char*)(n)) -/* Check if p has inuse status */ -#define ok_inuse(p) is_inuse(p) -/* Check if p has its pinuse bit on */ -#define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ -#define ok_address(M, a) (1) -#define ok_next(b, n) (1) -#define ok_inuse(p) (1) -#define ok_pinuse(p) (1) -#endif /* !INSECURE */ - -#if (FOOTERS && !INSECURE) -/* Check if (alleged) mstate m has expected magic field */ -#define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ -#define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ - -/* In gcc, use __builtin_expect to minimize impact of checks */ -#if !INSECURE -#if defined(__GNUC__) && __GNUC__ >= 3 -#define RTCHECK(e) __builtin_expect(e, 1) -#else /* GNUC */ -#define RTCHECK(e) (e) -#endif /* GNUC */ -#else /* !INSECURE */ -#define RTCHECK(e) (1) -#endif /* !INSECURE */ - -/* macros to set up inuse chunks with or without footers */ - -#if !FOOTERS - -#define mark_inuse_foot(M,p,s) - -/* Macros for setting head/foot of non-mmapped chunks */ - -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) - -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) - -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) - -#else /* FOOTERS */ - -/* Set foot of inuse chunk to be xor of mstate and seed */ -#define mark_inuse_foot(M,p,s)\ - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) - -#define get_mstate_for(p)\ - ((mstate)(((mchunkptr)((char*)(p) +\ - (chunksize(p))))->prev_foot ^ mparams.magic)) - -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M,p,s)) - -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ - mark_inuse_foot(M,p,s)) - -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - mark_inuse_foot(M, p, s)) - -#endif /* !FOOTERS */ - -/* ---------------------------- setting mparams -------------------------- */ - -#if LOCK_AT_FORK -static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } -static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } -static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } -#endif /* LOCK_AT_FORK */ - -/* Initialize mparams */ -static int init_mparams(void) { -#ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) - init_malloc_global_mutex(); -#endif - - ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (mparams.magic == 0) { - size_t magic; - size_t psize; - size_t gsize; - -#ifndef WIN32 - psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ - { - SYSTEM_INFO system_info; - GetSystemInfo(&system_info); - psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? 
- DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); - } -#endif /* WIN32 */ - - /* Sanity-check configuration: - size_t must be unsigned and as wide as pointer type. - ints must be at least 4 bytes. - alignment must be at least 8. - Alignment, min chunk size, and page size must all be powers of 2. - */ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) - ABORT; - mparams.granularity = gsize; - mparams.page_size = psize; - mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; - mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; -#if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ - -#if !ONLY_MSPACES - /* Set up lock for main malloc area */ - gm->mflags = mparams.default_mflags; - (void)INITIAL_LOCK(&gm->mutex); -#endif -#if LOCK_AT_FORK - pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); -#endif - - { -#if USE_DEV_RANDOM - int fd; - unsigned char buf[sizeof(size_t)]; - /* Try to use /dev/urandom, else fall back on using time */ - if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && - read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); - close(fd); - } - else -#endif /* USE_DEV_RANDOM */ -#ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); -#elif defined(LACKS_TIME_H) - magic = (size_t)&magic ^ (size_t)0x55555555U; -#else - magic = (size_t)(time(0) ^ (size_t)0x55555555U); -#endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ - /* Until memory modes commonly available, use volatile-write */ - (*(volatile size_t *)(&(mparams.magic))) = magic; - } - } - - RELEASE_MALLOC_GLOBAL_LOCK(); - return 1; -} - -/* support for mallopt */ -static int change_mparam(int param_number, int value) { - size_t val; - ensure_initialization(); - val = (value == -1)? MAX_SIZE_T : (size_t)value; - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; - return 1; - } - else - return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; - } -} - -#if DEBUG -/* ------------------------- Debugging Support --------------------------- */ - -/* Check properties of any chunk, whether free, inuse, mmapped etc */ -static void do_check_any_chunk(mstate m, mchunkptr p) { - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); - assert(ok_address(m, p)); -} - -/* Check properties of top chunk */ -static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ - assert(sp != 0); - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); - assert(ok_address(m, p)); - assert(sz == m->topsize); - assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); - assert(pinuse(p)); - assert(!pinuse(chunk_plus_offset(p, sz))); -} - -/* Check properties of (inuse) mmapped chunks */ -static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); - size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); - assert(is_mmapped(p)); - assert(use_mmap(m)); - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); - assert(ok_address(m, p)); - assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); - assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); -} - -/* Check properties of inuse chunks */ -static void do_check_inuse_chunk(mstate m, mchunkptr p) { - do_check_any_chunk(m, p); - assert(is_inuse(p)); - assert(next_pinuse(p)); - /* If not pinuse and not mmapped, previous chunk has OK offset */ - assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); -} - -/* Check properties of free chunks */ -static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); - mchunkptr next = chunk_plus_offset(p, sz); - do_check_any_chunk(m, p); - assert(!is_inuse(p)); - assert(!next_pinuse(p)); - assert (!is_mmapped(p)); - if (p != m->dv && p != m->top) { - if (sz >= MIN_CHUNK_SIZE) { - assert((sz & CHUNK_ALIGN_MASK) == 0); - assert(is_aligned(chunk2mem(p))); - assert(next->prev_foot == sz); - assert(pinuse(p)); - assert (next == m->top || is_inuse(next)); - assert(p->fd->bk == p); - assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ - assert(sz == SIZE_T_SIZE); - } -} - -/* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; - do_check_inuse_chunk(m, p); - assert((sz & CHUNK_ALIGN_MASK) == 0); - assert(sz >= MIN_CHUNK_SIZE); - assert(sz >= s); - /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ - assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); - } -} - -/* Check a tree and its subtrees. 
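The free-chunk checks above lean on the circular fd/bk invariant of every bin list (p->fd->bk == p and p->bk->fd == p); a minimal standalone illustration with plain structs, not the real chunk layout:

#include <assert.h>
#include <stdio.h>

struct node { struct node *fd, *bk; };

int main(void) {
    struct node bin, a;
    bin.fd = &a; bin.bk = &a;       /* bin header linked to one free chunk */
    a.fd = &bin; a.bk = &bin;
    assert(bin.fd->bk == &bin && bin.bk->fd == &bin);
    assert(a.fd->bk == &a && a.bk->fd == &a);
    puts("bin links consistent");
    return 0;
}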
*/ -static void do_check_tree(mstate m, tchunkptr t) { - tchunkptr head = 0; - tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; - compute_tree_index(tsize, idx); - assert(tindex == idx); - assert(tsize >= MIN_LARGE_SIZE); - assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); - - do { /* traverse through chain of same-sized nodes */ - do_check_any_chunk(m, ((mchunkptr)u)); - assert(u->index == tindex); - assert(chunksize(u) == tsize); - assert(!is_inuse(u)); - assert(!next_pinuse(u)); - assert(u->fd->bk == u); - assert(u->bk->fd == u); - if (u->parent == 0) { - assert(u->child[0] == 0); - assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ - head = u; - assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); - if (u->child[0] != 0) { - assert(u->child[0]->parent == u); - assert(u->child[0] != u); - do_check_tree(m, u->child[0]); - } - if (u->child[1] != 0) { - assert(u->child[1]->parent == u); - assert(u->child[1] != u); - do_check_tree(m, u->child[1]); - } - if (u->child[0] != 0 && u->child[1] != 0) { - assert(chunksize(u->child[0]) < chunksize(u->child[1])); - } - } - u = u->fd; - } while (u != t); - assert(head != 0); -} - -/* Check all the chunks in a treebin. */ -static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); - tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); -} - -/* Check all the chunks in a smallbin. */ -static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; - unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); - if (!empty) { - for (; p != b; p = p->bk) { - size_t size = chunksize(p); - mchunkptr q; - /* each chunk claims to be free */ - do_check_free_chunk(m, p); - /* chunk belongs in bin */ - assert(small_index(size) == i); - assert(p->bk == b || chunksize(p->bk) == chunksize(p)); - /* chunk is followed by an inuse chunk */ - q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); - } - } -} - -/* Find x in a bin. Used in other check functions. 
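The treebin/smallbin checks test emptiness with plain bitmap operations on treemap/smallmap; a small standalone sketch of marking a bin and locating the lowest non-empty one (a portable loop stands in for compute_bit2idx):

#include <stdio.h>

int main(void) {
    unsigned map = 0;
    map |= 1U << 5;                      /* mark bin 5 non-empty */
    map |= 1U << 9;                      /* mark bin 9 non-empty */
    printf("bin 5 marked: %d\n", (map & (1U << 5)) != 0);
    unsigned least = map & (0U - map);   /* least_bit: isolate lowest set bit */
    unsigned idx = 0;
    while ((least >> idx) != 1U) ++idx;  /* bit -> index */
    printf("lowest non-empty bin: %u\n", idx);   /* 5 */
    return 0;
}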
*/ -static int bin_find(mstate m, mchunkptr x) { - size_t size = chunksize(x); - if (is_small(size)) { - bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); - if (smallmap_is_marked(m, sidx)) { - mchunkptr p = b; - do { - if (p == x) - return 1; - } while ((p = p->fd) != b); - } - } - else { - bindex_t tidx; - compute_tree_index(size, tidx); - if (treemap_is_marked(m, tidx)) { - tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); - while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - sizebits <<= 1; - } - if (t != 0) { - tchunkptr u = t; - do { - if (u == (tchunkptr)x) - return 1; - } while ((u = u->fd) != t); - } - } - } - return 0; -} - -/* Traverse each chunk and check it; return total */ -static size_t traverse_and_check(mstate m) { - size_t sum = 0; - if (is_initialized(m)) { - msegmentptr s = &m->seg; - sum += m->topsize + TOP_FOOT_SIZE; - while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - mchunkptr lastq = 0; - assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - sum += chunksize(q); - if (is_inuse(q)) { - assert(!bin_find(m, q)); - do_check_inuse_chunk(m, q); - } - else { - assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ - do_check_free_chunk(m, q); - } - lastq = q; - q = next_chunk(q); - } - s = s->next; - } - } - return sum; -} - - -/* Check all properties of malloc_state. */ -static void do_check_malloc_state(mstate m) { - bindex_t i; - size_t total; - /* check bins */ - for (i = 0; i < NSMALLBINS; ++i) - do_check_smallbin(m, i); - for (i = 0; i < NTREEBINS; ++i) - do_check_treebin(m, i); - - if (m->dvsize != 0) { /* check dv chunk */ - do_check_any_chunk(m, m->dv); - assert(m->dvsize == chunksize(m->dv)); - assert(m->dvsize >= MIN_CHUNK_SIZE); - assert(bin_find(m, m->dv) == 0); - } - - if (m->top != 0) { /* check top chunk */ - do_check_top_chunk(m, m->top); - /*assert(m->topsize == chunksize(m->top)); redundant */ - assert(m->topsize > 0); - assert(bin_find(m, m->top) == 0); - } - - total = traverse_and_check(m); - assert(total <= m->footprint); - assert(m->footprint <= m->max_footprint); -} -#endif /* DEBUG */ - -/* ----------------------------- statistics ------------------------------ */ - -#if !NO_MALLINFO -static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - ensure_initialization(); - if (!PREACTION(m)) { - check_malloc_state(m); - if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; - msegmentptr s = &m->seg; - while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - size_t sz = chunksize(q); - sum += sz; - if (!is_inuse(q)) { - mfree += sz; - ++nfree; - } - q = next_chunk(q); - } - s = s->next; - } - - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; - nm.uordblks = m->footprint - mfree; - nm.fordblks = mfree; - nm.keepcost = m->topsize; - } - - POSTACTION(m); - } - return nm; -} -#endif /* !NO_MALLINFO */ - -#if !NO_MALLOC_STATS -static void internal_malloc_stats(mstate m) { - ensure_initialization(); - if (!PREACTION(m)) { - size_t maxfp = 0; - size_t fp = 0; - size_t used = 0; - check_malloc_state(m); - if (is_initialized(m)) { - msegmentptr s = &m->seg; - 
maxfp = m->max_footprint; - fp = m->footprint; - used = fp - (m->topsize + TOP_FOOT_SIZE); - - while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!is_inuse(q)) - used -= chunksize(q); - q = next_chunk(q); - } - s = s->next; - } - } - POSTACTION(m); /* drop lock */ - fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); - fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); - fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); - } -} -#endif /* NO_MALLOC_STATS */ - -/* ----------------------- Operations on smallbins ----------------------- */ - -/* - Various forms of linking and unlinking are defined as macros. Even - the ones for trees, which are very long but have very short typical - paths. This is ugly but reduces reliance on inlining support of - compilers. -*/ - -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} - -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(B == smallbin_at(M,I) ||\ - (ok_address(M, B) && B->fd == P))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} - -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} - -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - assert(is_small(DVS));\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} - -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk 
= T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} - -/* - Unlink steps: - - 1. If x is a chained node, unlink it from its same-sized fd/bk links - and choose its bk node as its replacement. - 2. If x was the last node of its size, but not a leaf node, it must - be replaced with a leaf node (not merely one with an open left or - right), to make sure that lefts and rights of descendents - correspond properly to bit masks. We use the rightmost descendent - of x. We could use any other leaf, but this is easy to locate and - tends to counteract removal of leftmosts elsewhere, and so keeps - paths shorter than minimally guaranteed. This doesn't loop much - because on average a node in a tree is near the bottom. - 3. If x is the base of a chain (i.e., has parent links) relink - x's parent and children to x's replacement (or null if none). -*/ - -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} - -/* Relays to large vs small bin operations */ - -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } - - -/* Relays to internal calls to malloc/free from realloc, memalign etc */ - -#if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ - -/* ----------------------- Direct-mmapping chunks ----------------------- */ - -/* - Directly mmapped chunks are set up with an offset to the start of - the mmapped region stored in the prev_foot field of the chunk. 
This - allows reconstruction of the required argument to MUNMAP when freed, - and also allows adjustment of the returned chunk to meet alignment - requirements (especially in memalign). -*/ - -/* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { - size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - if (m->footprint_limit != 0) { - size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; - } - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); - if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; - mchunkptr p = (mchunkptr)(mm + offset); - p->prev_foot = offset; - p->head = psize; - mark_inuse_foot(m, p, psize); - chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; - - if (m->least_addr == 0 || mm < m->least_addr) - m->least_addr = mm; - if ((m->footprint += mmsize) > m->max_footprint) - m->max_footprint = m->footprint; - assert(is_aligned(chunk2mem(p))); - check_mmapped_chunk(m, p); - return chunk2mem(p); - } - } - return 0; -} - -/* Realloc using mmap */ -static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { - size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ - return 0; - /* Keep old chunk if big enough but not too big */ - if (oldsize >= nb + SIZE_T_SIZE && - (oldsize - nb) <= (mparams.granularity << 1)) - return oldp; - else { - size_t offset = oldp->prev_foot; - size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; - size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, flags); - if (cp != CMFAIL) { - mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; - newp->head = psize; - mark_inuse_foot(m, newp, psize); - chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; - - if (cp < m->least_addr) - m->least_addr = cp; - if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) - m->max_footprint = m->footprint; - check_mmapped_chunk(m, newp); - return newp; - } - } - return 0; -} - - -/* -------------------------- mspace management -------------------------- */ - -/* Initialize top chunk and its size */ -static void init_top(mstate m, mchunkptr p, size_t psize) { - /* Ensure alignment */ - size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); - psize -= offset; - - m->top = p; - m->topsize = psize; - p->head = psize | PINUSE_BIT; - /* set size of fake trailing chunk holding overhead space only once */ - chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ -} - -/* Initialize bins for a new mstate that is otherwise zeroed out */ -static void init_bins(mstate m) { - /* Establish circular links for smallbins */ - bindex_t i; - for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); - bin->fd = bin->bk = bin; - } -} - -#if PROCEED_ON_ERROR - -/* default corruption action */ -static void reset_on_error(mstate m) { - int i; - ++malloc_corruption_error_count; - /* Reinitialize fields to forget about all memory */ - m->smallmap = m->treemap = 0; - m->dvsize = m->topsize = 0; - m->seg.base = 0; - m->seg.size = 0; - 
m->seg.next = 0; - m->top = m->dv = 0; - for (i = 0; i < NTREEBINS; ++i) - *treebin_at(m, i) = 0; - init_bins(m); -} -#endif /* PROCEED_ON_ERROR */ - -/* Allocate chunk and prepend remainder with chunk in successor base. */ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { - mchunkptr p = align_as_chunk(newbase); - mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; - mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; - set_size_and_pinuse_of_inuse_chunk(m, p, nb); - - assert((char*)oldfirst > (char*)q); - assert(pinuse(oldfirst)); - assert(qsize >= MIN_CHUNK_SIZE); - - /* consolidate remainder with first chunk of old base */ - if (oldfirst == m->top) { - size_t tsize = m->topsize += qsize; - m->top = q; - q->head = tsize | PINUSE_BIT; - check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { - size_t dsize = m->dvsize += qsize; - m->dv = q; - set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { - if (!is_inuse(oldfirst)) { - size_t nsize = chunksize(oldfirst); - unlink_chunk(m, oldfirst, nsize); - oldfirst = chunk_plus_offset(oldfirst, nsize); - qsize += nsize; - } - set_free_with_pinuse(q, qsize, oldfirst); - insert_chunk(m, q, qsize); - check_free_chunk(m, q); - } - - check_malloced_chunk(m, chunk2mem(p), nb); - return chunk2mem(p); -} - -/* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { - /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; - msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; - mchunkptr sp = (mchunkptr)csp; - msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; - - /* reset top to new space */ - init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); - - /* Set up segment record */ - assert(is_aligned(ss)); - set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ - m->seg.base = tbase; - m->seg.size = tsize; - m->seg.sflags = mmapped; - m->seg.next = ss; - - /* Insert trailing fenceposts */ - for (;;) { - mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); - p->head = FENCEPOST_HEAD; - ++nfences; - if ((char*)(&(nextp->head)) < old_end) - p = nextp; - else - break; - } - assert(nfences >= 2); - - /* Insert the rest of old top into a bin as an ordinary free chunk */ - if (csp != old_top) { - mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; - mchunkptr tn = chunk_plus_offset(q, psize); - set_free_with_pinuse(q, psize, tn); - insert_chunk(m, q, psize); - } - - check_top_chunk(m, m->top); -} - -/* -------------------------- System allocation -------------------------- */ - -/* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; - size_t tsize = 0; - flag_t mmap_flag = 0; - size_t asize; /* allocation size */ - - ensure_initialization(); - - /* Directly map large chunks, but only if already initialized */ - if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; - } - - asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) - return 0; /* wraparound */ - if (m->footprint_limit != 0) { - size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; - } - - /* - Try getting memory in any of three ways (in most-preferred to - least-preferred order): - 1. A call to MORECORE that can normally contiguously extend memory. - (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or - or main space is mmapped or a previous contiguous call failed) - 2. A call to MMAP new space (disabled if not HAVE_MMAP). - Note that under the default settings, if MORECORE is unable to - fulfill a request, and HAVE_MMAP is true, then mmap is - used as a noncontiguous system allocator. This is a useful backup - strategy for systems with holes in address spaces -- in this case - sbrk cannot contiguously expand the heap, but mmap may be able to - find space. - 3. A call to MORECORE that cannot usually contiguously extend memory. - (disabled if not HAVE_MORECORE) - - In all cases, we need to request enough bytes from system to ensure - we can malloc nb bytes upon success, so pad with enough space for - top_foot, plus alignment-pad to make sure we don't lose bytes if - not on boundary, and round this up to a granularity unit. - */ - - if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0)? 
0 : segment_holding(m, (char*)m->top); - ACQUIRE_MALLOC_GLOBAL_LOCK(); - - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); - if (base != CMFAIL) { - size_t fp; - /* Adjust to end on a page boundary */ - if (!is_page_aligned(base)) - ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ - if (ssize > nb && ssize < HALF_MAX_SIZE_T && - (m->footprint_limit == 0 || - (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char*)(CALL_MORECORE(ssize))) == base) { - tbase = base; - tsize = ssize; - } - } - } - else { - /* Subtract out existing available top space from MORECORE request. */ - ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); - /* Use mem here only if it did continuously extend old space */ - if (ssize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { - tbase = br; - tsize = ssize; - } - } - - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && - ssize < nb + SYS_ALLOC_PADDING) { - size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); - if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); - if (end != CMFAIL) - ssize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-ssize); - br = CMFAIL; - } - } - } - } - if (br != CMFAIL) { /* Use the space we did get */ - tbase = br; - tsize = ssize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ - } - - RELEASE_MALLOC_GLOBAL_LOCK(); - } - - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char* mp = (char*)(CALL_MMAP(asize)); - if (mp != CMFAIL) { - tbase = mp; - tsize = asize; - mmap_flag = USE_MMAP_BIT; - } - } - - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ - if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; - ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); - RELEASE_MALLOC_GLOBAL_LOCK(); - if (br != CMFAIL && end != CMFAIL && br < end) { - size_t ssize = end - br; - if (ssize > nb + TOP_FOOT_SIZE) { - tbase = br; - tsize = ssize; - } - } - } - } - - if (tbase != CMFAIL) { - - if ((m->footprint += tsize) > m->max_footprint) - m->max_footprint = m->footprint; - - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) - m->least_addr = tbase; - m->seg.base = tbase; - m->seg.size = tsize; - m->seg.sflags = mmap_flag; - m->magic = mparams.magic; - m->release_checks = MAX_RELEASE_CHECK_RATE; - init_bins(m); -#if !ONLY_MSPACES - if (is_global(m)) - init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); - else -#endif - { - /* Offset top by embedded malloc_state */ - mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); - } - } - - else { - /* Try to merge with an existing segment */ - msegmentptr sp = &m->seg; - /* Only consider most recent segment if traversal suppressed */ - while (sp != 0 && tbase != sp->base + sp->size) - sp = (NO_SEGMENT_TRAVERSAL) ? 
0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && - (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ - sp->size += tsize; - init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; - sp = &m->seg; - while (sp != 0 && sp->base != tbase + tsize) - sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && - (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - char* oldbase = sp->base; - sp->base = tbase; - sp->size += tsize; - return prepend_alloc(m, tbase, oldbase, nb); - } - else - add_segment(m, tbase, tsize, mmap_flag); - } - } - - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; - mchunkptr p = m->top; - mchunkptr r = m->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(m, p, nb); - check_top_chunk(m, m->top); - check_malloced_chunk(m, chunk2mem(p), nb); - return chunk2mem(p); - } - } - - MALLOC_FAILURE_ACTION; - return 0; -} - -/* ----------------------- system deallocation -------------------------- */ - -/* Unmap and unlink any mmapped segments that don't contain used chunks */ -static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; - msegmentptr pred = &m->seg; - msegmentptr sp = pred->next; - while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; - msegmentptr next = sp->next; - ++nsegs; - if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { - mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); - /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { - tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); - if (p == m->dv) { - m->dv = 0; - m->dvsize = 0; - } - else { - unlink_large_chunk(m, tp); - } - if (CALL_MUNMAP(base, size) == 0) { - released += size; - m->footprint -= size; - /* unlink obsoleted record */ - sp = pred; - sp->next = next; - } - else { /* back out if cannot unmap */ - insert_large_chunk(m, tp, psize); - } - } - } - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ - break; - pred = sp; - sp = next; - } - /* Reset check counter */ - m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? 
- (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); - return released; -} - -static int sys_trim(mstate m, size_t pad) { - size_t released = 0; - ensure_initialization(); - if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ - - if (m->topsize > pad) { - /* Shrink top space in granularity-size units, keeping at least one */ - size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); - - if (!is_extern_segment(sp)) { - if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ - size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ - /* Prefer mremap, fall back to munmap */ - if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || - (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { - released = extra; - } - } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ - extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; - ACQUIRE_MALLOC_GLOBAL_LOCK(); - { - /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); - if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); - if (rel_br != CMFAIL && new_br < old_br) - released = old_br - new_br; - } - } - RELEASE_MALLOC_GLOBAL_LOCK(); - } - } - - if (released != 0) { - sp->size -= released; - m->footprint -= released; - init_top(m, m->top, m->topsize - released); - check_top_chunk(m, m->top); - } - } - - /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); - - /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; - } - - return (released != 0)? 1 : 0; -} - -/* Consolidate and bin a chunk. Differs from exported versions - of free mainly in that the chunk need not be marked as inuse. 
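The trim computation in sys_trim above releases whole granularity-sized units while always holding one back; a worked check of that arithmetic with illustrative numbers (64 KiB granularity):

#include <stdio.h>

static size_t trim_extra(size_t topsize, size_t pad, size_t unit) {
    if (topsize <= pad) return 0;
    return ((topsize - pad + (unit - 1)) / unit - 1) * unit;
}

int main(void) {
    size_t unit = 64 * 1024;
    printf("%zu\n", trim_extra(400 * 1024, 2048, unit));  /* 393216: six units released */
    printf("%zu\n", trim_extra(64 * 1024, 2048, unit));   /* 0: the last unit is kept    */
    return 0;
}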
-*/ -static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { - mchunkptr next = chunk_plus_offset(p, psize); - if (!pinuse(p)) { - mchunkptr prev; - size_t prevsize = p->prev_foot; - if (is_mmapped(p)) { - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - m->footprint -= psize; - return; - } - prev = chunk_minus_offset(p, prevsize); - psize += prevsize; - p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ - if (p != m->dv) { - unlink_chunk(m, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { - m->dvsize = psize; - set_free_with_pinuse(p, psize, next); - return; - } - } - else { - CORRUPTION_ERROR_ACTION(m); - return; - } - } - if (RTCHECK(ok_address(m, next))) { - if (!cinuse(next)) { /* consolidate forward */ - if (next == m->top) { - size_t tsize = m->topsize += psize; - m->top = p; - p->head = tsize | PINUSE_BIT; - if (p == m->dv) { - m->dv = 0; - m->dvsize = 0; - } - return; - } - else if (next == m->dv) { - size_t dsize = m->dvsize += psize; - m->dv = p; - set_size_and_pinuse_of_free_chunk(p, dsize); - return; - } - else { - size_t nsize = chunksize(next); - psize += nsize; - unlink_chunk(m, next, nsize); - set_size_and_pinuse_of_free_chunk(p, psize); - if (p == m->dv) { - m->dvsize = psize; - return; - } - } - } - else { - set_free_with_pinuse(p, psize, next); - } - insert_chunk(m, p, psize); - } - else { - CORRUPTION_ERROR_ACTION(m); - } -} - -/* ---------------------------- malloc --------------------------- */ - -/* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { - tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ - tchunkptr t; - bindex_t idx; - compute_tree_index(nb, idx); - if ((t = *treebin_at(m, idx)) != 0) { - /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ - for (;;) { - tchunkptr rt; - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - v = t; - if ((rsize = trem) == 0) - break; - } - rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; - if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ - break; - } - sizebits <<= 1; - } - } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ - binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; - if (leftbits != 0) { - bindex_t i; - binmap_t leastbit = least_bit(leftbits); - compute_bit2idx(leastbit, i); - t = *treebin_at(m, i); - } - } - - while (t != 0) { /* find smallest of tree or subtree */ - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - rsize = trem; - v = t; - } - t = leftmost_child(t); - } - - /* If dv is a better fit, return 0 so malloc will use it */ - if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ - mchunkptr r = chunk_plus_offset(v, nb); - assert(chunksize(v) == rsize + nb); - if (RTCHECK(ok_next(v, r))) { - unlink_large_chunk(m, v); - if (rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(m, v, (rsize + nb)); - else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - insert_chunk(m, r, rsize); - } - return chunk2mem(v); - } - } - CORRUPTION_ERROR_ACTION(m); - } - return 0; -} - -/* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t 
nb) { - tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); - compute_bit2idx(leastbit, i); - v = t = *treebin_at(m, i); - rsize = chunksize(t) - nb; - - while ((t = leftmost_child(t)) != 0) { - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - rsize = trem; - v = t; - } - } - - if (RTCHECK(ok_address(m, v))) { - mchunkptr r = chunk_plus_offset(v, nb); - assert(chunksize(v) == rsize + nb); - if (RTCHECK(ok_next(v, r))) { - unlink_large_chunk(m, v); - if (rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(m, v, (rsize + nb)); - else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(m, r, rsize); - } - return chunk2mem(v); - } - } - - CORRUPTION_ERROR_ACTION(m); - return 0; -} - -#if !ONLY_MSPACES - -void* dlmalloc(size_t bytes) { - /* - Basic algorithm: - If a small request (< 256 bytes minus per-chunk overhead): - 1. If one exists, use a remainderless chunk in associated smallbin. - (Remainderless means that there are too few excess bytes to - represent as a chunk.) - 2. If it is big enough, use the dv chunk, which is normally the - chunk adjacent to the one used for the most recent small request. - 3. If one exists, split the smallest available chunk in a bin, - saving remainder in dv. - 4. If it is big enough, use the top chunk. - 5. If available, get memory from system and use it - Otherwise, for a large request: - 1. Find the smallest available binned chunk that fits, and use it - if it is better fitting than dv chunk, splitting if necessary. - 2. If better fitting than any binned chunk, use the dv chunk. - 3. If it is big enough, use the top chunk. - 4. If request size >= mmap threshold, try to directly mmap this chunk. - 5. If available, get memory from system and use it - - The ugly goto's here ensure that postaction occurs along all paths. - */ - -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif - - if (!PREACTION(gm)) { - void* mem; - size_t nb; - if (bytes <= MAX_SMALL_REQUEST) { - bindex_t idx; - binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); - idx = small_index(nb); - smallbits = gm->smallmap >> idx; - - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ - mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ - b = smallbin_at(gm, idx); - p = b->fd; - assert(chunksize(p) == small_index2size(idx)); - unlink_first_small_chunk(gm, b, p, idx); - set_inuse_and_pinuse(gm, p, small_index2size(idx)); - mem = chunk2mem(p); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ - mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); - compute_bit2idx(leastbit, i); - b = smallbin_at(gm, i); - p = b->fd; - assert(chunksize(p) == small_index2size(i)); - unlink_first_small_chunk(gm, b, p, i); - rsize = small_index2size(i) - nb; - /* Fit here cannot be remainderless if 4byte sizes */ - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(gm, p, small_index2size(i)); - else { - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - r = chunk_plus_offset(p, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(gm, r, rsize); - } - mem = chunk2mem(p); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - } - } - else if (bytes >= MAX_REQUEST) - nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ - else { - nb = pad_request(bytes); - if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - } - - if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; - mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ - mchunkptr r = gm->dv = chunk_plus_offset(p, nb); - gm->dvsize = rsize; - set_size_and_pinuse_of_free_chunk(r, rsize); - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ - size_t dvs = gm->dvsize; - gm->dvsize = 0; - gm->dv = 0; - set_inuse_and_pinuse(gm, p, dvs); - } - mem = chunk2mem(p); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; - mchunkptr p = gm->top; - mchunkptr r = gm->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - mem = chunk2mem(p); - check_top_chunk(gm, gm->top); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - mem = sys_alloc(gm, nb); - - postaction: - POSTACTION(gm); - return mem; - } - - return 0; -} - -/* ---------------------------- free --------------------------- */ - -void dlfree(void* mem) { - /* - Consolidate freed chunks with preceeding or succeeding bordering - free chunks, if they exist, and then place in a bin. Intermixed - with special cases for top, dv, mmapped chunks, and usage errors. 
- */ - - if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS - mstate fm = get_mstate_for(p); - if (!ok_magic(fm)) { - USAGE_ERROR_ACTION(fm, p); - return; - } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ - if (!PREACTION(fm)) { - check_inuse_chunk(fm, p); - if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); - mchunkptr next = chunk_plus_offset(p, psize); - if (!pinuse(p)) { - size_t prevsize = p->prev_foot; - if (is_mmapped(p)) { - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - fm->footprint -= psize; - goto postaction; - } - else { - mchunkptr prev = chunk_minus_offset(p, prevsize); - psize += prevsize; - p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ - if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { - fm->dvsize = psize; - set_free_with_pinuse(p, psize, next); - goto postaction; - } - } - else - goto erroraction; - } - } - - if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ - if (next == fm->top) { - size_t tsize = fm->topsize += psize; - fm->top = p; - p->head = tsize | PINUSE_BIT; - if (p == fm->dv) { - fm->dv = 0; - fm->dvsize = 0; - } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); - goto postaction; - } - else if (next == fm->dv) { - size_t dsize = fm->dvsize += psize; - fm->dv = p; - set_size_and_pinuse_of_free_chunk(p, dsize); - goto postaction; - } - else { - size_t nsize = chunksize(next); - psize += nsize; - unlink_chunk(fm, next, nsize); - set_size_and_pinuse_of_free_chunk(p, psize); - if (p == fm->dv) { - fm->dvsize = psize; - goto postaction; - } - } - } - else - set_free_with_pinuse(p, psize, next); - - if (is_small(psize)) { - insert_small_chunk(fm, p, psize); - check_free_chunk(fm, p); - } - else { - tchunkptr tp = (tchunkptr)p; - insert_large_chunk(fm, tp, psize); - check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); - } - goto postaction; - } - } - erroraction: - USAGE_ERROR_ACTION(fm, p); - postaction: - POSTACTION(fm); - } - } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ -} - -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; - size_t req = 0; - if (n_elements != 0) { - req = n_elements * elem_size; - if (((n_elements | elem_size) & ~(size_t)0xffff) && - (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ - } - mem = dlmalloc(req); - if (mem != 0 && calloc_must_clear(mem2chunk(mem))) - memset(mem, 0, req); - return mem; -} - -#endif /* !ONLY_MSPACES */ - -/* ------------ Internal support for realloc, memalign, etc -------------- */ - -/* Try to realloc; only in-place unless can_move true */ -static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, - int can_move) { - mchunkptr newp = 0; - size_t oldsize = chunksize(p); - mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && - ok_next(p, next) && ok_pinuse(next))) { - if (is_mmapped(p)) { - newp = mmap_resize(m, p, nb, can_move); - } - else if (oldsize >= nb) { /* already big enough */ - size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ - mchunkptr r = chunk_plus_offset(p, nb); - set_inuse(m, p, nb); - set_inuse(m, r, rsize); - dispose_chunk(m, r, rsize); - } - newp = p; - } - else if (next == m->top) { /* extend into top */ - if (oldsize + m->topsize > nb) { - size_t newsize = oldsize + 
m->topsize; - size_t newtopsize = newsize - nb; - mchunkptr newtop = chunk_plus_offset(p, nb); - set_inuse(m, p, nb); - newtop->head = newtopsize |PINUSE_BIT; - m->top = newtop; - m->topsize = newtopsize; - newp = p; - } - } - else if (next == m->dv) { /* extend into dv */ - size_t dvs = m->dvsize; - if (oldsize + dvs >= nb) { - size_t dsize = oldsize + dvs - nb; - if (dsize >= MIN_CHUNK_SIZE) { - mchunkptr r = chunk_plus_offset(p, nb); - mchunkptr n = chunk_plus_offset(r, dsize); - set_inuse(m, p, nb); - set_size_and_pinuse_of_free_chunk(r, dsize); - clear_pinuse(n); - m->dvsize = dsize; - m->dv = r; - } - else { /* exhaust dv */ - size_t newsize = oldsize + dvs; - set_inuse(m, p, newsize); - m->dvsize = 0; - m->dv = 0; - } - newp = p; - } - } - else if (!cinuse(next)) { /* extend into next free chunk */ - size_t nextsize = chunksize(next); - if (oldsize + nextsize >= nb) { - size_t rsize = oldsize + nextsize - nb; - unlink_chunk(m, next, nextsize); - if (rsize < MIN_CHUNK_SIZE) { - size_t newsize = oldsize + nextsize; - set_inuse(m, p, newsize); - } - else { - mchunkptr r = chunk_plus_offset(p, nb); - set_inuse(m, p, nb); - set_inuse(m, r, rsize); - dispose_chunk(m, r, rsize); - } - newp = p; - } - } - } - else { - USAGE_ERROR_ACTION(m, chunk2mem(p)); - } - return newp; -} - -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ - alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ - size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; - alignment = a; - } - if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ - MALLOC_FAILURE_ACTION; - } - } - else { - size_t nb = request2size(bytes); - size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; - mem = internal_malloc(m, req); - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) - return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ - /* - Find an aligned spot inside chunk. Since we need to give - back leading space in a chunk of at least MIN_CHUNK_SIZE, if - the first calculation places us at a spot with less than - MIN_CHUNK_SIZE leader, we can move to the next aligned spot. - We've allocated enough total room so that this is always - possible. - */ - char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
- br : br+alignment; - mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; - - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ - newp->prev_foot = p->prev_foot + leadsize; - newp->head = newsize; - } - else { /* Otherwise, give back leader, use the rest */ - set_inuse(m, newp, newsize); - set_inuse(m, p, leadsize); - dispose_chunk(m, p, leadsize); - } - p = newp; - } - - /* Give back spare room at the end */ - if (!is_mmapped(p)) { - size_t size = chunksize(p); - if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; - mchunkptr remainder = chunk_plus_offset(p, nb); - set_inuse(m, p, nb); - set_inuse(m, remainder, remainder_size); - dispose_chunk(m, remainder, remainder_size); - } - } - - mem = chunk2mem(p); - assert (chunksize(p) >= nb); - assert(((size_t)mem & (alignment - 1)) == 0); - check_inuse_chunk(m, p); - POSTACTION(m); - } - } - return mem; -} - -/* - Common support for independent_X routines, handling - all of the combinations that can result. - The opts arg has: - bit 0 set if all elements are same size (using sizes[0]) - bit 1 set if elements should be zeroed -*/ -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ - size_t size; - size_t i; - - ensure_initialization(); - /* compute array length, if needed */ - if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ - marray = chunks; - array_size = 0; - } - else { - /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); - marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); - } - - /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ - element_size = request2size(*sizes); - contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ - element_size = 0; - contents_size = 0; - for (i = 0; i != n_elements; ++i) - contents_size += request2size(sizes[i]); - } - - size = contents_size + array_size; - - /* - Allocate the aggregate chunk. First disable direct-mmapping so - malloc won't use it, since we would not be able to later - free/realloc space internal to a segregated mmap region. 
- */ - was_enabled = use_mmap(m); - disable_mmap(m); - mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; - - if (PREACTION(m)) return 0; - p = mem2chunk(mem); - remainder_size = chunksize(p); - - assert(!is_mmapped(p)); - - if (opts & 0x2) { /* optionally clear the elements */ - memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); - } - - /* If not provided, allocate the pointer array as final part of chunk */ - if (marray == 0) { - size_t array_chunk_size; - array_chunk = chunk_plus_offset(p, contents_size); - array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); - set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); - remainder_size = contents_size; - } - - /* split out elements */ - for (i = 0; ; ++i) { - marray[i] = chunk2mem(p); - if (i != n_elements-1) { - if (element_size != 0) - size = element_size; - else - size = request2size(sizes[i]); - remainder_size -= size; - set_size_and_pinuse_of_inuse_chunk(m, p, size); - p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ - set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); - break; - } - } - -#if DEBUG - if (marray != chunks) { - /* final element must have exactly exhausted chunk */ - if (element_size != 0) { - assert(remainder_size == element_size); - } - else { - assert(remainder_size == request2size(sizes[i])); - } - check_inuse_chunk(m, mem2chunk(marray)); - } - for (i = 0; i != n_elements; ++i) - check_inuse_chunk(m, mem2chunk(marray[i])); - -#endif /* DEBUG */ - - POSTACTION(m); - return marray; -} - -/* Try to free all pointers in the given array. - Note: this could be made faster, by delaying consolidation, - at the price of disabling some user integrity checks, We - still optimize some consolidations by combining adjacent - chunks before freeing, which will occur often if allocated - with ialloc or the array is sorted. 
-*/ -static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { - size_t unfreed = 0; - if (!PREACTION(m)) { - void** a; - void** fence = &(array[nelem]); - for (a = array; a != fence; ++a) { - void* mem = *a; - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); -#if FOOTERS - if (get_mstate_for(p) != m) { - ++unfreed; - continue; - } -#endif - check_inuse_chunk(m, p); - *a = 0; - if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - void ** b = a + 1; /* try to merge with next chunk */ - mchunkptr next = next_chunk(p); - if (b != fence && *b == chunk2mem(next)) { - size_t newsize = chunksize(next) + psize; - set_inuse(m, p, newsize); - *b = chunk2mem(p); - } - else - dispose_chunk(m, p, psize); - } - else { - CORRUPTION_ERROR_ACTION(m); - break; - } - } - } - if (should_trim(m, m->topsize)) - sys_trim(m, 0); - POSTACTION(m); - } - return unfreed; -} - -/* Traversal */ -#if MALLOC_INSPECT_ALL -static void internal_inspect_all(mstate m, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { - if (is_initialized(m)) { - mchunkptr top = m->top; - msegmentptr s; - for (s = &m->seg; s != 0; s = s->next) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { - mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void* start; - if (is_inuse(q)) { - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ - start = chunk2mem(q); - } - else { - used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void*)((char*)q + sizeof(struct malloc_chunk)); - } - else { - start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); - } - } - if (start < (void*)next) /* skip if all space is bookkeeping */ - handler(start, next, used, arg); - if (q == top) - break; - q = next; - } - } - } -} -#endif /* MALLOC_INSPECT_ALL */ - -/* ------------------ Exported realloc, memalign, etc -------------------- */ - -#if !ONLY_MSPACES - -void* dlrealloc(void* oldmem, size_t bytes) { - void* mem = 0; - if (oldmem == 0) { - mem = dlmalloc(bytes); - } - else if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - } -#ifdef REALLOC_ZERO_BYTES_FREES - else if (bytes == 0) { - dlfree(oldmem); - } -#endif /* REALLOC_ZERO_BYTES_FREES */ - else { - size_t nb = request2size(bytes); - mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS - mstate m = gm; -#else /* FOOTERS */ - mstate m = get_mstate_for(oldp); - if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); - return 0; - } -#endif /* FOOTERS */ - if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); - POSTACTION(m); - if (newp != 0) { - check_inuse_chunk(m, newp); - mem = chunk2mem(newp); - } - else { - mem = internal_malloc(m, bytes); - if (mem != 0) { - size_t oc = chunksize(oldp) - overhead_for(oldp); - memcpy(mem, oldmem, (oc < bytes)? oc : bytes); - internal_free(m, oldmem); - } - } - } - } - return mem; -} - -void* dlrealloc_in_place(void* oldmem, size_t bytes) { - void* mem = 0; - if (oldmem != 0) { - if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); - mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS - mstate m = gm; -#else /* FOOTERS */ - mstate m = get_mstate_for(oldp); - if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); - return 0; - } -#endif /* FOOTERS */ - if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); - POSTACTION(m); - if (newp == oldp) { - check_inuse_chunk(m, newp); - mem = oldmem; - } - } - } - } - return mem; -} - -void* dlmemalign(size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) { - return dlmalloc(bytes); - } - return internal_memalign(gm, alignment, bytes); -} - -int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment == MALLOC_ALIGNMENT) - mem = dlmalloc(bytes); - else { - size_t d = alignment / sizeof(void*); - size_t r = alignment % sizeof(void*); - if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) - return EINVAL; - else if (bytes <= MAX_REQUEST - alignment) { - if (alignment < MIN_CHUNK_SIZE) - alignment = MIN_CHUNK_SIZE; - mem = internal_memalign(gm, alignment, bytes); - } - } - if (mem == 0) - return ENOMEM; - else { - *pp = mem; - return 0; - } -} - -void* dlvalloc(size_t bytes) { - size_t pagesz; - ensure_initialization(); - pagesz = mparams.page_size; - return dlmemalign(pagesz, bytes); -} - -void* dlpvalloc(size_t bytes) { - size_t pagesz; - ensure_initialization(); - pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); -} - -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ - return ialloc(gm, n_elements, &sz, 3, chunks); -} - -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { - return ialloc(gm, n_elements, sizes, 0, chunks); -} - -size_t dlbulk_free(void* array[], size_t nelem) { - return internal_bulk_free(gm, array, nelem); -} - -#if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { - ensure_initialization(); - if (!PREACTION(gm)) { - internal_inspect_all(gm, handler, arg); - POSTACTION(gm); - } -} -#endif /* MALLOC_INSPECT_ALL */ - -int dlmalloc_trim(size_t pad) { - int result = 0; - ensure_initialization(); - if (!PREACTION(gm)) { - result = sys_trim(gm, pad); - POSTACTION(gm); - } - return result; -} - -size_t dlmalloc_footprint(void) { - return gm->footprint; -} - -size_t dlmalloc_max_footprint(void) { - return gm->max_footprint; -} - -size_t dlmalloc_footprint_limit(void) { - size_t maf = gm->footprint_limit; - return maf == 0 ? 
MAX_SIZE_T : maf; -} - -size_t dlmalloc_set_footprint_limit(size_t bytes) { - ensure_initialization(); - size_t result; /* invert sense of 0 */ - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ - if (bytes == MAX_SIZE_T) - result = 0; /* disable */ - else - result = granularity_align(bytes); - return gm->footprint_limit = result; -} - -#if !NO_MALLINFO -struct mallinfo dlmallinfo(void) { - return internal_mallinfo(gm); -} -#endif /* NO_MALLINFO */ - -#if !NO_MALLOC_STATS -void dlmalloc_stats() { - internal_malloc_stats(gm); -} -#endif /* NO_MALLOC_STATS */ - -int dlmallopt(int param_number, int value) { - return change_mparam(param_number, value); -} - -size_t dlmalloc_usable_size(void* mem) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); - } - return 0; -} - -#endif /* !ONLY_MSPACES */ - -/* ----------------------------- user mspaces ---------------------------- */ - -#if MSPACES - -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); - mchunkptr mn; - mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); - memset(m, 0, msize); - (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize|INUSE_BITS); - m->seg.base = m->least_addr = tbase; - m->seg.size = m->footprint = m->max_footprint = tsize; - m->magic = mparams.magic; - m->release_checks = MAX_RELEASE_CHECK_RATE; - m->mflags = mparams.default_mflags; - m->extp = 0; - m->exts = 0; - disable_contiguous(m); - init_bins(m); - mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); - check_top_chunk(m, m->top); - return m; -} - -mspace create_mspace(size_t capacity, int locked) { - mstate m = 0; - size_t msize; - ensure_initialization(); - msize = pad_request(sizeof(struct malloc_state)); - if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? 
mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); - size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); - if (tbase != CMFAIL) { - m = init_user_mstate(tbase, tsize); - m->seg.sflags = USE_MMAP_BIT; - set_lock(m, locked); - } - } - return (mspace)m; -} - -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { - mstate m = 0; - size_t msize; - ensure_initialization(); - msize = pad_request(sizeof(struct malloc_state)); - if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); - m->seg.sflags = EXTERN_BIT; - set_lock(m, locked); - } - return (mspace)m; -} - -int mspace_track_large_chunks(mspace msp, int enable) { - int ret = 0; - mstate ms = (mstate)msp; - if (!PREACTION(ms)) { - if (!use_mmap(ms)) { - ret = 1; - } - if (!enable) { - enable_mmap(ms); - } else { - disable_mmap(ms); - } - POSTACTION(ms); - } - return ret; -} - -size_t destroy_mspace(mspace msp) { - size_t freed = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ - while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; - flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ - sp = sp->next; - if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && - CALL_MUNMAP(base, size) == 0) - freed += size; - } - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return freed; -} - -/* - mspace versions of routines are near-clones of the global - versions. This is not so nice but better than the alternatives. -*/ - -void* mspace_malloc(mspace msp, size_t bytes) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - if (!PREACTION(ms)) { - void* mem; - size_t nb; - if (bytes <= MAX_SMALL_REQUEST) { - bindex_t idx; - binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); - idx = small_index(nb); - smallbits = ms->smallmap >> idx; - - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ - mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ - b = smallbin_at(ms, idx); - p = b->fd; - assert(chunksize(p) == small_index2size(idx)); - unlink_first_small_chunk(ms, b, p, idx); - set_inuse_and_pinuse(ms, p, small_index2size(idx)); - mem = chunk2mem(p); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ - mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); - compute_bit2idx(leastbit, i); - b = smallbin_at(ms, i); - p = b->fd; - assert(chunksize(p) == small_index2size(i)); - unlink_first_small_chunk(ms, b, p, i); - rsize = small_index2size(i) - nb; - /* Fit here cannot be remainderless if 4byte sizes */ - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(ms, p, small_index2size(i)); - else { - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - r = chunk_plus_offset(p, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(ms, r, rsize); - } - mem = chunk2mem(p); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - } - } - else if (bytes >= MAX_REQUEST) - nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ - else { - nb = pad_request(bytes); - if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - } - - if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; - mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ - mchunkptr r = ms->dv = chunk_plus_offset(p, nb); - ms->dvsize = rsize; - set_size_and_pinuse_of_free_chunk(r, rsize); - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ - size_t dvs = ms->dvsize; - ms->dvsize = 0; - ms->dv = 0; - set_inuse_and_pinuse(ms, p, dvs); - } - mem = chunk2mem(p); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; - mchunkptr p = ms->top; - mchunkptr r = ms->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - mem = chunk2mem(p); - check_top_chunk(ms, ms->top); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - mem = sys_alloc(ms, nb); - - postaction: - POSTACTION(ms); - return mem; - } - - return 0; -} - -void mspace_free(mspace msp, void* mem) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS - mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ -#else /* FOOTERS */ - mstate fm = (mstate)msp; -#endif /* FOOTERS */ - if (!ok_magic(fm)) { - USAGE_ERROR_ACTION(fm, p); - return; - } - if (!PREACTION(fm)) { - check_inuse_chunk(fm, p); - if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); - mchunkptr next = chunk_plus_offset(p, psize); - if (!pinuse(p)) { - size_t prevsize = p->prev_foot; - if (is_mmapped(p)) { - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - fm->footprint -= psize; - goto postaction; - } - else { - mchunkptr prev = chunk_minus_offset(p, prevsize); - psize += prevsize; - p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ - if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - } - else if 
((next->head & INUSE_BITS) == INUSE_BITS) { - fm->dvsize = psize; - set_free_with_pinuse(p, psize, next); - goto postaction; - } - } - else - goto erroraction; - } - } - - if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ - if (next == fm->top) { - size_t tsize = fm->topsize += psize; - fm->top = p; - p->head = tsize | PINUSE_BIT; - if (p == fm->dv) { - fm->dv = 0; - fm->dvsize = 0; - } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); - goto postaction; - } - else if (next == fm->dv) { - size_t dsize = fm->dvsize += psize; - fm->dv = p; - set_size_and_pinuse_of_free_chunk(p, dsize); - goto postaction; - } - else { - size_t nsize = chunksize(next); - psize += nsize; - unlink_chunk(fm, next, nsize); - set_size_and_pinuse_of_free_chunk(p, psize); - if (p == fm->dv) { - fm->dvsize = psize; - goto postaction; - } - } - } - else - set_free_with_pinuse(p, psize, next); - - if (is_small(psize)) { - insert_small_chunk(fm, p, psize); - check_free_chunk(fm, p); - } - else { - tchunkptr tp = (tchunkptr)p; - insert_large_chunk(fm, tp, psize); - check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); - } - goto postaction; - } - } - erroraction: - USAGE_ERROR_ACTION(fm, p); - postaction: - POSTACTION(fm); - } - } -} - -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; - size_t req = 0; - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - if (n_elements != 0) { - req = n_elements * elem_size; - if (((n_elements | elem_size) & ~(size_t)0xffff) && - (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ - } - mem = internal_malloc(ms, req); - if (mem != 0 && calloc_must_clear(mem2chunk(mem))) - memset(mem, 0, req); - return mem; -} - -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; - if (oldmem == 0) { - mem = mspace_malloc(msp, bytes); - } - else if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - } -#ifdef REALLOC_ZERO_BYTES_FREES - else if (bytes == 0) { - mspace_free(msp, oldmem); - } -#endif /* REALLOC_ZERO_BYTES_FREES */ - else { - size_t nb = request2size(bytes); - mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS - mstate m = (mstate)msp; -#else /* FOOTERS */ - mstate m = get_mstate_for(oldp); - if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); - return 0; - } -#endif /* FOOTERS */ - if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); - POSTACTION(m); - if (newp != 0) { - check_inuse_chunk(m, newp); - mem = chunk2mem(newp); - } - else { - mem = mspace_malloc(m, bytes); - if (mem != 0) { - size_t oc = chunksize(oldp) - overhead_for(oldp); - memcpy(mem, oldmem, (oc < bytes)? oc : bytes); - mspace_free(m, oldmem); - } - } - } - } - return mem; -} - -void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; - if (oldmem != 0) { - if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); - mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS - mstate m = (mstate)msp; -#else /* FOOTERS */ - mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ - if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); - return 0; - } -#endif /* FOOTERS */ - if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); - POSTACTION(m); - if (newp == oldp) { - check_inuse_chunk(m, newp); - mem = oldmem; - } - } - } - } - return mem; -} - -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - if (alignment <= MALLOC_ALIGNMENT) - return mspace_malloc(msp, bytes); - return internal_memalign(ms, alignment, bytes); -} - -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - return ialloc(ms, n_elements, &sz, 3, chunks); -} - -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - return ialloc(ms, n_elements, sizes, 0, chunks); -} - -size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { - return internal_bulk_free((mstate)msp, array, nelem); -} - -#if MALLOC_INSPECT_ALL -void mspace_inspect_all(mspace msp, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - if (!PREACTION(ms)) { - internal_inspect_all(ms, handler, arg); - POSTACTION(ms); - } - } - else { - USAGE_ERROR_ACTION(ms,ms); - } -} -#endif /* MALLOC_INSPECT_ALL */ - -int mspace_trim(mspace msp, size_t pad) { - int result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - if (!PREACTION(ms)) { - result = sys_trim(ms, pad); - POSTACTION(ms); - } - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - -#if !NO_MALLOC_STATS -void mspace_malloc_stats(mspace msp) { - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - internal_malloc_stats(ms); - } - else { - USAGE_ERROR_ACTION(ms,ms); - } -} -#endif /* NO_MALLOC_STATS */ - -size_t mspace_footprint(mspace msp) { - size_t result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - result = ms->footprint; - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - -size_t mspace_max_footprint(mspace msp) { - size_t result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - result = ms->max_footprint; - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - -size_t mspace_footprint_limit(mspace msp) { - size_t result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - size_t maf = ms->footprint_limit; - result = (maf == 0) ? 
MAX_SIZE_T : maf; - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - -size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { - size_t result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ - if (bytes == MAX_SIZE_T) - result = 0; /* disable */ - else - result = granularity_align(bytes); - ms->footprint_limit = result; - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - -#if !NO_MALLINFO -struct mallinfo mspace_mallinfo(mspace msp) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } - return internal_mallinfo(ms); -} -#endif /* NO_MALLINFO */ - -size_t mspace_usable_size(const void* mem) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); - } - return 0; -} - -int mspace_mallopt(int param_number, int value) { - return change_mparam(param_number, value); -} - -#endif /* MSPACES */ - - -/* -------------------- Alternative MORECORE functions ------------------- */ - -/* - Guidelines for creating a custom version of MORECORE: - - * For best performance, MORECORE should allocate in multiples of pagesize. - * MORECORE may allocate more memory than requested. (Or even less, - but this will usually result in a malloc failure.) - * MORECORE must not allocate memory when given argument zero, but - instead return one past the end address of memory from previous - nonzero call. - * For best performance, consecutive calls to MORECORE with positive - arguments should return increasing addresses, indicating that - space has been contiguously extended. - * Even though consecutive calls to MORECORE need not return contiguous - addresses, it must be OK for malloc'ed chunks to span multiple - regions in those cases where they do happen to be contiguous. - * MORECORE need not handle negative arguments -- it may instead - just return MFAIL when given negative arguments. - Negative arguments are always multiples of pagesize. MORECORE - must not misinterpret negative args as large positive unsigned - args. You can suppress all such calls from even occurring by defining - MORECORE_CANNOT_TRIM, - - As an example alternative MORECORE, here is a custom allocator - kindly contributed for pre-OSX macOS. It uses virtually but not - necessarily physically contiguous non-paged memory (locked in, - present and won't get swapped out). You can use it by uncommenting - this section, adding some #includes, and setting up the appropriate - defines above: - - #define MORECORE osMoreCore - - There is also a shutdown routine that should somehow be called for - cleanup upon program exit. 
- - #define MAX_POOL_ENTRIES 100 - #define MINIMUM_MORECORE_SIZE (64 * 1024U) - static int next_os_pool; - void *our_os_pools[MAX_POOL_ENTRIES]; - - void *osMoreCore(int size) - { - void *ptr = 0; - static void *sbrk_top = 0; - - if (size > 0) - { - if (size < MINIMUM_MORECORE_SIZE) - size = MINIMUM_MORECORE_SIZE; - if (CurrentExecutionLevel() == kTaskLevel) - ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); - if (ptr == 0) - { - return (void *) MFAIL; - } - // save ptrs so they can be freed during cleanup - our_os_pools[next_os_pool] = ptr; - next_os_pool++; - ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); - sbrk_top = (char *) ptr + size; - return ptr; - } - else if (size < 0) - { - // we don't currently support shrink behavior - return (void *) MFAIL; - } - else - { - return sbrk_top; - } - } - - // cleanup any allocated memory pools - // called as last thing before shutting down driver - - void osCleanupMem(void) - { - void **ptr; - - for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) - if (*ptr) - { - PoolDeallocate(*ptr); - *ptr = 0; - } - } - -*/ - - -/* ----------------------------------------------------------------------- -History: - v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea - * fix bad comparison in dlposix_memalign - * don't reuse adjusted asize in sys_alloc - * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion - * reduce compiler warnings -- thanks to all who reported/suggested these - - v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee) - * Always perform unlink checks unless INSECURE - * Add posix_memalign. - * Improve realloc to expand in more cases; expose realloc_in_place. - Thanks to Peter Buhr for the suggestion. - * Add footprint_limit, inspect_all, bulk_free. Thanks - to Barry Hayes and others for the suggestions. - * Internal refactorings to avoid calls while holding locks - * Use non-reentrant locks by default. Thanks to Roland McGrath - for the suggestion. - * Small fixes to mspace_destroy, reset_on_error. - * Various configuration extensions/changes. Thanks - to all who contributed these. - - V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu) - * Update Creative Commons URL - - V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee) - * Use zeros instead of prev foot for is_mmapped - * Add mspace_track_large_chunks; thanks to Jean Brouwers - * Fix set_inuse in internal_realloc; thanks to Jean Brouwers - * Fix insufficient sys_alloc padding when using 16byte alignment - * Fix bad error check in mspace_footprint - * Adaptations for ptmalloc; thanks to Wolfram Gloger. - * Reentrant spin locks; thanks to Earl Chew and others - * Win32 improvements; thanks to Niall Douglas and Earl Chew - * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options - * Extension hook in malloc_state - * Various small adjustments to reduce warnings on some compilers - * Various configuration extensions/changes for more platforms. Thanks - to all who contributed these. - - V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) - * Add max_footprint functions - * Ensure all appropriate literals are size_t - * Fix conditional compilation problem for some #define settings - * Avoid concatenating segments with the one provided - in create_mspace_with_base - * Rename some variables to avoid compiler shadowing warnings - * Use explicit lock initialization. - * Better handling of sbrk interference. 
- * Simplify and fix segment insertion, trimming and mspace_destroy - * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x - * Thanks especially to Dennis Flanagan for help on these. - - V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) - * Fix memalign brace error. - - V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) - * Fix improper #endif nesting in C++ - * Add explicit casts needed for C++ - - V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) - * Use trees for large bins - * Support mspaces - * Use segments to unify sbrk-based and mmap-based system allocation, - removing need for emulation on most platforms without sbrk. - * Default safety checks - * Optional footer checks. Thanks to William Robertson for the idea. - * Internal code refactoring - * Incorporate suggestions and platform-specific changes. - Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, - Aaron Bachmann, Emery Berger, and others. - * Speed up non-fastbin processing enough to remove fastbins. - * Remove useless cfree() to avoid conflicts with other apps. - * Remove internal memcpy, memset. Compilers handle builtins better. - * Remove some options that no one ever used and rename others. - - V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) - * Fix malloc_state bitmap array misdeclaration - - V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) - * Allow tuning of FIRST_SORTED_BIN_SIZE - * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. - * Better detection and support for non-contiguousness of MORECORE. - Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger - * Bypass most of malloc if no frees. Thanks To Emery Berger. - * Fix freeing of old top non-contiguous chunk im sysmalloc. - * Raised default trim and map thresholds to 256K. - * Fix mmap-related #defines. Thanks to Lubos Lunak. - * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. - * Branch-free bin calculation - * Default trim and mmap thresholds now 256K. - - V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) - * Introduce independent_comalloc and independent_calloc. - Thanks to Michael Pachos for motivation and help. - * Make optional .h file available - * Allow > 2GB requests on 32bit systems. - * new WIN32 sbrk, mmap, munmap, lock code from . - Thanks also to Andreas Mueller , - and Anonymous. - * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for - helping test this.) - * memalign: check alignment arg - * realloc: don't try to shift chunks backwards, since this - leads to more fragmentation in some programs and doesn't - seem to help in any others. - * Collect all cases in malloc requiring system memory into sysmalloc - * Use mmap as backup to sbrk - * Place all internal state in malloc_state - * Introduce fastbins (although similar to 2.5.1) - * Many minor tunings and cosmetic improvements - * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK - * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS - Thanks to Tony E. Bennett and others. - * Include errno.h to support default failure action. - - V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) - * return null for negative arguments - * Added Several WIN32 cleanups from Martin C. Fong - * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' - (e.g. 
WIN32 platforms) - * Cleanup header file inclusion for WIN32 platforms - * Cleanup code to avoid Microsoft Visual C++ compiler complaints - * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing - memory allocation routines - * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) - * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to - usage of 'assert' in non-WIN32 code - * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to - avoid infinite loop - * Always call 'fREe()' rather than 'free()' - - V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) - * Fixed ordering problem with boundary-stamping - - V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) - * Added pvalloc, as recommended by H.J. Liu - * Added 64bit pointer support mainly from Wolfram Gloger - * Added anonymously donated WIN32 sbrk emulation - * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen - * malloc_extend_top: fix mask error that caused wastage after - foreign sbrks - * Add linux mremap support code from HJ Liu - - V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) - * Integrated most documentation with the code. - * Add support for mmap, with help from - Wolfram Gloger (Gloger@lrz.uni-muenchen.de). - * Use last_remainder in more cases. - * Pack bins using idea from colin@nyx10.cs.du.edu - * Use ordered bins instead of best-fit threshhold - * Eliminate block-local decls to simplify tracing and debugging. - * Support another case of realloc via move into top - * Fix error occuring when initial sbrk_base not word-aligned. - * Rely on page size for units instead of SBRK_UNIT to - avoid surprises about sbrk alignment conventions. - * Add mallinfo, mallopt. Thanks to Raymond Nijssen - (raymond@es.ele.tue.nl) for the suggestion. - * Add `pad' argument to malloc_trim and top_pad mallopt parameter. - * More precautions for cases where other routines call sbrk, - courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). - * Added macros etc., allowing use in linux libc from - H.J. Lu (hjl@gnu.ai.mit.edu) - * Inverted this history list - - V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) - * Re-tuned and fixed to behave more nicely with V2.6.0 changes. - * Removed all preallocation code since under current scheme - the work required to undo bad preallocations exceeds - the work saved in good cases for most test programs. - * No longer use return list or unconsolidated bins since - no scheme using them consistently outperforms those that don't - given above changes. - * Use best fit for very large chunks to prevent some worst-cases. - * Added some support for debugging - - V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) - * Removed footers when chunks are in use. Thanks to - Paul Wilson (wilson@cs.texas.edu) for the suggestion. - - V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) - * Added malloc_trim, with help from Wolfram Gloger - (wmglo@Dent.MED.Uni-Muenchen.DE). 
- - V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) - - V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) - * realloc: try to expand in both directions - * malloc: swap order of clean-bin strategy; - * realloc: only conditionally expand backwards - * Try not to scavenge used bins - * Use bin counts as a guide to preallocation - * Occasionally bin return list chunks in first scan - * Add a few optimizations from colin@nyx10.cs.du.edu - - V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) - * faster bin computation & slightly different binning - * merged all consolidations to one part of malloc proper - (eliminating old malloc_find_space & malloc_clean_bin) - * Scan 2 returns chunks (not just 1) - * Propagate failure in realloc if malloc returns 0 - * Add stuff to allow compilation on non-ANSI compilers - from kpv@research.att.com - - V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) - * removed potential for odd address access in prev_chunk - * removed dependency on getpagesize.h - * misc cosmetics and a bit more internal documentation - * anticosmetics: mangled names in macros to evade debugger strangeness - * tested on sparc, hp-700, dec-mips, rs6000 - with gcc & native cc (hp, dec only) allowing - Detlefs & Zorn comparison study (in SIGPLAN Notices.) - - Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) - * Based loosely on libg++-1.2X malloc. (It retains some of the overall - structure of old version, but most details differ.) - -*/ diff --git a/cpp/src/plasma/thirdparty/xxhash.cc b/cpp/src/plasma/thirdparty/xxhash.cc deleted file mode 100644 index f74880b0de71d..0000000000000 --- a/cpp/src/plasma/thirdparty/xxhash.cc +++ /dev/null @@ -1,889 +0,0 @@ -/* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - - -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif defined(__INTEL_COMPILER) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. - * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. - * By default, this option is disabled. To enable it, uncomment below define : - */ -/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ - -/*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independence be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. - */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 -#endif - -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. - * The check costs one initial branch per hash; set to 0 when the input data - * is guaranteed to be aligned. - */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - - -/* ************************************* -* Includes & Memory related functions -***************************************/ -/*! 
Modify the local functions below should you wish to use some other memory routines -* for malloc(), free() */ -#include -static void* XXH_malloc(size_t s) { return malloc(s); } -static void XXH_free (void* p) { free(p); } -/*! and for memcpy() */ -#include -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } - -#define XXH_STATIC_LINKING_ONLY -#include "xxhash.h" - - -/* ************************************* -* Compiler Specific Options -***************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# define FORCE_INLINE static __forceinline -#else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -#endif - - -/* ************************************* -* Basic Types -***************************************/ -#ifndef MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - typedef int32_t S32; -# else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; -# endif -#endif - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; } __attribute__((packed)) unalign; -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } - -#else - -/* portable and safe solution. Generally efficient. 
- * see : http://stackoverflow.com/a/32095106/646947 - */ -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ -#if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) -#endif - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -#else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -#endif - - -/* ************************************* -* Architecture Macros -***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; - -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ -#ifndef XXH_CPU_LITTLE_ENDIAN - static const int g_one = 1; -# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) -#endif - - -/* *************************** -* Memory reads -*****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; - -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); -} - -FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); -} - -static U32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); -} - - -/* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - - -/* ******************************************************************* -* 32-bits hash functions -*********************************************************************/ -static const U32 PRIME32_1 = 2654435761U; -static const U32 PRIME32_2 = 2246822519U; -static const U32 PRIME32_3 = 3266489917U; -static const U32 PRIME32_4 = 668265263U; -static const U32 PRIME32_5 = 374761393U; - -static U32 XXH32_round(U32 seed, U32 input) -{ - seed += input * PRIME32_2; - seed = XXH_rotl32(seed, 13); - seed *= PRIME32_1; - return seed; -} - -FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U32 h32; -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)16; - } -#endif - - if (len>=16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; - v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; - v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; - v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p<=limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + PRIME32_5; - } - - h32 += (U32) len; - - while (p+4<=bEnd) { - h32 += XXH_get32bits(p) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - p+=4; - } - - while (p> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} - - -XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_state_t state; - XXH32_reset(&state, seed); - XXH32_update(&state, input, len); - return XXH32_digest(&state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - - -/*====== Hash streaming ======*/ - -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* 
statePtr, unsigned int seed) -{ - XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; - memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - - -FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); -} - - - -FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) -{ - const BYTE * p = (const BYTE*)state->mem32; - const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; - U32 h32; - - if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); - } else { - h32 = state->v3 /* == seed */ + PRIME32_5; - } - - h32 += state->total_len_32; - - while (p+4<=bEnd) { - h32 += XXH_readLE32(p, endian) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4; - p+=4; - } - - while (p> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} - - -XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_digest_endian(state_in, XXH_littleEndian); - else - return 
XXH32_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical representation ======*/ - -/*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. -*/ - -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} - - -#ifndef XXH_NO_LONG_LONG - -/* ******************************************************************* -* 64-bits hash functions -*********************************************************************/ - -/*====== Memory access ======*/ - -#ifndef MEM_MODULE -# define MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint64_t U64; -# else - typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ -# endif -#endif - - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; -static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } - -#else - -/* portable and safe solution. Generally efficient. - * see : http://stackoverflow.com/a/32095106/646947 - */ - -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 -#else -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); -} - -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); -} - -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} - - -/*====== xxh64 ======*/ - -static const U64 PRIME64_1 = 11400714785074694791ULL; -static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; -} - -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; -} - -FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U64 h64; -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } -#endif - - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - - -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_state_t state; - XXH64_reset(&state, seed); - XXH64_update(&state, input, len); - return XXH64_digest(&state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - -/*====== Hash Streaming ======*/ - -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); 
-} - -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - -FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} - -FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - const BYTE * p = (const BYTE*)state->mem64; - const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 + PRIME64_5; - } - - h64 += (U64) state->total_len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; 
- - return h64; -} - -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical representation ======*/ - -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} - -#endif /* XXH_NO_LONG_LONG */ diff --git a/cpp/src/plasma/thirdparty/xxhash.h b/cpp/src/plasma/thirdparty/xxhash.h deleted file mode 100644 index 9d831e03b35f6..0000000000000 --- a/cpp/src/plasma/thirdparty/xxhash.h +++ /dev/null @@ -1,293 +0,0 @@ -/* - xxHash - Extremely Fast Hash algorithm - Header File - Copyright (C) 2012-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - -/* Notice extracted from xxHash homepage : - -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. -It also successfully passes all tests from the SMHasher suite. - -Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) - -Name Speed Q.Score Author -xxHash 5.4 GB/s 10 -CrapWow 3.2 GB/s 2 Andrew -MumurHash 3a 2.7 GB/s 10 Austin Appleby -SpookyHash 2.0 GB/s 10 Bob Jenkins -SBox 1.4 GB/s 9 Bret Mulvey -Lookup3 1.2 GB/s 9 Bob Jenkins -SuperFastHash 1.2 GB/s 1 Paul Hsieh -CityHash64 1.05 GB/s 10 Pike & Alakuijala -FNV 0.55 GB/s 5 Fowler, Noll, Vo -CRC32 0.43 GB/s 9 -MD5-32 0.33 GB/s 10 Ronald L. Rivest -SHA1-32 0.28 GB/s 10 - -Q.Score is a measure of quality of the hash function. -It depends on successfully passing SMHasher test set. -10 is a perfect score. - -A 64-bits version, named XXH64, is available since r35. -It offers much better speed, but for 64-bits applications only. 
-Name Speed on 64 bits Speed on 32 bits -XXH64 13.8 GB/s 1.9 GB/s -XXH32 6.8 GB/s 6.0 GB/s -*/ - -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* **************************** -* Definitions -******************************/ -#include /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; - - -/* **************************** -* API modifier -******************************/ -/** XXH_PRIVATE_API -* This is useful to include xxhash functions in `static` mode -* in order to inline them, and remove their symbol from the public list. -* Methodology : -* #define XXH_PRIVATE_API -* #include "xxhash.h" -* `xxhash.c` is automatically included. -* It's not useful to compile and link it as a separate module. -*/ -#ifdef XXH_PRIVATE_API -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif -# if defined(__GNUC__) -# define XXH_PUBLIC_API static __inline __attribute__((unused)) -# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define XXH_PUBLIC_API static inline -# elif defined(_MSC_VER) -# define XXH_PUBLIC_API static __inline -# else -# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ -# endif -#else -# define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_PRIVATE_API */ - -/*!XXH_NAMESPACE, aka Namespace Emulation : - -If you want to include _and expose_ xxHash functions from within your own library, -but also want to avoid symbol collisions with other libraries which may also include xxHash, - -you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library -with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). - -Note that no change is required within the calling program as long as it includes `xxhash.h` : -regular symbol name will be automatically translated by this header. 
-*/ -#ifdef XXH_NAMESPACE -# define XXH_CAT(A,B) A##B -# define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) -# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) -#endif - - -/* ************************************* -* Version -***************************************/ -#define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 2 -#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) -XXH_PUBLIC_API unsigned XXH_versionNumber (void); - - -/*-********************************************************************** -* 32-bits hash -************************************************************************/ -typedef unsigned int XXH32_hash_t; - -/*! XXH32() : - Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". - The memory between input & input+length must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. - Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); - -/*====== Streaming ======*/ -typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); - -/* -These functions generate the xxHash of an input provided in multiple segments. -Note that, for small input, they are slower than single-call functions, due to state management. -For small input, prefer `XXH32()` and `XXH64()` . - -XXH state must first be allocated, using XXH*_createState() . - -Start a new hash by initializing state with a seed, using XXH*_reset(). - -Then, feed the hash state by calling XXH*_update() as many times as necessary. -Obviously, input must be allocated and read accessible. 
-The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. - -Finally, a hash value can be produced anytime, by using XXH*_digest(). -This function returns the nn-bits hash as an int or long long. - -It's still possible to continue inserting input into the hash state after a digest, -and generate some new hashes later on, by calling again XXH*_digest(). - -When done, free XXH state space if it was allocated dynamically. -*/ - -/*====== Canonical representation ======*/ - -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); - -/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. -* The canonical representation uses human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. -*/ - - -#ifndef XXH_NO_LONG_LONG -/*-********************************************************************** -* 64-bits hash -************************************************************************/ -typedef unsigned long long XXH64_hash_t; - -/*! XXH64() : - Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". - "seed" can be used to alter the result predictably. - This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark). -*/ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); - -/*====== Streaming ======*/ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); - -/*====== Canonical representation ======*/ -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); -#endif /* XXH_NO_LONG_LONG */ - - -#ifdef XXH_STATIC_LINKING_ONLY - -/* ================================================================================================ - This section contains definitions which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - They shall only be used with static linking. - Never use these definitions in association with dynamic linking ! -=================================================================================================== */ - -/* These definitions are only meant to make possible - static allocation of XXH state, on stack or in a struct for example. - Never use members directly. 
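A minimal sketch of the streaming usage those notes describe (create a state, reset it with a seed, feed segments with update, take a digest, free the state), plus the canonical big-endian serialization mentioned above. This is an illustrative aside rather than part of the patch, and it assumes the xxhash.h/xxhash.cc pair quoted here is on the include path:

    #include <cstdio>
    #include <cstring>
    #include "xxhash.h"

    int main() {
      const char part1[] = "hello, ";
      const char part2[] = "plasma";

      // Streaming: hash an input that arrives in several segments.
      XXH64_state_t* state = XXH64_createState();
      XXH64_reset(state, /*seed=*/0);
      XXH64_update(state, part1, strlen(part1));
      XXH64_update(state, part2, strlen(part2));
      XXH64_hash_t streamed = XXH64_digest(state);
      XXH64_freeState(state);

      // Single-call form: the same bytes in one buffer give the same hash.
      char joined[32];
      snprintf(joined, sizeof(joined), "%s%s", part1, part2);
      XXH64_hash_t oneshot = XXH64(joined, strlen(joined), /*seed=*/0);

      // Canonical form: a fixed big-endian byte layout that stays comparable
      // when written to a file and read back on a machine of either endianness.
      XXH64_canonical_t canonical;
      XXH64_canonicalFromHash(&canonical, streamed);

      printf("streamed=%llu oneshot=%llu equal=%d first_canonical_byte=%02x\n",
             (unsigned long long)streamed, (unsigned long long)oneshot,
             (int)(streamed == oneshot), canonical.digest[0]);
      return 0;
    }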
*/ - -struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; /* buffer defined as U32 for alignment */ - unsigned memsize; - unsigned reserved; /* never read nor write, will be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - -#ifndef XXH_NO_LONG_LONG /* remove 64-bits support */ -struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ - unsigned memsize; - unsigned reserved[2]; /* never read nor write, will be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ -#endif - -#ifdef XXH_PRIVATE_API -# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ -#endif - -#endif /* XXH_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif - -#endif /* XXHASH_H_5627135585666179 */ From e9f76e125b836d0fdc0a533e2fee3fca8bf4c1a1 Mon Sep 17 00:00:00 2001 From: Wes McKinney Date: Thu, 20 Jul 2017 13:02:41 -0400 Subject: [PATCH 02/16] [maven-release-plugin] prepare release apache-arrow-0.5.0 Change-Id: Icb6a8ac2d92a38190fbc89d88a0a97120646f843 --- java/format/pom.xml | 2 +- java/memory/pom.xml | 2 +- java/pom.xml | 4 ++-- java/tools/pom.xml | 2 +- java/vector/pom.xml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/java/format/pom.xml b/java/format/pom.xml index e09275476d175..6b962e2b5ca40 100644 --- a/java/format/pom.xml +++ b/java/format/pom.xml @@ -15,7 +15,7 @@ arrow-java-root org.apache.arrow - 0.5.0-SNAPSHOT + 0.5.0 arrow-format diff --git a/java/memory/pom.xml b/java/memory/pom.xml index dc4d0daf93d92..e204cd66fd04d 100644 --- a/java/memory/pom.xml +++ b/java/memory/pom.xml @@ -14,7 +14,7 @@ org.apache.arrow arrow-java-root - 0.5.0-SNAPSHOT + 0.5.0 arrow-memory Arrow Memory diff --git a/java/pom.xml b/java/pom.xml index 1ec3d561121c3..7eaf27db813dc 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -20,7 +20,7 @@ org.apache.arrow arrow-java-root - 0.5.0-SNAPSHOT + 0.5.0 pom Apache Arrow Java Root POM @@ -42,7 +42,7 @@ scm:git:https://git-wip-us.apache.org/repos/asf/arrow.git scm:git:https://git-wip-us.apache.org/repos/asf/arrow.git https://github.com/apache/arrow - HEAD + apache-arrow-0.5.0 diff --git a/java/tools/pom.xml b/java/tools/pom.xml index 53331f2f24541..6d28c9f556c7e 100644 --- a/java/tools/pom.xml +++ b/java/tools/pom.xml @@ -14,7 +14,7 @@ org.apache.arrow arrow-java-root - 0.5.0-SNAPSHOT + 0.5.0 arrow-tools Arrow Tools diff --git a/java/vector/pom.xml b/java/vector/pom.xml index a117a2fb3b7cb..c484935b4c3f2 100644 --- a/java/vector/pom.xml +++ b/java/vector/pom.xml @@ -14,7 +14,7 @@ org.apache.arrow arrow-java-root - 0.5.0-SNAPSHOT + 0.5.0 arrow-vector Arrow Vectors From 9b26ed84e144a2c251a8e163d88302a5bdaf7dfd Mon Sep 17 00:00:00 2001 From: Wes McKinney Date: Thu, 20 Jul 2017 13:02:51 -0400 Subject: [PATCH 03/16] [maven-release-plugin] prepare for next development iteration Change-Id: I8fff2322e3b3feb6e80cfc2be1d5743e9ec9bbb7 --- java/format/pom.xml | 2 +- java/memory/pom.xml | 2 +- java/pom.xml | 4 ++-- java/tools/pom.xml | 2 +- java/vector/pom.xml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/java/format/pom.xml b/java/format/pom.xml index 6b962e2b5ca40..af8ff609dbc9c 100644 --- a/java/format/pom.xml +++ b/java/format/pom.xml @@ -15,7 +15,7 @@ arrow-java-root org.apache.arrow - 0.5.0 + 0.6.0-SNAPSHOT 
arrow-format diff --git a/java/memory/pom.xml b/java/memory/pom.xml index e204cd66fd04d..9a8d2d7c9b56f 100644 --- a/java/memory/pom.xml +++ b/java/memory/pom.xml @@ -14,7 +14,7 @@ org.apache.arrow arrow-java-root - 0.5.0 + 0.6.0-SNAPSHOT arrow-memory Arrow Memory diff --git a/java/pom.xml b/java/pom.xml index 7eaf27db813dc..2613a44104576 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -20,7 +20,7 @@ org.apache.arrow arrow-java-root - 0.5.0 + 0.6.0-SNAPSHOT pom Apache Arrow Java Root POM @@ -42,7 +42,7 @@ scm:git:https://git-wip-us.apache.org/repos/asf/arrow.git scm:git:https://git-wip-us.apache.org/repos/asf/arrow.git https://github.com/apache/arrow - apache-arrow-0.5.0 + HEAD diff --git a/java/tools/pom.xml b/java/tools/pom.xml index 6d28c9f556c7e..9d067ef1e9bc2 100644 --- a/java/tools/pom.xml +++ b/java/tools/pom.xml @@ -14,7 +14,7 @@ org.apache.arrow arrow-java-root - 0.5.0 + 0.6.0-SNAPSHOT arrow-tools Arrow Tools diff --git a/java/vector/pom.xml b/java/vector/pom.xml index c484935b4c3f2..e15ab9a2497fc 100644 --- a/java/vector/pom.xml +++ b/java/vector/pom.xml @@ -14,7 +14,7 @@ org.apache.arrow arrow-java-root - 0.5.0 + 0.6.0-SNAPSHOT arrow-vector Arrow Vectors From 2c8101515e2a0ab515c03f82dc84b02ca6c466da Mon Sep 17 00:00:00 2001 From: Wes McKinney Date: Sun, 23 Jul 2017 14:37:54 -0400 Subject: [PATCH 04/16] [C++] Restore Plasma source tree after 0.5.0 release This reverts commit 62ef2cd8a39fc93e7fa4bb790d7cd92adb77571f. --- cpp/src/plasma/CMakeLists.txt | 113 + cpp/src/plasma/client.cc | 557 ++ cpp/src/plasma/client.h | 343 ++ cpp/src/plasma/common.cc | 83 + cpp/src/plasma/common.h | 63 + cpp/src/plasma/events.cc | 81 + cpp/src/plasma/events.h | 99 + cpp/src/plasma/eviction_policy.cc | 107 + cpp/src/plasma/eviction_policy.h | 134 + cpp/src/plasma/extension.cc | 456 ++ cpp/src/plasma/extension.h | 50 + cpp/src/plasma/fling.cc | 90 + cpp/src/plasma/fling.h | 52 + cpp/src/plasma/format/.gitignore | 1 + cpp/src/plasma/format/common.fbs | 34 + cpp/src/plasma/format/plasma.fbs | 291 + cpp/src/plasma/io.cc | 212 + cpp/src/plasma/io.h | 55 + cpp/src/plasma/malloc.cc | 178 + cpp/src/plasma/malloc.h | 26 + cpp/src/plasma/plasma.cc | 64 + cpp/src/plasma/plasma.h | 191 + cpp/src/plasma/protocol.cc | 502 ++ cpp/src/plasma/protocol.h | 170 + cpp/src/plasma/store.cc | 683 +++ cpp/src/plasma/store.h | 169 + cpp/src/plasma/test/client_tests.cc | 132 + cpp/src/plasma/test/run_tests.sh | 61 + cpp/src/plasma/test/run_valgrind.sh | 27 + cpp/src/plasma/test/serialization_tests.cc | 388 ++ cpp/src/plasma/thirdparty/ae/ae.c | 465 ++ cpp/src/plasma/thirdparty/ae/ae.h | 123 + cpp/src/plasma/thirdparty/ae/ae_epoll.c | 135 + cpp/src/plasma/thirdparty/ae/ae_evport.c | 320 + cpp/src/plasma/thirdparty/ae/ae_kqueue.c | 138 + cpp/src/plasma/thirdparty/ae/ae_select.c | 106 + cpp/src/plasma/thirdparty/ae/config.h | 54 + cpp/src/plasma/thirdparty/ae/zmalloc.h | 45 + cpp/src/plasma/thirdparty/dlmalloc.c | 6281 ++++++++++++++++++++ cpp/src/plasma/thirdparty/xxhash.cc | 889 +++ cpp/src/plasma/thirdparty/xxhash.h | 293 + 41 files changed, 14261 insertions(+) create mode 100644 cpp/src/plasma/CMakeLists.txt create mode 100644 cpp/src/plasma/client.cc create mode 100644 cpp/src/plasma/client.h create mode 100644 cpp/src/plasma/common.cc create mode 100644 cpp/src/plasma/common.h create mode 100644 cpp/src/plasma/events.cc create mode 100644 cpp/src/plasma/events.h create mode 100644 cpp/src/plasma/eviction_policy.cc create mode 100644 cpp/src/plasma/eviction_policy.h create mode 100644 cpp/src/plasma/extension.cc create mode 
100644 cpp/src/plasma/extension.h create mode 100644 cpp/src/plasma/fling.cc create mode 100644 cpp/src/plasma/fling.h create mode 100644 cpp/src/plasma/format/.gitignore create mode 100644 cpp/src/plasma/format/common.fbs create mode 100644 cpp/src/plasma/format/plasma.fbs create mode 100644 cpp/src/plasma/io.cc create mode 100644 cpp/src/plasma/io.h create mode 100644 cpp/src/plasma/malloc.cc create mode 100644 cpp/src/plasma/malloc.h create mode 100644 cpp/src/plasma/plasma.cc create mode 100644 cpp/src/plasma/plasma.h create mode 100644 cpp/src/plasma/protocol.cc create mode 100644 cpp/src/plasma/protocol.h create mode 100644 cpp/src/plasma/store.cc create mode 100644 cpp/src/plasma/store.h create mode 100644 cpp/src/plasma/test/client_tests.cc create mode 100644 cpp/src/plasma/test/run_tests.sh create mode 100644 cpp/src/plasma/test/run_valgrind.sh create mode 100644 cpp/src/plasma/test/serialization_tests.cc create mode 100644 cpp/src/plasma/thirdparty/ae/ae.c create mode 100644 cpp/src/plasma/thirdparty/ae/ae.h create mode 100644 cpp/src/plasma/thirdparty/ae/ae_epoll.c create mode 100644 cpp/src/plasma/thirdparty/ae/ae_evport.c create mode 100644 cpp/src/plasma/thirdparty/ae/ae_kqueue.c create mode 100644 cpp/src/plasma/thirdparty/ae/ae_select.c create mode 100644 cpp/src/plasma/thirdparty/ae/config.h create mode 100644 cpp/src/plasma/thirdparty/ae/zmalloc.h create mode 100644 cpp/src/plasma/thirdparty/dlmalloc.c create mode 100644 cpp/src/plasma/thirdparty/xxhash.cc create mode 100644 cpp/src/plasma/thirdparty/xxhash.h diff --git a/cpp/src/plasma/CMakeLists.txt b/cpp/src/plasma/CMakeLists.txt new file mode 100644 index 0000000000000..4ff3beba779c2 --- /dev/null +++ b/cpp/src/plasma/CMakeLists.txt @@ -0,0 +1,113 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +cmake_minimum_required(VERSION 2.8) + +project(plasma) + +find_package(PythonLibsNew REQUIRED) +find_package(Threads) + +option(PLASMA_PYTHON + "Build the Plasma Python extensions" + OFF) + +if(APPLE) + SET(CMAKE_SHARED_LIBRARY_SUFFIX ".so") +endif(APPLE) + +include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS}) +include_directories("${FLATBUFFERS_INCLUDE_DIR}" "${CMAKE_CURRENT_LIST_DIR}/" "${CMAKE_CURRENT_LIST_DIR}/thirdparty/" "${CMAKE_CURRENT_LIST_DIR}/../") + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_XOPEN_SOURCE=500 -D_POSIX_C_SOURCE=200809L") + +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-conversion") + +# Compile flatbuffers + +set(PLASMA_FBS_SRC "${CMAKE_CURRENT_LIST_DIR}/format/plasma.fbs" "${CMAKE_CURRENT_LIST_DIR}/format/common.fbs") +set(OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/format/) + +set(PLASMA_FBS_OUTPUT_FILES + "${OUTPUT_DIR}/common_generated.h" + "${OUTPUT_DIR}/plasma_generated.h") + +add_custom_target(gen_plasma_fbs DEPENDS ${PLASMA_FBS_OUTPUT_FILES}) + +if(FLATBUFFERS_VENDORED) + add_dependencies(gen_plasma_fbs flatbuffers_ep) +endif() + +add_custom_command( + OUTPUT ${PLASMA_FBS_OUTPUT_FILES} + # The --gen-object-api flag generates a C++ class MessageT for each + # flatbuffers message Message, which can be used to store deserialized + # messages in data structures. This is currently used for ObjectInfo for + # example. + COMMAND ${FLATBUFFERS_COMPILER} -c -o ${OUTPUT_DIR} ${PLASMA_FBS_SRC} --gen-object-api + DEPENDS ${PLASMA_FBS_SRC} + COMMENT "Running flatc compiler on ${PLASMA_FBS_SRC}" + VERBATIM) + +if(UNIX AND NOT APPLE) + link_libraries(rt) +endif() + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") + +set_source_files_properties(extension.cc PROPERTIES COMPILE_FLAGS -Wno-strict-aliasing) + +set(PLASMA_SRCS + client.cc + common.cc + eviction_policy.cc + events.cc + fling.cc + io.cc + malloc.cc + plasma.cc + protocol.cc + thirdparty/ae/ae.c + thirdparty/xxhash.cc) + +ADD_ARROW_LIB(plasma + SOURCES ${PLASMA_SRCS} + DEPENDENCIES gen_plasma_fbs + SHARED_LINK_LIBS ${FLATBUFFERS_STATIC_LIB} ${CMAKE_THREAD_LIBS_INIT} arrow_static + STATIC_LINK_LIBS ${FLATBUFFERS_STATIC_LIB} ${CMAKE_THREAD_LIBS_INIT} arrow_static) + +# The optimization flag -O3 is suggested by dlmalloc.c, which is #included in +# malloc.cc; we set it here regardless of whether we do a debug or release build. +set_source_files_properties(malloc.cc PROPERTIES COMPILE_FLAGS "-Wno-error -O3") + +add_executable(plasma_store store.cc) +target_link_libraries(plasma_store plasma_static) + +ADD_ARROW_TEST(test/serialization_tests) +ARROW_TEST_LINK_LIBRARIES(test/serialization_tests plasma_static) +ADD_ARROW_TEST(test/client_tests) +ARROW_TEST_LINK_LIBRARIES(test/client_tests plasma_static) + +if(PLASMA_PYTHON) + add_library(plasma_extension SHARED extension.cc) + + if(APPLE) + target_link_libraries(plasma_extension plasma_static "-undefined dynamic_lookup") + else(APPLE) + target_link_libraries(plasma_extension plasma_static -Wl,--whole-archive ${FLATBUFFERS_STATIC_LIB} -Wl,--no-whole-archive) + endif(APPLE) +endif() diff --git a/cpp/src/plasma/client.cc b/cpp/src/plasma/client.cc new file mode 100644 index 0000000000000..dcb78e7ec52c6 --- /dev/null +++ b/cpp/src/plasma/client.cc @@ -0,0 +1,557 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// PLASMA CLIENT: Client library for using the plasma store and manager + +#include "plasma/client.h" + +#ifdef _WIN32 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "plasma/common.h" +#include "plasma/fling.h" +#include "plasma/io.h" +#include "plasma/plasma.h" +#include "plasma/protocol.h" + +#define XXH_STATIC_LINKING_ONLY +#include "thirdparty/xxhash.h" + +#define XXH64_DEFAULT_SEED 0 + +// Number of threads used for memcopy and hash computations. +constexpr int64_t kThreadPoolSize = 8; +constexpr int64_t kBytesInMB = 1 << 20; +static std::vector threadpool_(kThreadPoolSize); + +// If the file descriptor fd has been mmapped in this client process before, +// return the pointer that was returned by mmap, otherwise mmap it and store the +// pointer in a hash table. +uint8_t* PlasmaClient::lookup_or_mmap(int fd, int store_fd_val, int64_t map_size) { + auto entry = mmap_table_.find(store_fd_val); + if (entry != mmap_table_.end()) { + close(fd); + return entry->second.pointer; + } else { + uint8_t* result = reinterpret_cast( + mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)); + // TODO(pcm): Don't fail here, instead return a Status. + if (result == MAP_FAILED) { ARROW_LOG(FATAL) << "mmap failed"; } + close(fd); + ClientMmapTableEntry& entry = mmap_table_[store_fd_val]; + entry.pointer = result; + entry.length = map_size; + entry.count = 0; + return result; + } +} + +// Get a pointer to a file that we know has been memory mapped in this client +// process before. +uint8_t* PlasmaClient::lookup_mmapped_file(int store_fd_val) { + auto entry = mmap_table_.find(store_fd_val); + ARROW_CHECK(entry != mmap_table_.end()); + return entry->second.pointer; +} + +void PlasmaClient::increment_object_count( + const ObjectID& object_id, PlasmaObject* object, bool is_sealed) { + // Increment the count of the object to track the fact that it is being used. + // The corresponding decrement should happen in PlasmaClient::Release. + auto elem = objects_in_use_.find(object_id); + ObjectInUseEntry* object_entry; + if (elem == objects_in_use_.end()) { + // Add this object ID to the hash table of object IDs in use. The + // corresponding call to free happens in PlasmaClient::Release. + objects_in_use_[object_id] = + std::unique_ptr(new ObjectInUseEntry()); + objects_in_use_[object_id]->object = *object; + objects_in_use_[object_id]->count = 0; + objects_in_use_[object_id]->is_sealed = is_sealed; + object_entry = objects_in_use_[object_id].get(); + // Increment the count of the number of objects in the memory-mapped file + // that are being used. The corresponding decrement should happen in + // PlasmaClient::Release. 
+ auto entry = mmap_table_.find(object->handle.store_fd); + ARROW_CHECK(entry != mmap_table_.end()); + ARROW_CHECK(entry->second.count >= 0); + // Update the in_use_object_bytes_. + in_use_object_bytes_ += + (object_entry->object.data_size + object_entry->object.metadata_size); + entry->second.count += 1; + } else { + object_entry = elem->second.get(); + ARROW_CHECK(object_entry->count > 0); + } + // Increment the count of the number of instances of this object that are + // being used by this client. The corresponding decrement should happen in + // PlasmaClient::Release. + object_entry->count += 1; +} + +Status PlasmaClient::Create(const ObjectID& object_id, int64_t data_size, + uint8_t* metadata, int64_t metadata_size, uint8_t** data) { + ARROW_LOG(DEBUG) << "called plasma_create on conn " << store_conn_ << " with size " + << data_size << " and metadata size " << metadata_size; + RETURN_NOT_OK(SendCreateRequest(store_conn_, object_id, data_size, metadata_size)); + std::vector buffer; + RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaCreateReply, &buffer)); + ObjectID id; + PlasmaObject object; + RETURN_NOT_OK(ReadCreateReply(buffer.data(), &id, &object)); + // If the CreateReply included an error, then the store will not send a file + // descriptor. + int fd = recv_fd(store_conn_); + ARROW_CHECK(fd >= 0) << "recv not successful"; + ARROW_CHECK(object.data_size == data_size); + ARROW_CHECK(object.metadata_size == metadata_size); + // The metadata should come right after the data. + ARROW_CHECK(object.metadata_offset == object.data_offset + data_size); + *data = lookup_or_mmap(fd, object.handle.store_fd, object.handle.mmap_size) + + object.data_offset; + // If plasma_create is being called from a transfer, then we will not copy the + // metadata here. The metadata will be written along with the data streamed + // from the transfer. + if (metadata != NULL) { + // Copy the metadata to the buffer. + memcpy(*data + object.data_size, metadata, metadata_size); + } + // Increment the count of the number of instances of this object that this + // client is using. A call to PlasmaClient::Release is required to decrement + // this + // count. Cache the reference to the object. + increment_object_count(object_id, &object, false); + // We increment the count a second time (and the corresponding decrement will + // happen in a PlasmaClient::Release call in plasma_seal) so even if the + // buffer + // returned by PlasmaClient::Dreate goes out of scope, the object does not get + // released before the call to PlasmaClient::Seal happens. + increment_object_count(object_id, &object, false); + return Status::OK(); +} + +Status PlasmaClient::Get(const ObjectID* object_ids, int64_t num_objects, + int64_t timeout_ms, ObjectBuffer* object_buffers) { + // Fill out the info for the objects that are already in use locally. + bool all_present = true; + for (int i = 0; i < num_objects; ++i) { + auto object_entry = objects_in_use_.find(object_ids[i]); + if (object_entry == objects_in_use_.end()) { + // This object is not currently in use by this client, so we need to send + // a request to the store. + all_present = false; + // Make a note to ourselves that the object is not present. + object_buffers[i].data_size = -1; + } else { + // NOTE: If the object is still unsealed, we will deadlock, since we must + // have been the one who created it. 
+ ARROW_CHECK(object_entry->second->is_sealed) + << "Plasma client called get on an unsealed object that it created"; + PlasmaObject* object = &object_entry->second->object; + object_buffers[i].data = lookup_mmapped_file(object->handle.store_fd); + object_buffers[i].data = object_buffers[i].data + object->data_offset; + object_buffers[i].data_size = object->data_size; + object_buffers[i].metadata = object_buffers[i].data + object->data_size; + object_buffers[i].metadata_size = object->metadata_size; + // Increment the count of the number of instances of this object that this + // client is using. A call to PlasmaClient::Release is required to + // decrement this + // count. Cache the reference to the object. + increment_object_count(object_ids[i], object, true); + } + } + + if (all_present) { return Status::OK(); } + + // If we get here, then the objects aren't all currently in use by this + // client, so we need to send a request to the plasma store. + RETURN_NOT_OK(SendGetRequest(store_conn_, object_ids, num_objects, timeout_ms)); + std::vector buffer; + RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaGetReply, &buffer)); + std::vector received_object_ids(num_objects); + std::vector object_data(num_objects); + PlasmaObject* object; + RETURN_NOT_OK(ReadGetReply( + buffer.data(), received_object_ids.data(), object_data.data(), num_objects)); + + for (int i = 0; i < num_objects; ++i) { + DCHECK(received_object_ids[i] == object_ids[i]); + object = &object_data[i]; + if (object_buffers[i].data_size != -1) { + // If the object was already in use by the client, then the store should + // have returned it. + DCHECK_NE(object->data_size, -1); + // We won't use this file descriptor, but the store sent us one, so we + // need to receive it and then close it right away so we don't leak file + // descriptors. + int fd = recv_fd(store_conn_); + close(fd); + ARROW_CHECK(fd >= 0); + // We've already filled out the information for this object, so we can + // just continue. + continue; + } + // If we are here, the object was not currently in use, so we need to + // process the reply from the object store. + if (object->data_size != -1) { + // The object was retrieved. The user will be responsible for releasing + // this object. + int fd = recv_fd(store_conn_); + ARROW_CHECK(fd >= 0); + object_buffers[i].data = + lookup_or_mmap(fd, object->handle.store_fd, object->handle.mmap_size); + // Finish filling out the return values. + object_buffers[i].data = object_buffers[i].data + object->data_offset; + object_buffers[i].data_size = object->data_size; + object_buffers[i].metadata = object_buffers[i].data + object->data_size; + object_buffers[i].metadata_size = object->metadata_size; + // Increment the count of the number of instances of this object that this + // client is using. A call to PlasmaClient::Release is required to + // decrement this + // count. Cache the reference to the object. + increment_object_count(received_object_ids[i], object, true); + } else { + // The object was not retrieved. Make sure we already put a -1 here to + // indicate that the object was not retrieved. The caller is not + // responsible for releasing this object. + DCHECK_EQ(object_buffers[i].data_size, -1); + object_buffers[i].data_size = -1; + } + } + return Status::OK(); +} + +/// This is a helper method for implementing plasma_release. We maintain a +/// buffer +/// of release calls and only perform them once the buffer becomes full (as +/// judged by the aggregate sizes of the objects). 
There may be multiple release +/// calls for the same object ID in the buffer. In this case, the first release +/// calls will not do anything. The client will only send a message to the store +/// releasing the object when the client is truly done with the object. +/// +/// @param conn The plasma connection. +/// @param object_id The object ID to attempt to release. +Status PlasmaClient::PerformRelease(const ObjectID& object_id) { + // Decrement the count of the number of instances of this object that are + // being used by this client. The corresponding increment should have happened + // in PlasmaClient::Get. + auto object_entry = objects_in_use_.find(object_id); + ARROW_CHECK(object_entry != objects_in_use_.end()); + object_entry->second->count -= 1; + ARROW_CHECK(object_entry->second->count >= 0); + // Check if the client is no longer using this object. + if (object_entry->second->count == 0) { + // Decrement the count of the number of objects in this memory-mapped file + // that the client is using. The corresponding increment should have + // happened in plasma_get. + int fd = object_entry->second->object.handle.store_fd; + auto entry = mmap_table_.find(fd); + ARROW_CHECK(entry != mmap_table_.end()); + entry->second.count -= 1; + ARROW_CHECK(entry->second.count >= 0); + // If none are being used then unmap the file. + if (entry->second.count == 0) { + munmap(entry->second.pointer, entry->second.length); + // Remove the corresponding entry from the hash table. + mmap_table_.erase(fd); + } + // Tell the store that the client no longer needs the object. + RETURN_NOT_OK(SendReleaseRequest(store_conn_, object_id)); + // Update the in_use_object_bytes_. + in_use_object_bytes_ -= (object_entry->second->object.data_size + + object_entry->second->object.metadata_size); + DCHECK_GE(in_use_object_bytes_, 0); + // Remove the entry from the hash table of objects currently in use. + objects_in_use_.erase(object_id); + } + return Status::OK(); +} + +Status PlasmaClient::Release(const ObjectID& object_id) { + // Add the new object to the release history. + release_history_.push_front(object_id); + // If there are too many bytes in use by the client or if there are too many + // pending release calls, and there are at least some pending release calls in + // the release_history list, then release some objects. + while ((in_use_object_bytes_ > std::min(kL3CacheSizeBytes, store_capacity_ / 100) || + release_history_.size() > config_.release_delay) && + release_history_.size() > 0) { + // Perform a release for the object ID for the first pending release. + RETURN_NOT_OK(PerformRelease(release_history_.back())); + // Remove the last entry from the release history. + release_history_.pop_back(); + } + return Status::OK(); +} + +// This method is used to query whether the plasma store contains an object. +Status PlasmaClient::Contains(const ObjectID& object_id, bool* has_object) { + // Check if we already have a reference to the object. + if (objects_in_use_.count(object_id) > 0) { + *has_object = 1; + } else { + // If we don't already have a reference to the object, check with the store + // to see if we have the object. 
+ RETURN_NOT_OK(SendContainsRequest(store_conn_, object_id)); + std::vector buffer; + RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaContainsReply, &buffer)); + ObjectID object_id2; + RETURN_NOT_OK(ReadContainsReply(buffer.data(), &object_id2, has_object)); + } + return Status::OK(); +} + +static void ComputeBlockHash(const unsigned char* data, int64_t nbytes, uint64_t* hash) { + XXH64_state_t hash_state; + XXH64_reset(&hash_state, XXH64_DEFAULT_SEED); + XXH64_update(&hash_state, data, nbytes); + *hash = XXH64_digest(&hash_state); +} + +static inline bool compute_object_hash_parallel( + XXH64_state_t* hash_state, const unsigned char* data, int64_t nbytes) { + // Note that this function will likely be faster if the address of data is + // aligned on a 64-byte boundary. + const int num_threads = kThreadPoolSize; + uint64_t threadhash[num_threads + 1]; + const uint64_t data_address = reinterpret_cast(data); + const uint64_t num_blocks = nbytes / BLOCK_SIZE; + const uint64_t chunk_size = (num_blocks / num_threads) * BLOCK_SIZE; + const uint64_t right_address = data_address + chunk_size * num_threads; + const uint64_t suffix = (data_address + nbytes) - right_address; + // Now the data layout is | k * num_threads * block_size | suffix | == + // | num_threads * chunk_size | suffix |, where chunk_size = k * block_size. + // Each thread gets a "chunk" of k blocks, except the suffix thread. + + for (int i = 0; i < num_threads; i++) { + threadpool_[i] = std::thread(ComputeBlockHash, + reinterpret_cast(data_address) + i * chunk_size, chunk_size, + &threadhash[i]); + } + ComputeBlockHash( + reinterpret_cast(right_address), suffix, &threadhash[num_threads]); + + // Join the threads. + for (auto& t : threadpool_) { + if (t.joinable()) { t.join(); } + } + + XXH64_update(hash_state, (unsigned char*)threadhash, sizeof(threadhash)); + return true; +} + +static uint64_t compute_object_hash(const ObjectBuffer& obj_buffer) { + XXH64_state_t hash_state; + XXH64_reset(&hash_state, XXH64_DEFAULT_SEED); + if (obj_buffer.data_size >= kBytesInMB) { + compute_object_hash_parallel( + &hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size); + } else { + XXH64_update(&hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size); + } + XXH64_update( + &hash_state, (unsigned char*)obj_buffer.metadata, obj_buffer.metadata_size); + return XXH64_digest(&hash_state); +} + +bool plasma_compute_object_hash( + PlasmaClient* conn, ObjectID object_id, unsigned char* digest) { + // Get the plasma object data. We pass in a timeout of 0 to indicate that + // the operation should timeout immediately. + ObjectBuffer object_buffer; + ARROW_CHECK_OK(conn->Get(&object_id, 1, 0, &object_buffer)); + // If the object was not retrieved, return false. + if (object_buffer.data_size == -1) { return false; } + // Compute the hash. + uint64_t hash = compute_object_hash(object_buffer); + memcpy(digest, &hash, sizeof(hash)); + // Release the plasma object. + ARROW_CHECK_OK(conn->Release(object_id)); + return true; +} + +Status PlasmaClient::Seal(const ObjectID& object_id) { + // Make sure this client has a reference to the object before sending the + // request to Plasma. 
+ auto object_entry = objects_in_use_.find(object_id); + ARROW_CHECK(object_entry != objects_in_use_.end()) + << "Plasma client called seal an object without a reference to it"; + ARROW_CHECK(!object_entry->second->is_sealed) + << "Plasma client called seal an already sealed object"; + object_entry->second->is_sealed = true; + /// Send the seal request to Plasma. + static unsigned char digest[kDigestSize]; + ARROW_CHECK(plasma_compute_object_hash(this, object_id, &digest[0])); + RETURN_NOT_OK(SendSealRequest(store_conn_, object_id, &digest[0])); + // We call PlasmaClient::Release to decrement the number of instances of this + // object + // that are currently being used by this client. The corresponding increment + // happened in plasma_create and was used to ensure that the object was not + // released before the call to PlasmaClient::Seal. + return Release(object_id); +} + +Status PlasmaClient::Delete(const ObjectID& object_id) { + // TODO(rkn): In the future, we can use this method to give hints to the + // eviction policy about when an object will no longer be needed. + return Status::NotImplemented("PlasmaClient::Delete is not implemented."); +} + +Status PlasmaClient::Evict(int64_t num_bytes, int64_t& num_bytes_evicted) { + // Send a request to the store to evict objects. + RETURN_NOT_OK(SendEvictRequest(store_conn_, num_bytes)); + // Wait for a response with the number of bytes actually evicted. + std::vector buffer; + int64_t type; + RETURN_NOT_OK(ReadMessage(store_conn_, &type, &buffer)); + return ReadEvictReply(buffer.data(), num_bytes_evicted); +} + +Status PlasmaClient::Subscribe(int* fd) { + int sock[2]; + // Create a non-blocking socket pair. This will only be used to send + // notifications from the Plasma store to the client. + socketpair(AF_UNIX, SOCK_STREAM, 0, sock); + // Make the socket non-blocking. + int flags = fcntl(sock[1], F_GETFL, 0); + ARROW_CHECK(fcntl(sock[1], F_SETFL, flags | O_NONBLOCK) == 0); + // Tell the Plasma store about the subscription. + RETURN_NOT_OK(SendSubscribeRequest(store_conn_)); + // Send the file descriptor that the Plasma store should use to push + // notifications about sealed objects to this client. + ARROW_CHECK(send_fd(store_conn_, sock[1]) >= 0); + close(sock[1]); + // Return the file descriptor that the client should use to read notifications + // about sealed objects. + *fd = sock[0]; + return Status::OK(); +} + +Status PlasmaClient::Connect(const std::string& store_socket_name, + const std::string& manager_socket_name, int release_delay) { + store_conn_ = connect_ipc_sock_retry(store_socket_name, -1, -1); + if (manager_socket_name != "") { + manager_conn_ = connect_ipc_sock_retry(manager_socket_name, -1, -1); + } else { + manager_conn_ = -1; + } + config_.release_delay = release_delay; + in_use_object_bytes_ = 0; + // Send a ConnectRequest to the store to get its memory capacity. + RETURN_NOT_OK(SendConnectRequest(store_conn_)); + std::vector buffer; + RETURN_NOT_OK(PlasmaReceive(store_conn_, MessageType_PlasmaConnectReply, &buffer)); + RETURN_NOT_OK(ReadConnectReply(buffer.data(), &store_capacity_)); + return Status::OK(); +} + +Status PlasmaClient::Disconnect() { + // NOTE: We purposefully do not finish sending release calls for objects in + // use, so that we don't duplicate PlasmaClient::Release calls (when handling + // a SIGTERM, for example). + + // Close the connections to Plasma. The Plasma store will release the objects + // that were in use by us when handling the SIGPIPE. 
+ close(store_conn_); + if (manager_conn_ >= 0) { close(manager_conn_); } + return Status::OK(); +} + +#define h_addr h_addr_list[0] + +Status PlasmaClient::Transfer(const char* address, int port, const ObjectID& object_id) { + return SendDataRequest(manager_conn_, object_id, address, port); +} + +Status PlasmaClient::Fetch(int num_object_ids, const ObjectID* object_ids) { + ARROW_CHECK(manager_conn_ >= 0); + return SendFetchRequest(manager_conn_, object_ids, num_object_ids); +} + +int PlasmaClient::get_manager_fd() { + return manager_conn_; +} + +Status PlasmaClient::Info(const ObjectID& object_id, int* object_status) { + ARROW_CHECK(manager_conn_ >= 0); + + RETURN_NOT_OK(SendStatusRequest(manager_conn_, &object_id, 1)); + std::vector buffer; + RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaStatusReply, &buffer)); + ObjectID id; + RETURN_NOT_OK(ReadStatusReply(buffer.data(), &id, object_status, 1)); + ARROW_CHECK(object_id == id); + return Status::OK(); +} + +Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_requests, + int num_ready_objects, int64_t timeout_ms, int* num_objects_ready) { + ARROW_CHECK(manager_conn_ >= 0); + ARROW_CHECK(num_object_requests > 0); + ARROW_CHECK(num_ready_objects > 0); + ARROW_CHECK(num_ready_objects <= num_object_requests); + + for (int i = 0; i < num_object_requests; ++i) { + ARROW_CHECK(object_requests[i].type == PLASMA_QUERY_LOCAL || + object_requests[i].type == PLASMA_QUERY_ANYWHERE); + } + + RETURN_NOT_OK(SendWaitRequest(manager_conn_, object_requests, num_object_requests, + num_ready_objects, timeout_ms)); + std::vector buffer; + RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaWaitReply, &buffer)); + RETURN_NOT_OK(ReadWaitReply(buffer.data(), object_requests, &num_ready_objects)); + + *num_objects_ready = 0; + for (int i = 0; i < num_object_requests; ++i) { + int type = object_requests[i].type; + int status = object_requests[i].status; + switch (type) { + case PLASMA_QUERY_LOCAL: + if (status == ObjectStatus_Local) { *num_objects_ready += 1; } + break; + case PLASMA_QUERY_ANYWHERE: + if (status == ObjectStatus_Local || status == ObjectStatus_Remote) { + *num_objects_ready += 1; + } else { + ARROW_CHECK(status == ObjectStatus_Nonexistent); + } + break; + default: + ARROW_LOG(FATAL) << "This code should be unreachable."; + } + } + return Status::OK(); +} diff --git a/cpp/src/plasma/client.h b/cpp/src/plasma/client.h new file mode 100644 index 0000000000000..fb3a161795d47 --- /dev/null +++ b/cpp/src/plasma/client.h @@ -0,0 +1,343 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
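+
+// Illustrative end-to-end use of the client declared in this header (a sketch
+// only; the socket path and sizes below are made-up example values):
+//
+//   PlasmaClient client;
+//   ARROW_CHECK_OK(client.Connect("/tmp/plasma_socket", "",
+//                                 PLASMA_DEFAULT_RELEASE_DELAY));
+//   ObjectID object_id = ObjectID::from_random();
+//   uint8_t* data;
+//   ARROW_CHECK_OK(client.Create(object_id, 100, nullptr, 0, &data));
+//   // ... fill the 100 data bytes ...
+//   ARROW_CHECK_OK(client.Seal(object_id));
+//   ObjectBuffer buffer;
+//   ARROW_CHECK_OK(client.Get(&object_id, 1, -1, &buffer));
+//   // ... read buffer.data / buffer.metadata ...
+//   ARROW_CHECK_OK(client.Release(object_id));
+//   ARROW_CHECK_OK(client.Disconnect());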
+ +#ifndef PLASMA_CLIENT_H +#define PLASMA_CLIENT_H + +#include +#include + +#include +#include + +#include "plasma/plasma.h" + +using arrow::Status; + +#define PLASMA_DEFAULT_RELEASE_DELAY 64 + +// Use 100MB as an overestimate of the L3 cache size. +constexpr int64_t kL3CacheSizeBytes = 100000000; + +/// Object buffer data structure. +struct ObjectBuffer { + /// The size in bytes of the data object. + int64_t data_size; + /// The address of the data object. + uint8_t* data; + /// The metadata size in bytes. + int64_t metadata_size; + /// The address of the metadata. + uint8_t* metadata; +}; + +/// Configuration options for the plasma client. +struct PlasmaClientConfig { + /// Number of release calls we wait until the object is actually released. + /// This allows us to avoid invalidating the cpu cache on workers if objects + /// are reused accross tasks. + size_t release_delay; +}; + +struct ClientMmapTableEntry { + /// The result of mmap for this file descriptor. + uint8_t* pointer; + /// The length of the memory-mapped file. + size_t length; + /// The number of objects in this memory-mapped file that are currently being + /// used by the client. When this count reaches zeros, we unmap the file. + int count; +}; + +struct ObjectInUseEntry { + /// A count of the number of times this client has called PlasmaClient::Create + /// or + /// PlasmaClient::Get on this object ID minus the number of calls to + /// PlasmaClient::Release. + /// When this count reaches zero, we remove the entry from the ObjectsInUse + /// and decrement a count in the relevant ClientMmapTableEntry. + int count; + /// Cached information to read the object. + PlasmaObject object; + /// A flag representing whether the object has been sealed. + bool is_sealed; +}; + +class PlasmaClient { + public: + /// Connect to the local plasma store and plasma manager. Return + /// the resulting connection. + /// + /// @param store_socket_name The name of the UNIX domain socket to use to + /// connect to the Plasma store. + /// @param manager_socket_name The name of the UNIX domain socket to use to + /// connect to the local Plasma manager. If this is "", then this + /// function will not connect to a manager. + /// @param release_delay Number of released objects that are kept around + /// and not evicted to avoid too many munmaps. + /// @return The return status. + Status Connect(const std::string& store_socket_name, + const std::string& manager_socket_name, int release_delay); + + /// Create an object in the Plasma Store. Any metadata for this object must be + /// be passed in when the object is created. + /// + /// @param object_id The ID to use for the newly created object. + /// @param data_size The size in bytes of the space to be allocated for this + /// object's + /// data (this does not include space used for metadata). + /// @param metadata The object's metadata. If there is no metadata, this + /// pointer + /// should be NULL. + /// @param metadata_size The size in bytes of the metadata. If there is no + /// metadata, this should be 0. + /// @param data The address of the newly created object will be written here. + /// @return The return status. + Status Create(const ObjectID& object_id, int64_t data_size, uint8_t* metadata, + int64_t metadata_size, uint8_t** data); + + /// Get some objects from the Plasma Store. This function will block until the + /// objects have all been created and sealed in the Plasma Store or the + /// timeout + /// expires. 
The caller is responsible for releasing any retrieved objects, + /// but + /// the caller should not release objects that were not retrieved. + /// + /// @param object_ids The IDs of the objects to get. + /// @param num_object_ids The number of object IDs to get. + /// @param timeout_ms The amount of time in milliseconds to wait before this + /// request times out. If this value is -1, then no timeout is set. + /// @param object_buffers An array where the results will be stored. If the + /// data + /// size field is -1, then the object was not retrieved. + /// @return The return status. + Status Get(const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms, + ObjectBuffer* object_buffers); + + /// Tell Plasma that the client no longer needs the object. This should be + /// called + /// after Get when the client is done with the object. After this call, + /// the address returned by Get is no longer valid. This should be called + /// once for each call to Get (with the same object ID). + /// + /// @param object_id The ID of the object that is no longer needed. + /// @return The return status. + Status Release(const ObjectID& object_id); + + /// Check if the object store contains a particular object and the object has + /// been sealed. The result will be stored in has_object. + /// + /// @todo: We may want to indicate if the object has been created but not + /// sealed. + /// + /// @param object_id The ID of the object whose presence we are checking. + /// @param has_object The function will write true at this address if + /// the object is present and false if it is not present. + /// @return The return status. + Status Contains(const ObjectID& object_id, bool* has_object); + + /// Seal an object in the object store. The object will be immutable after + /// this + /// call. + /// + /// @param object_id The ID of the object to seal. + /// @return The return status. + Status Seal(const ObjectID& object_id); + + /// Delete an object from the object store. This currently assumes that the + /// object is present and has been sealed. + /// + /// @todo We may want to allow the deletion of objects that are not present or + /// haven't been sealed. + /// + /// @param object_id The ID of the object to delete. + /// @return The return status. + Status Delete(const ObjectID& object_id); + + /// Delete objects until we have freed up num_bytes bytes or there are no more + /// released objects that can be deleted. + /// + /// @param num_bytes The number of bytes to try to free up. + /// @param num_bytes_evicted Out parameter for total number of bytes of space + /// retrieved. + /// @return The return status. + Status Evict(int64_t num_bytes, int64_t& num_bytes_evicted); + + /// Subscribe to notifications when objects are sealed in the object store. + /// Whenever an object is sealed, a message will be written to the client + /// socket + /// that is returned by this method. + /// + /// @param fd Out parameter for the file descriptor the client should use to + /// read notifications + /// from the object store about sealed objects. + /// @return The return status. + Status Subscribe(int* fd); + + /// Disconnect from the local plasma instance, including the local store and + /// manager. + /// + /// @return The return status. + Status Disconnect(); + + /// Attempt to initiate the transfer of some objects from remote Plasma + /// Stores. + /// This method does not guarantee that the fetched objects will arrive + /// locally. 
+ /// + /// For an object that is available in the local Plasma Store, this method + /// will + /// not do anything. For an object that is not available locally, it will + /// check + /// if the object are already being fetched. If so, it will not do anything. + /// If + /// not, it will query the object table for a list of Plasma Managers that + /// have + /// the object. The object table will return a non-empty list, and this Plasma + /// Manager will attempt to initiate transfers from one of those Plasma + /// Managers. + /// + /// This function is non-blocking. + /// + /// This method is idempotent in the sense that it is ok to call it multiple + /// times. + /// + /// @param num_object_ids The number of object IDs fetch is being called on. + /// @param object_ids The IDs of the objects that fetch is being called on. + /// @return The return status. + Status Fetch(int num_object_ids, const ObjectID* object_ids); + + /// Wait for (1) a specified number of objects to be available (sealed) in the + /// local Plasma Store or in a remote Plasma Store, or (2) for a timeout to + /// expire. This is a blocking call. + /// + /// @param num_object_requests Size of the object_requests array. + /// @param object_requests Object event array. Each element contains a request + /// for a particular object_id. The type of request is specified in the + /// "type" field. + /// - A PLASMA_QUERY_LOCAL request is satisfied when object_id becomes + /// available in the local Plasma Store. In this case, this function + /// sets the "status" field to ObjectStatus_Local. Note, if the + /// status + /// is not ObjectStatus_Local, it will be ObjectStatus_Nonexistent, + /// but it may exist elsewhere in the system. + /// - A PLASMA_QUERY_ANYWHERE request is satisfied when object_id + /// becomes + /// available either at the local Plasma Store or on a remote Plasma + /// Store. In this case, the functions sets the "status" field to + /// ObjectStatus_Local or ObjectStatus_Remote. + /// @param num_ready_objects The number of requests in object_requests array + /// that + /// must be satisfied before the function returns, unless it timeouts. + /// The num_ready_objects should be no larger than num_object_requests. + /// @param timeout_ms Timeout value in milliseconds. If this timeout expires + /// before min_num_ready_objects of requests are satisfied, the + /// function + /// returns. + /// @param num_objects_ready Out parameter for number of satisfied requests in + /// the object_requests list. If the returned number is less than + /// min_num_ready_objects this means that timeout expired. + /// @return The return status. + Status Wait(int64_t num_object_requests, ObjectRequest* object_requests, + int num_ready_objects, int64_t timeout_ms, int* num_objects_ready); + + /// Transfer local object to a different plasma manager. + /// + /// @param conn The object containing the connection state. + /// @param addr IP address of the plasma manager we are transfering to. + /// @param port Port of the plasma manager we are transfering to. + /// @object_id ObjectID of the object we are transfering. + /// @return The return status. + Status Transfer(const char* addr, int port, const ObjectID& object_id); + + /// Return the status of a given object. This method may query the object + /// table. + /// + /// @param conn The object containing the connection state. + /// @param object_id The ID of the object whose status we query. + /// @param object_status Out parameter for object status. Can take the + /// following values. 
+ /// - PLASMA_CLIENT_LOCAL, if object is stored in the local Plasma + /// Store. + /// has been already scheduled by the Plasma Manager. + /// - PLASMA_CLIENT_TRANSFER, if the object is either currently being + /// transferred or just scheduled. + /// - PLASMA_CLIENT_REMOTE, if the object is stored at a remote + /// Plasma Store. + /// - PLASMA_CLIENT_DOES_NOT_EXIST, if the object doesn’t exist in the + /// system. + /// @return The return status. + Status Info(const ObjectID& object_id, int* object_status); + + /// Get the file descriptor for the socket connection to the plasma manager. + /// + /// @param conn The plasma connection. + /// @return The file descriptor for the manager connection. If there is no + /// connection to the manager, this is -1. + int get_manager_fd(); + + private: + Status PerformRelease(const ObjectID& object_id); + + uint8_t* lookup_or_mmap(int fd, int store_fd_val, int64_t map_size); + + uint8_t* lookup_mmapped_file(int store_fd_val); + + void increment_object_count( + const ObjectID& object_id, PlasmaObject* object, bool is_sealed); + + /// File descriptor of the Unix domain socket that connects to the store. + int store_conn_; + /// File descriptor of the Unix domain socket that connects to the manager. + int manager_conn_; + /// Table of dlmalloc buffer files that have been memory mapped so far. This + /// is a hash table mapping a file descriptor to a struct containing the + /// address of the corresponding memory-mapped file. + std::unordered_map mmap_table_; + /// A hash table of the object IDs that are currently being used by this + /// client. + std::unordered_map, UniqueIDHasher> + objects_in_use_; + /// Object IDs of the last few release calls. This is a deque and + /// is used to delay releasing objects to see if they can be reused by + /// subsequent tasks so we do not unneccessarily invalidate cpu caches. + /// TODO(pcm): replace this with a proper lru cache using the size of the L3 + /// cache. + std::deque release_history_; + /// The number of bytes in the combined objects that are held in the release + /// history doubly-linked list. If this is too large then the client starts + /// releasing objects. + int64_t in_use_object_bytes_; + /// Configuration options for the plasma client. + PlasmaClientConfig config_; + /// The amount of memory available to the Plasma store. The client needs this + /// information to make sure that it does not delay in releasing so much + /// memory that the store is unable to evict enough objects to free up space. + int64_t store_capacity_; +}; + +/// Compute the hash of an object in the object store. +/// +/// @param conn The object containing the connection state. +/// @param object_id The ID of the object we want to hash. +/// @param digest A pointer at which to return the hash digest of the object. +/// The pointer must have at least DIGEST_SIZE bytes allocated. +/// @return A boolean representing whether the hash operation succeeded. +bool plasma_compute_object_hash( + PlasmaClient* conn, ObjectID object_id, unsigned char* digest); + +#endif // PLASMA_CLIENT_H diff --git a/cpp/src/plasma/common.cc b/cpp/src/plasma/common.cc new file mode 100644 index 0000000000000..a09a963fa4769 --- /dev/null +++ b/cpp/src/plasma/common.cc @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/common.h" + +#include + +#include "format/plasma_generated.h" + +using arrow::Status; + +UniqueID UniqueID::from_random() { + UniqueID id; + uint8_t* data = id.mutable_data(); + std::random_device engine; + for (int i = 0; i < kUniqueIDSize; i++) { + data[i] = static_cast(engine()); + } + return id; +} + +UniqueID UniqueID::from_binary(const std::string& binary) { + UniqueID id; + std::memcpy(&id, binary.data(), sizeof(id)); + return id; +} + +const uint8_t* UniqueID::data() const { + return id_; +} + +uint8_t* UniqueID::mutable_data() { + return id_; +} + +std::string UniqueID::binary() const { + return std::string(reinterpret_cast(id_), kUniqueIDSize); +} + +std::string UniqueID::hex() const { + constexpr char hex[] = "0123456789abcdef"; + std::string result; + for (int i = 0; i < kUniqueIDSize; i++) { + unsigned int val = id_[i]; + result.push_back(hex[val >> 4]); + result.push_back(hex[val & 0xf]); + } + return result; +} + +bool UniqueID::operator==(const UniqueID& rhs) const { + return std::memcmp(data(), rhs.data(), kUniqueIDSize) == 0; +} + +Status plasma_error_status(int plasma_error) { + switch (plasma_error) { + case PlasmaError_OK: + return Status::OK(); + case PlasmaError_ObjectExists: + return Status::PlasmaObjectExists("object already exists in the plasma store"); + case PlasmaError_ObjectNonexistent: + return Status::PlasmaObjectNonexistent("object does not exist in the plasma store"); + case PlasmaError_OutOfMemory: + return Status::PlasmaStoreFull("object does not fit in the plasma store"); + default: + ARROW_LOG(FATAL) << "unknown plasma error code " << plasma_error; + } + return Status::OK(); +} diff --git a/cpp/src/plasma/common.h b/cpp/src/plasma/common.h new file mode 100644 index 0000000000000..85dc74bf86e0d --- /dev/null +++ b/cpp/src/plasma/common.h @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
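+
+// Illustrative use of the UniqueID/ObjectID helpers declared below (a sketch
+// only, not part of this header's API surface):
+//
+//   ObjectID id = ObjectID::from_random();            // 20 random bytes
+//   std::string wire = id.binary();                   // binary round trip
+//   ARROW_CHECK(ObjectID::from_binary(wire) == id);
+//   ARROW_LOG(INFO) << "created object " << id.hex(); // 40-character hex form
+//   std::unordered_map<ObjectID, int, UniqueIDHasher> reference_counts;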
+ +#ifndef PLASMA_COMMON_H +#define PLASMA_COMMON_H + +#include +#include +// TODO(pcm): Convert getopt and sscanf in the store to use more idiomatic C++ +// and get rid of the next three lines: +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include "arrow/status.h" +#include "arrow/util/logging.h" + +constexpr int64_t kUniqueIDSize = 20; + +class UniqueID { + public: + static UniqueID from_random(); + static UniqueID from_binary(const std::string& binary); + bool operator==(const UniqueID& rhs) const; + const uint8_t* data() const; + uint8_t* mutable_data(); + std::string binary() const; + std::string hex() const; + + private: + uint8_t id_[kUniqueIDSize]; +}; + +static_assert(std::is_pod::value, "UniqueID must be plain old data"); + +struct UniqueIDHasher { + // ObjectID hashing function. + size_t operator()(const UniqueID& id) const { + size_t result; + std::memcpy(&result, id.data(), sizeof(size_t)); + return result; + } +}; + +typedef UniqueID ObjectID; + +arrow::Status plasma_error_status(int plasma_error); + +#endif // PLASMA_COMMON_H diff --git a/cpp/src/plasma/events.cc b/cpp/src/plasma/events.cc new file mode 100644 index 0000000000000..a9f7356e1f67e --- /dev/null +++ b/cpp/src/plasma/events.cc @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/events.h" + +#include + +void EventLoop::file_event_callback( + aeEventLoop* loop, int fd, void* context, int events) { + FileCallback* callback = reinterpret_cast(context); + (*callback)(events); +} + +int EventLoop::timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* context) { + TimerCallback* callback = reinterpret_cast(context); + return (*callback)(timer_id); +} + +constexpr int kInitialEventLoopSize = 1024; + +EventLoop::EventLoop() { + loop_ = aeCreateEventLoop(kInitialEventLoopSize); +} + +bool EventLoop::add_file_event(int fd, int events, const FileCallback& callback) { + if (file_callbacks_.find(fd) != file_callbacks_.end()) { return false; } + auto data = std::unique_ptr(new FileCallback(callback)); + void* context = reinterpret_cast(data.get()); + // Try to add the file descriptor. + int err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context); + // If it cannot be added, increase the size of the event loop. + if (err == AE_ERR && errno == ERANGE) { + err = aeResizeSetSize(loop_, 3 * aeGetSetSize(loop_) / 2); + if (err != AE_OK) { return false; } + err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context); + } + // In any case, test if there were errors. 
+ if (err == AE_OK) { + file_callbacks_.emplace(fd, std::move(data)); + return true; + } + return false; +} + +void EventLoop::remove_file_event(int fd) { + aeDeleteFileEvent(loop_, fd, AE_READABLE | AE_WRITABLE); + file_callbacks_.erase(fd); +} + +void EventLoop::run() { + aeMain(loop_); +} + +int64_t EventLoop::add_timer(int64_t timeout, const TimerCallback& callback) { + auto data = std::unique_ptr(new TimerCallback(callback)); + void* context = reinterpret_cast(data.get()); + int64_t timer_id = + aeCreateTimeEvent(loop_, timeout, EventLoop::timer_event_callback, context, NULL); + timer_callbacks_.emplace(timer_id, std::move(data)); + return timer_id; +} + +int EventLoop::remove_timer(int64_t timer_id) { + int err = aeDeleteTimeEvent(loop_, timer_id); + timer_callbacks_.erase(timer_id); + return err; +} diff --git a/cpp/src/plasma/events.h b/cpp/src/plasma/events.h new file mode 100644 index 0000000000000..bd93d6bb2a6fd --- /dev/null +++ b/cpp/src/plasma/events.h @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_EVENTS +#define PLASMA_EVENTS + +#include +#include +#include + +extern "C" { +#include "ae/ae.h" +} + +/// Constant specifying that the timer is done and it will be removed. +constexpr int kEventLoopTimerDone = AE_NOMORE; + +/// Read event on the file descriptor. +constexpr int kEventLoopRead = AE_READABLE; + +/// Write event on the file descriptor. +constexpr int kEventLoopWrite = AE_WRITABLE; + +typedef long long TimerID; // NOLINT + +class EventLoop { + public: + // Signature of the handler that will be called when there is a new event + // on the file descriptor that this handler has been registered for. + // + // The arguments are the event flags (read or write). + using FileCallback = std::function; + + // This handler will be called when a timer times out. The timer id is + // passed as an argument. The return is the number of milliseconds the timer + // shall be reset to or kEventLoopTimerDone if the timer shall not be + // triggered again. + using TimerCallback = std::function; + + EventLoop(); + + /// Add a new file event handler to the event loop. + /// + /// @param fd The file descriptor we are listening to. + /// @param events The flags for events we are listening to (read or write). + /// @param callback The callback that will be called when the event happens. + /// @return Returns true if the event handler was added successfully. + bool add_file_event(int fd, int events, const FileCallback& callback); + + /// Remove a file event handler from the event loop. + /// + /// @param fd The file descriptor of the event handler. + /// @return Void. + void remove_file_event(int fd); + + /// Register a handler that will be called after a time slice of + /// "timeout" milliseconds. 
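+  ///
+  /// Example (illustrative only, assuming an EventLoop instance named `loop`):
+  ///   loop.add_timer(100, [](int64_t timer_id) {
+  ///     // ... periodic work ...
+  ///     return 100;                         // fire again in 100 milliseconds
+  ///     // or: return kEventLoopTimerDone;  // to remove the timer
+  ///   });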
+ /// + /// @param timeout The timeout in milliseconds. + /// @param callback The callback for the timeout. + /// @return The ID of the newly created timer. + int64_t add_timer(int64_t timeout, const TimerCallback& callback); + + /// Remove a timer handler from the event loop. + /// + /// @param timer_id The ID of the timer that is to be removed. + /// @return The ae.c error code. TODO(pcm): needs to be standardized + int remove_timer(int64_t timer_id); + + /// Run the event loop. + /// + /// @return Void. + void run(); + + private: + static void file_event_callback(aeEventLoop* loop, int fd, void* context, int events); + + static int timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* context); + + aeEventLoop* loop_; + std::unordered_map> file_callbacks_; + std::unordered_map> timer_callbacks_; +}; + +#endif // PLASMA_EVENTS diff --git a/cpp/src/plasma/eviction_policy.cc b/cpp/src/plasma/eviction_policy.cc new file mode 100644 index 0000000000000..4ae6384d42543 --- /dev/null +++ b/cpp/src/plasma/eviction_policy.cc @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/eviction_policy.h" + +#include + +void LRUCache::add(const ObjectID& key, int64_t size) { + auto it = item_map_.find(key); + ARROW_CHECK(it == item_map_.end()); + /* Note that it is important to use a list so the iterators stay valid. */ + item_list_.emplace_front(key, size); + item_map_.emplace(key, item_list_.begin()); +} + +void LRUCache::remove(const ObjectID& key) { + auto it = item_map_.find(key); + ARROW_CHECK(it != item_map_.end()); + item_list_.erase(it->second); + item_map_.erase(it); +} + +int64_t LRUCache::choose_objects_to_evict( + int64_t num_bytes_required, std::vector* objects_to_evict) { + int64_t bytes_evicted = 0; + auto it = item_list_.end(); + while (bytes_evicted < num_bytes_required && it != item_list_.begin()) { + it--; + objects_to_evict->push_back(it->first); + bytes_evicted += it->second; + } + return bytes_evicted; +} + +EvictionPolicy::EvictionPolicy(PlasmaStoreInfo* store_info) + : memory_used_(0), store_info_(store_info) {} + +int64_t EvictionPolicy::choose_objects_to_evict( + int64_t num_bytes_required, std::vector* objects_to_evict) { + int64_t bytes_evicted = + cache_.choose_objects_to_evict(num_bytes_required, objects_to_evict); + /* Update the LRU cache. */ + for (auto& object_id : *objects_to_evict) { + cache_.remove(object_id); + } + /* Update the number of bytes used. 
*/
+  memory_used_ -= bytes_evicted;
+  return bytes_evicted;
+}
+
+void EvictionPolicy::object_created(const ObjectID& object_id) {
+  auto entry = store_info_->objects[object_id].get();
+  cache_.add(object_id, entry->info.data_size + entry->info.metadata_size);
+}
+
+bool EvictionPolicy::require_space(
+    int64_t size, std::vector<ObjectID>* objects_to_evict) {
+  /* Check if there is enough space to create the object. */
+  int64_t required_space = memory_used_ + size - store_info_->memory_capacity;
+  int64_t num_bytes_evicted;
+  if (required_space > 0) {
+    /* Try to free up at least as much space as we need right now but ideally
+     * up to 20% of the total capacity. */
+    int64_t space_to_free = std::max(size, store_info_->memory_capacity / 5);
+    ARROW_LOG(DEBUG) << "not enough space to create this object, so evicting objects";
+    /* Choose some objects to evict, and update the return pointers. */
+    num_bytes_evicted = choose_objects_to_evict(space_to_free, objects_to_evict);
+    ARROW_LOG(INFO) << "There is not enough space to create this object, so evicting "
+                    << objects_to_evict->size() << " objects to free up "
+                    << num_bytes_evicted << " bytes.";
+  } else {
+    num_bytes_evicted = 0;
+  }
+  if (num_bytes_evicted >= required_space) {
+    /* We only increment the space used if there is enough space to create the
+     * object. */
+    memory_used_ += size;
+  }
+  return num_bytes_evicted >= required_space;
+}
+
+void EvictionPolicy::begin_object_access(
+    const ObjectID& object_id, std::vector<ObjectID>* objects_to_evict) {
+  /* If the object is in the LRU cache, remove it. */
+  cache_.remove(object_id);
+}
+
+void EvictionPolicy::end_object_access(
+    const ObjectID& object_id, std::vector<ObjectID>* objects_to_evict) {
+  auto entry = store_info_->objects[object_id].get();
+  /* Add the object to the LRU cache. */
+  cache_.add(object_id, entry->info.data_size + entry->info.metadata_size);
+}
diff --git a/cpp/src/plasma/eviction_policy.h b/cpp/src/plasma/eviction_policy.h
new file mode 100644
index 0000000000000..3815fc6652f0c
--- /dev/null
+++ b/cpp/src/plasma/eviction_policy.h
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef PLASMA_EVICTION_POLICY_H
+#define PLASMA_EVICTION_POLICY_H
+
+#include <list>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "plasma/common.h"
+#include "plasma/plasma.h"
+
+// ==== The eviction policy ====
+//
+// This file contains declarations for all functions and data structures that
+// need to be provided if you want to implement a new eviction algorithm for
+// the Plasma store.
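+//
+// As an illustrative sketch (not an API defined in this file), a store
+// implementation might drive a policy roughly like this when creating a new
+// object:
+//
+//   std::vector<ObjectID> objects_to_evict;
+//   bool ok = eviction_policy.require_space(data_size + metadata_size,
+//                                           &objects_to_evict);
+//   // ... delete `objects_to_evict` from the store; fail the create if !ok ...
+//   eviction_policy.object_created(object_id);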
+ +class LRUCache { + public: + LRUCache() {} + + void add(const ObjectID& key, int64_t size); + + void remove(const ObjectID& key); + + int64_t choose_objects_to_evict( + int64_t num_bytes_required, std::vector* objects_to_evict); + + private: + /// A doubly-linked list containing the items in the cache and + /// their sizes in LRU order. + typedef std::list> ItemList; + ItemList item_list_; + /// A hash table mapping the object ID of an object in the cache to its + /// location in the doubly linked list item_list_. + std::unordered_map item_map_; +}; + +/// The eviction policy. +class EvictionPolicy { + public: + /// Construct an eviction policy. + /// + /// @param store_info Information about the Plasma store that is exposed + /// to the eviction policy. + explicit EvictionPolicy(PlasmaStoreInfo* store_info); + + /// This method will be called whenever an object is first created in order to + /// add it to the LRU cache. This is done so that the first time, the Plasma + /// store calls begin_object_access, we can remove the object from the LRU + /// cache. + /// + /// @param object_id The object ID of the object that was created. + /// @return Void. + void object_created(const ObjectID& object_id); + + /// This method will be called when the Plasma store needs more space, perhaps + /// to create a new object. If the required amount of space cannot be freed up, + /// then a fatal error will be thrown. When this method is called, the eviction + /// policy will assume that the objects chosen to be evicted will in fact be + /// evicted from the Plasma store by the caller. + /// + /// @param size The size in bytes of the new object, including both data and + /// metadata. + /// @param objects_to_evict The object IDs that were chosen for eviction will + /// be stored into this vector. + /// @return True if enough space can be freed and false otherwise. + bool require_space(int64_t size, std::vector* objects_to_evict); + + /// This method will be called whenever an unused object in the Plasma store + /// starts to be used. When this method is called, the eviction policy will + /// assume that the objects chosen to be evicted will in fact be evicted from + /// the Plasma store by the caller. + /// + /// @param object_id The ID of the object that is now being used. + /// @param objects_to_evict The object IDs that were chosen for eviction will + /// be stored into this vector. + /// @return Void. + void begin_object_access( + const ObjectID& object_id, std::vector* objects_to_evict); + + /// This method will be called whenever an object in the Plasma store that was + /// being used is no longer being used. When this method is called, the + /// eviction policy will assume that the objects chosen to be evicted will in + /// fact be evicted from the Plasma store by the caller. + /// + /// @param object_id The ID of the object that is no longer being used. + /// @param objects_to_evict The object IDs that were chosen for eviction will + /// be stored into this vector. + /// @return Void. + void end_object_access( + const ObjectID& object_id, std::vector* objects_to_evict); + + /// Choose some objects to evict from the Plasma store. When this method is + /// called, the eviction policy will assume that the objects chosen to be + /// evicted will in fact be evicted from the Plasma store by the caller. + /// + /// @note This method is not part of the API. It is exposed in the header file + /// only for testing. + /// + /// @param num_bytes_required The number of bytes of space to try to free up. 
+ /// @param objects_to_evict The object IDs that were chosen for eviction will + /// be stored into this vector. + /// @return The total number of bytes of space chosen to be evicted. + int64_t choose_objects_to_evict( + int64_t num_bytes_required, std::vector* objects_to_evict); + + private: + /// The amount of memory (in bytes) currently being used. + int64_t memory_used_; + /// Pointer to the plasma store info. + PlasmaStoreInfo* store_info_; + /// Datastructure for the LRU cache. + LRUCache cache_; +}; + +#endif // PLASMA_EVICTION_POLICY_H diff --git a/cpp/src/plasma/extension.cc b/cpp/src/plasma/extension.cc new file mode 100644 index 0000000000000..5d61e337c108d --- /dev/null +++ b/cpp/src/plasma/extension.cc @@ -0,0 +1,456 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/extension.h" + +#include +#include + +#include "plasma/client.h" +#include "plasma/common.h" +#include "plasma/io.h" +#include "plasma/protocol.h" + +PyObject* PlasmaOutOfMemoryError; +PyObject* PlasmaObjectExistsError; + +PyObject* PyPlasma_connect(PyObject* self, PyObject* args) { + const char* store_socket_name; + const char* manager_socket_name; + int release_delay; + if (!PyArg_ParseTuple( + args, "ssi", &store_socket_name, &manager_socket_name, &release_delay)) { + return NULL; + } + PlasmaClient* client = new PlasmaClient(); + ARROW_CHECK_OK(client->Connect(store_socket_name, manager_socket_name, release_delay)); + + return PyCapsule_New(client, "plasma", NULL); +} + +PyObject* PyPlasma_disconnect(PyObject* self, PyObject* args) { + PyObject* client_capsule; + if (!PyArg_ParseTuple(args, "O", &client_capsule)) { return NULL; } + PlasmaClient* client; + ARROW_CHECK(PyObjectToPlasmaClient(client_capsule, &client)); + ARROW_CHECK_OK(client->Disconnect()); + /* We use the context of the connection capsule to indicate if the connection + * is still active (if the context is NULL) or if it is closed (if the context + * is (void*) 0x1). This is neccessary because the primary pointer of the + * capsule cannot be NULL. 
*/ + PyCapsule_SetContext(client_capsule, reinterpret_cast(0x1)); + Py_RETURN_NONE; +} + +PyObject* PyPlasma_create(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + Py_ssize_t size; + PyObject* metadata; + if (!PyArg_ParseTuple(args, "O&O&nO", PyObjectToPlasmaClient, &client, + PyStringToUniqueID, &object_id, &size, &metadata)) { + return NULL; + } + if (!PyByteArray_Check(metadata)) { + PyErr_SetString(PyExc_TypeError, "metadata must be a bytearray"); + return NULL; + } + uint8_t* data; + Status s = client->Create(object_id, size, + reinterpret_cast(PyByteArray_AsString(metadata)), + PyByteArray_Size(metadata), &data); + if (s.IsPlasmaObjectExists()) { + PyErr_SetString(PlasmaObjectExistsError, + "An object with this ID already exists in the plasma " + "store."); + return NULL; + } + if (s.IsPlasmaStoreFull()) { + PyErr_SetString(PlasmaOutOfMemoryError, + "The plasma store ran out of memory and could not create " + "this object."); + return NULL; + } + ARROW_CHECK(s.ok()); + +#if PY_MAJOR_VERSION >= 3 + return PyMemoryView_FromMemory(reinterpret_cast(data), size, PyBUF_WRITE); +#else + return PyBuffer_FromReadWriteMemory(reinterpret_cast(data), size); +#endif +} + +PyObject* PyPlasma_hash(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, + &object_id)) { + return NULL; + } + unsigned char digest[kDigestSize]; + bool success = plasma_compute_object_hash(client, object_id, digest); + if (success) { + PyObject* digest_string = + PyBytes_FromStringAndSize(reinterpret_cast(digest), kDigestSize); + return digest_string; + } else { + Py_RETURN_NONE; + } +} + +PyObject* PyPlasma_seal(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, + &object_id)) { + return NULL; + } + ARROW_CHECK_OK(client->Seal(object_id)); + Py_RETURN_NONE; +} + +PyObject* PyPlasma_release(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, + &object_id)) { + return NULL; + } + ARROW_CHECK_OK(client->Release(object_id)); + Py_RETURN_NONE; +} + +PyObject* PyPlasma_get(PyObject* self, PyObject* args) { + PlasmaClient* client; + PyObject* object_id_list; + Py_ssize_t timeout_ms; + if (!PyArg_ParseTuple( + args, "O&On", PyObjectToPlasmaClient, &client, &object_id_list, &timeout_ms)) { + return NULL; + } + + Py_ssize_t num_object_ids = PyList_Size(object_id_list); + std::vector object_ids(num_object_ids); + std::vector object_buffers(num_object_ids); + + for (int i = 0; i < num_object_ids; ++i) { + PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]); + } + + Py_BEGIN_ALLOW_THREADS; + ARROW_CHECK_OK( + client->Get(object_ids.data(), num_object_ids, timeout_ms, object_buffers.data())); + Py_END_ALLOW_THREADS; + + PyObject* returns = PyList_New(num_object_ids); + for (int i = 0; i < num_object_ids; ++i) { + if (object_buffers[i].data_size != -1) { + /* The object was retrieved, so return the object. 
*/ + PyObject* t = PyTuple_New(2); + Py_ssize_t data_size = static_cast(object_buffers[i].data_size); + Py_ssize_t metadata_size = static_cast(object_buffers[i].metadata_size); +#if PY_MAJOR_VERSION >= 3 + char* data = reinterpret_cast(object_buffers[i].data); + char* metadata = reinterpret_cast(object_buffers[i].metadata); + PyTuple_SET_ITEM(t, 0, PyMemoryView_FromMemory(data, data_size, PyBUF_READ)); + PyTuple_SET_ITEM( + t, 1, PyMemoryView_FromMemory(metadata, metadata_size, PyBUF_READ)); +#else + void* data = reinterpret_cast(object_buffers[i].data); + void* metadata = reinterpret_cast(object_buffers[i].metadata); + PyTuple_SET_ITEM(t, 0, PyBuffer_FromMemory(data, data_size)); + PyTuple_SET_ITEM(t, 1, PyBuffer_FromMemory(metadata, metadata_size)); +#endif + ARROW_CHECK(PyList_SetItem(returns, i, t) == 0); + } else { + /* The object was not retrieved, so just add None to the list of return + * values. */ + Py_INCREF(Py_None); + ARROW_CHECK(PyList_SetItem(returns, i, Py_None) == 0); + } + } + return returns; +} + +PyObject* PyPlasma_contains(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, + &object_id)) { + return NULL; + } + bool has_object; + ARROW_CHECK_OK(client->Contains(object_id, &has_object)); + + if (has_object) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +PyObject* PyPlasma_fetch(PyObject* self, PyObject* args) { + PlasmaClient* client; + PyObject* object_id_list; + if (!PyArg_ParseTuple(args, "O&O", PyObjectToPlasmaClient, &client, &object_id_list)) { + return NULL; + } + if (client->get_manager_fd() == -1) { + PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); + return NULL; + } + Py_ssize_t n = PyList_Size(object_id_list); + ObjectID* object_ids = new ObjectID[n]; + for (int i = 0; i < n; ++i) { + PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]); + } + ARROW_CHECK_OK(client->Fetch(static_cast(n), object_ids)); + delete[] object_ids; + Py_RETURN_NONE; +} + +PyObject* PyPlasma_wait(PyObject* self, PyObject* args) { + PlasmaClient* client; + PyObject* object_id_list; + Py_ssize_t timeout; + int num_returns; + if (!PyArg_ParseTuple(args, "O&Oni", PyObjectToPlasmaClient, &client, &object_id_list, + &timeout, &num_returns)) { + return NULL; + } + Py_ssize_t n = PyList_Size(object_id_list); + + if (client->get_manager_fd() == -1) { + PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); + return NULL; + } + if (num_returns < 0) { + PyErr_SetString( + PyExc_RuntimeError, "The argument num_returns cannot be less than zero."); + return NULL; + } + if (num_returns > n) { + PyErr_SetString(PyExc_RuntimeError, + "The argument num_returns cannot be greater than len(object_ids)"); + return NULL; + } + int64_t threshold = 1 << 30; + if (timeout > threshold) { + PyErr_SetString( + PyExc_RuntimeError, "The argument timeout cannot be greater than 2 ** 30."); + return NULL; + } + + std::vector object_requests(n); + for (int i = 0; i < n; ++i) { + ARROW_CHECK(PyStringToUniqueID(PyList_GetItem(object_id_list, i), + &object_requests[i].object_id) == 1); + object_requests[i].type = PLASMA_QUERY_ANYWHERE; + } + /* Drop the global interpreter lock while we are waiting, so other threads can + * run. 
*/ + int num_return_objects; + Py_BEGIN_ALLOW_THREADS; + ARROW_CHECK_OK( + client->Wait(n, object_requests.data(), num_returns, timeout, &num_return_objects)); + Py_END_ALLOW_THREADS; + + int num_to_return = std::min(num_return_objects, num_returns); + PyObject* ready_ids = PyList_New(num_to_return); + PyObject* waiting_ids = PySet_New(object_id_list); + int num_returned = 0; + for (int i = 0; i < n; ++i) { + if (num_returned == num_to_return) { break; } + if (object_requests[i].status == ObjectStatus_Local || + object_requests[i].status == ObjectStatus_Remote) { + PyObject* ready = PyBytes_FromStringAndSize( + reinterpret_cast(&object_requests[i].object_id), + sizeof(object_requests[i].object_id)); + PyList_SetItem(ready_ids, num_returned, ready); + PySet_Discard(waiting_ids, ready); + num_returned += 1; + } else { + ARROW_CHECK(object_requests[i].status == ObjectStatus_Nonexistent); + } + } + ARROW_CHECK(num_returned == num_to_return); + /* Return both the ready IDs and the remaining IDs. */ + PyObject* t = PyTuple_New(2); + PyTuple_SetItem(t, 0, ready_ids); + PyTuple_SetItem(t, 1, waiting_ids); + return t; +} + +PyObject* PyPlasma_evict(PyObject* self, PyObject* args) { + PlasmaClient* client; + Py_ssize_t num_bytes; + if (!PyArg_ParseTuple(args, "O&n", PyObjectToPlasmaClient, &client, &num_bytes)) { + return NULL; + } + int64_t evicted_bytes; + ARROW_CHECK_OK(client->Evict(static_cast(num_bytes), evicted_bytes)); + return PyLong_FromSsize_t(static_cast(evicted_bytes)); +} + +PyObject* PyPlasma_delete(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, + &object_id)) { + return NULL; + } + ARROW_CHECK_OK(client->Delete(object_id)); + Py_RETURN_NONE; +} + +PyObject* PyPlasma_transfer(PyObject* self, PyObject* args) { + PlasmaClient* client; + ObjectID object_id; + const char* addr; + int port; + if (!PyArg_ParseTuple(args, "O&O&si", PyObjectToPlasmaClient, &client, + PyStringToUniqueID, &object_id, &addr, &port)) { + return NULL; + } + + if (client->get_manager_fd() == -1) { + PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); + return NULL; + } + + ARROW_CHECK_OK(client->Transfer(addr, port, object_id)); + Py_RETURN_NONE; +} + +PyObject* PyPlasma_subscribe(PyObject* self, PyObject* args) { + PlasmaClient* client; + if (!PyArg_ParseTuple(args, "O&", PyObjectToPlasmaClient, &client)) { return NULL; } + + int sock; + ARROW_CHECK_OK(client->Subscribe(&sock)); + return PyLong_FromLong(sock); +} + +PyObject* PyPlasma_receive_notification(PyObject* self, PyObject* args) { + int plasma_sock; + + if (!PyArg_ParseTuple(args, "i", &plasma_sock)) { return NULL; } + /* Receive object notification from the plasma connection socket. If the + * object was added, return a tuple of its fields: ObjectID, data_size, + * metadata_size. If the object was deleted, data_size and metadata_size will + * be set to -1. */ + uint8_t* notification = read_message_async(plasma_sock); + if (notification == NULL) { + PyErr_SetString( + PyExc_RuntimeError, "Failed to read object notification from Plasma socket"); + return NULL; + } + auto object_info = flatbuffers::GetRoot(notification); + /* Construct a tuple from object_info and return. 
*/ + PyObject* t = PyTuple_New(3); + PyTuple_SetItem(t, 0, PyBytes_FromStringAndSize(object_info->object_id()->data(), + object_info->object_id()->size())); + if (object_info->is_deletion()) { + PyTuple_SetItem(t, 1, PyLong_FromLong(-1)); + PyTuple_SetItem(t, 2, PyLong_FromLong(-1)); + } else { + PyTuple_SetItem(t, 1, PyLong_FromLong(object_info->data_size())); + PyTuple_SetItem(t, 2, PyLong_FromLong(object_info->metadata_size())); + } + + delete[] notification; + return t; +} + +static PyMethodDef plasma_methods[] = { + {"connect", PyPlasma_connect, METH_VARARGS, "Connect to plasma."}, + {"disconnect", PyPlasma_disconnect, METH_VARARGS, "Disconnect from plasma."}, + {"create", PyPlasma_create, METH_VARARGS, "Create a new plasma object."}, + {"hash", PyPlasma_hash, METH_VARARGS, "Compute the hash of a plasma object."}, + {"seal", PyPlasma_seal, METH_VARARGS, "Seal a plasma object."}, + {"get", PyPlasma_get, METH_VARARGS, "Get a plasma object."}, + {"contains", PyPlasma_contains, METH_VARARGS, + "Does the plasma store contain this plasma object?"}, + {"fetch", PyPlasma_fetch, METH_VARARGS, + "Fetch the object from another plasma manager instance."}, + {"wait", PyPlasma_wait, METH_VARARGS, + "Wait until num_returns objects in object_ids are ready."}, + {"evict", PyPlasma_evict, METH_VARARGS, + "Evict some objects until we recover some number of bytes."}, + {"release", PyPlasma_release, METH_VARARGS, "Release the plasma object."}, + {"delete", PyPlasma_delete, METH_VARARGS, "Delete a plasma object."}, + {"transfer", PyPlasma_transfer, METH_VARARGS, + "Transfer object to another plasma manager."}, + {"subscribe", PyPlasma_subscribe, METH_VARARGS, + "Subscribe to the plasma notification socket."}, + {"receive_notification", PyPlasma_receive_notification, METH_VARARGS, + "Receive next notification from plasma notification socket."}, + {NULL} /* Sentinel */ +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, "libplasma", /* m_name */ + "A Python client library for plasma.", /* m_doc */ + 0, /* m_size */ + plasma_methods, /* m_methods */ + NULL, /* m_reload */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL, /* m_free */ +}; +#endif + +#if PY_MAJOR_VERSION >= 3 +#define INITERROR return NULL +#else +#define INITERROR return +#endif + +#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ +#define PyMODINIT_FUNC void +#endif + +#if PY_MAJOR_VERSION >= 3 +#define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void) +#else +#define MOD_INIT(name) PyMODINIT_FUNC init##name(void) +#endif + +MOD_INIT(libplasma) { +#if PY_MAJOR_VERSION >= 3 + PyObject* m = PyModule_Create(&moduledef); +#else + PyObject* m = + Py_InitModule3("libplasma", plasma_methods, "A Python client library for plasma."); +#endif + + /* Create a custom exception for when an object ID is reused. */ + char plasma_object_exists_error[] = "plasma_object_exists.error"; + PlasmaObjectExistsError = PyErr_NewException(plasma_object_exists_error, NULL, NULL); + Py_INCREF(PlasmaObjectExistsError); + PyModule_AddObject(m, "plasma_object_exists_error", PlasmaObjectExistsError); + /* Create a custom exception for when the plasma store is out of memory. 
*/ + char plasma_out_of_memory_error[] = "plasma_out_of_memory.error"; + PlasmaOutOfMemoryError = PyErr_NewException(plasma_out_of_memory_error, NULL, NULL); + Py_INCREF(PlasmaOutOfMemoryError); + PyModule_AddObject(m, "plasma_out_of_memory_error", PlasmaOutOfMemoryError); + +#if PY_MAJOR_VERSION >= 3 + return m; +#endif +} diff --git a/cpp/src/plasma/extension.h b/cpp/src/plasma/extension.h new file mode 100644 index 0000000000000..cee30abb3592d --- /dev/null +++ b/cpp/src/plasma/extension.h @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_EXTENSION_H +#define PLASMA_EXTENSION_H + +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE +#include + +#include "bytesobject.h" // NOLINT + +#include "plasma/client.h" +#include "plasma/common.h" + +static int PyObjectToPlasmaClient(PyObject* object, PlasmaClient** client) { + if (PyCapsule_IsValid(object, "plasma")) { + *client = reinterpret_cast(PyCapsule_GetPointer(object, "plasma")); + return 1; + } else { + PyErr_SetString(PyExc_TypeError, "must be a 'plasma' capsule"); + return 0; + } +} + +int PyStringToUniqueID(PyObject* object, ObjectID* object_id) { + if (PyBytes_Check(object)) { + memcpy(object_id, PyBytes_AsString(object), sizeof(ObjectID)); + return 1; + } else { + PyErr_SetString(PyExc_TypeError, "must be a 20 character string"); + return 0; + } +} + +#endif // PLASMA_EXTENSION_H diff --git a/cpp/src/plasma/fling.cc b/cpp/src/plasma/fling.cc new file mode 100644 index 0000000000000..79da4f43a192a --- /dev/null +++ b/cpp/src/plasma/fling.cc @@ -0,0 +1,90 @@ +// Copyright 2013 Sharvil Nanavati +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
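+
+// Illustrative round trip between two endpoints of a Unix-domain socket pair
+// (a sketch only; the file being opened is hypothetical and error handling is
+// omitted):
+//
+//   int sv[2];
+//   socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
+//   int fd = open("/tmp/example_file", O_RDONLY);  // hypothetical file
+//   send_fd(sv[0], fd);                 // sender side
+//   int received_fd = recv_fd(sv[1]);   // receiver gets its own descriptor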
+ +#include "plasma/fling.h" + +#include + +void init_msg(struct msghdr* msg, struct iovec* iov, char* buf, size_t buf_len) { + iov->iov_base = buf; + iov->iov_len = 1; + + msg->msg_iov = iov; + msg->msg_iovlen = 1; + msg->msg_control = buf; + msg->msg_controllen = buf_len; + msg->msg_name = NULL; + msg->msg_namelen = 0; +} + +int send_fd(int conn, int fd) { + struct msghdr msg; + struct iovec iov; + char buf[CMSG_SPACE(sizeof(int))]; + memset(&buf, 0, CMSG_SPACE(sizeof(int))); + + init_msg(&msg, &iov, buf, sizeof(buf)); + + struct cmsghdr* header = CMSG_FIRSTHDR(&msg); + header->cmsg_level = SOL_SOCKET; + header->cmsg_type = SCM_RIGHTS; + header->cmsg_len = CMSG_LEN(sizeof(int)); + *reinterpret_cast(CMSG_DATA(header)) = fd; + + // Send file descriptor. + ssize_t r = sendmsg(conn, &msg, 0); + if (r >= 0) { + return 0; + } else { + return static_cast(r); + } +} + +int recv_fd(int conn) { + struct msghdr msg; + struct iovec iov; + char buf[CMSG_SPACE(sizeof(int))]; + init_msg(&msg, &iov, buf, sizeof(buf)); + + if (recvmsg(conn, &msg, 0) == -1) return -1; + + int found_fd = -1; + int oh_noes = 0; + for (struct cmsghdr* header = CMSG_FIRSTHDR(&msg); header != NULL; + header = CMSG_NXTHDR(&msg, header)) + if (header->cmsg_level == SOL_SOCKET && header->cmsg_type == SCM_RIGHTS) { + ssize_t count = + (header->cmsg_len - (CMSG_DATA(header) - (unsigned char*)header)) / sizeof(int); + for (int i = 0; i < count; ++i) { + int fd = (reinterpret_cast(CMSG_DATA(header)))[i]; + if (found_fd == -1) { + found_fd = fd; + } else { + close(fd); + oh_noes = 1; + } + } + } + + // The sender sent us more than one file descriptor. We've closed + // them all to prevent fd leaks but notify the caller that we got + // a bad message. + if (oh_noes) { + close(found_fd); + errno = EBADMSG; + return -1; + } + + return found_fd; +} diff --git a/cpp/src/plasma/fling.h b/cpp/src/plasma/fling.h new file mode 100644 index 0000000000000..78ac9d17f26fb --- /dev/null +++ b/cpp/src/plasma/fling.h @@ -0,0 +1,52 @@ +// Copyright 2013 Sharvil Nanavati +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// FLING: Exchanging file descriptors over sockets +// +// This is a little library for sending file descriptors over a socket +// between processes. The reason for doing that (as opposed to using +// filenames to share the files) is so (a) no files remain in the +// filesystem after all the processes terminate, (b) to make sure that +// there are no name collisions and (c) to be able to control who has +// access to the data. +// +// Most of the code is from https://github.com/sharvil/flingfd + +#include +#include +#include +#include +#include + +// This is neccessary for Mac OS X, see http://www.apuebook.com/faqs2e.html +// (10). 
+#if !defined(CMSG_SPACE) && !defined(CMSG_LEN) +#define CMSG_SPACE(len) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + __DARWIN_ALIGN32(len)) +#define CMSG_LEN(len) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (len)) +#endif + +void init_msg(struct msghdr* msg, struct iovec* iov, char* buf, size_t buf_len); + +// Send a file descriptor over a unix domain socket. +// +// @param conn Unix domain socket to send the file descriptor over. +// @param fd File descriptor to send over. +// @return Status code which is < 0 on failure. +int send_fd(int conn, int fd); + +// Receive a file descriptor over a unix domain socket. +// +// @param conn Unix domain socket to receive the file descriptor from. +// @return File descriptor or a value < 0 on failure. +int recv_fd(int conn); diff --git a/cpp/src/plasma/format/.gitignore b/cpp/src/plasma/format/.gitignore new file mode 100644 index 0000000000000..b2ddb055dcbc6 --- /dev/null +++ b/cpp/src/plasma/format/.gitignore @@ -0,0 +1 @@ +*_generated.h diff --git a/cpp/src/plasma/format/common.fbs b/cpp/src/plasma/format/common.fbs new file mode 100644 index 0000000000000..4d7d2852aec3d --- /dev/null +++ b/cpp/src/plasma/format/common.fbs @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Object information data structure. +table ObjectInfo { + // Object ID of this object. + object_id: string; + // Number of bytes the content of this object occupies in memory. + data_size: long; + // Number of bytes the metadata of this object occupies in memory. + metadata_size: long; + // Unix epoch of when this object was created. + create_time: long; + // How long creation of this object took. + construct_duration: long; + // Hash of the object content. + digest: string; + // Specifies if this object was deleted or added. + is_deletion: bool; +} diff --git a/cpp/src/plasma/format/plasma.fbs b/cpp/src/plasma/format/plasma.fbs new file mode 100644 index 0000000000000..23782ade539d4 --- /dev/null +++ b/cpp/src/plasma/format/plasma.fbs @@ -0,0 +1,291 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Plasma protocol specification + +enum MessageType:int { + // Create a new object. + PlasmaCreateRequest = 1, + PlasmaCreateReply, + // Seal an object. + PlasmaSealRequest, + PlasmaSealReply, + // Get an object that is stored on the local Plasma store. + PlasmaGetRequest, + PlasmaGetReply, + // Release an object. + PlasmaReleaseRequest, + PlasmaReleaseReply, + // Delete an object. + PlasmaDeleteRequest, + PlasmaDeleteReply, + // Get status of an object. + PlasmaStatusRequest, + PlasmaStatusReply, + // See if the store contains an object (will be deprecated). + PlasmaContainsRequest, + PlasmaContainsReply, + // Get information for a newly connecting client. + PlasmaConnectRequest, + PlasmaConnectReply, + // Make room for new objects in the plasma store. + PlasmaEvictRequest, + PlasmaEvictReply, + // Fetch objects from remote Plasma stores. + PlasmaFetchRequest, + // Wait for objects to be ready either from local or remote Plasma stores. + PlasmaWaitRequest, + PlasmaWaitReply, + // Subscribe to a list of objects or to all objects. + PlasmaSubscribeRequest, + // Unsubscribe. + PlasmaUnsubscribeRequest, + // Sending and receiving data. + // PlasmaDataRequest initiates sending the data, there will be one + // such message per data transfer. + PlasmaDataRequest, + // PlasmaDataReply contains the actual data and is sent back to the + // object store that requested the data. For each transfer, multiple + // reply messages get sent. Each one contains a fixed number of bytes. + PlasmaDataReply, + // Object notifications. + PlasmaNotification +} + +enum PlasmaError:int { + // Operation was successful. + OK, + // Trying to create an object that already exists. + ObjectExists, + // Trying to access an object that doesn't exist. + ObjectNonexistent, + // Trying to create an object but there isn't enough space in the store. + OutOfMemory +} + +// Plasma store messages + +struct PlasmaObjectSpec { + // Index of the memory segment (= memory mapped file) that + // this object is allocated in. + segment_index: int; + // Size in bytes of this segment (needed to call mmap). + mmap_size: ulong; + // The offset in bytes in the memory mapped file of the data. + data_offset: ulong; + // The size in bytes of the data. + data_size: ulong; + // The offset in bytes in the memory mapped file of the metadata. + metadata_offset: ulong; + // The size in bytes of the metadata. + metadata_size: ulong; +} + +table PlasmaCreateRequest { + // ID of the object to be created. + object_id: string; + // The size of the object's data in bytes. + data_size: ulong; + // The size of the object's metadata in bytes. + metadata_size: ulong; +} + +table PlasmaCreateReply { + // ID of the object that was created. + object_id: string; + // The object that is returned with this reply. + plasma_object: PlasmaObjectSpec; + // Error that occurred for this call. + error: PlasmaError; +} + +table PlasmaSealRequest { + // ID of the object to be sealed. + object_id: string; + // Hash of the object data. + digest: string; +} + +table PlasmaSealReply { + // ID of the object that was sealed. + object_id: string; + // Error code. + error: PlasmaError; +} + +table PlasmaGetRequest { + // IDs of the objects stored at local Plasma store we are getting. + object_ids: [string]; + // The number of milliseconds before the request should timeout. + timeout_ms: long; +} + +table PlasmaGetReply { + // IDs of the objects being returned. 
+ // This number can be smaller than the number of requested + // objects if not all requested objects are stored and sealed + // in the local Plasma store. + object_ids: [string]; + // Plasma object information, in the same order as their IDs. + plasma_objects: [PlasmaObjectSpec]; + // The number of elements in both object_ids and plasma_objects arrays must agree. +} + +table PlasmaReleaseRequest { + // ID of the object to be released. + object_id: string; +} + +table PlasmaReleaseReply { + // ID of the object that was released. + object_id: string; + // Error code. + error: PlasmaError; +} + +table PlasmaDeleteRequest { + // ID of the object to be deleted. + object_id: string; +} + +table PlasmaDeleteReply { + // ID of the object that was deleted. + object_id: string; + // Error code. + error: PlasmaError; +} + +table PlasmaStatusRequest { + // IDs of the objects stored at local Plasma store we request the status of. + object_ids: [string]; +} + +enum ObjectStatus:int { + // Object is stored in the local Plasma Store. + Local = 1, + // Object is stored on a remote Plasma store, and it is not stored on the + // local Plasma Store. + Remote, + // Object is not stored in the system. + Nonexistent, + // Object is currently transferred from a remote Plasma store the the local + // Plasma Store. + Transfer +} + +table PlasmaStatusReply { + // IDs of the objects being returned. + object_ids: [string]; + // Status of the object. + status: [ObjectStatus]; +} + +// PlasmaContains is a subset of PlasmaStatus which does not +// involve the plasma manager, only the store. We should consider +// unifying them in the future and deprecating PlasmaContains. + +table PlasmaContainsRequest { + // ID of the object we are querying. + object_id: string; +} + +table PlasmaContainsReply { + // ID of the object we are querying. + object_id: string; + // 1 if the object is in the store and 0 otherwise. + has_object: int; +} + +// PlasmaConnect is used by a plasma client the first time it connects with the +// store. This is not really necessary, but is used to get some information +// about the store such as its memory capacity. + +table PlasmaConnectRequest { +} + +table PlasmaConnectReply { + // The memory capacity of the store. + memory_capacity: long; +} + +table PlasmaEvictRequest { + // Number of bytes that shall be freed. + num_bytes: ulong; +} + +table PlasmaEvictReply { + // Number of bytes that have been freed. + num_bytes: ulong; +} + +table PlasmaFetchRequest { + // IDs of objects to be gotten. + object_ids: [string]; +} + +table ObjectRequestSpec { + // ID of the object. + object_id: string; + // The type of the object. This specifies whether we + // will be waiting for an object store in the local or + // global Plasma store. + type: int; +} + +table PlasmaWaitRequest { + // Array of object requests whose status we are asking for. + object_requests: [ObjectRequestSpec]; + // Number of objects expected to be returned, if available. + num_ready_objects: int; + // timeout + timeout: long; +} + +table ObjectReply { + // ID of the object. + object_id: string; + // The object status. This specifies where the object is stored. + status: int; +} + +table PlasmaWaitReply { + // Array of object requests being returned. + object_requests: [ObjectReply]; + // Number of objects expected to be returned, if available. + num_ready_objects: int; +} + +table PlasmaSubscribeRequest { +} + +table PlasmaDataRequest { + // ID of the object that is requested. 
+ object_id: string; + // The host address where the data shall be sent to. + address: string; + // The port of the manager the data shall be sent to. + port: int; +} + +table PlasmaDataReply { + // ID of the object that will be sent. + object_id: string; + // Size of the object data in bytes. + object_size: ulong; + // Size of the metadata in bytes. + metadata_size: ulong; +} diff --git a/cpp/src/plasma/io.cc b/cpp/src/plasma/io.cc new file mode 100644 index 0000000000000..5875ebb7ae611 --- /dev/null +++ b/cpp/src/plasma/io.cc @@ -0,0 +1,212 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/io.h" + +#include "plasma/common.h" + +using arrow::Status; + +/* Number of times we try binding to a socket. */ +#define NUM_BIND_ATTEMPTS 5 +#define BIND_TIMEOUT_MS 100 + +/* Number of times we try connecting to a socket. */ +#define NUM_CONNECT_ATTEMPTS 50 +#define CONNECT_TIMEOUT_MS 100 + +Status WriteBytes(int fd, uint8_t* cursor, size_t length) { + ssize_t nbytes = 0; + size_t bytesleft = length; + size_t offset = 0; + while (bytesleft > 0) { + /* While we haven't written the whole message, write to the file descriptor, + * advance the cursor, and decrease the amount left to write. */ + nbytes = write(fd, cursor + offset, bytesleft); + if (nbytes < 0) { + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; } + return Status::IOError(std::string(strerror(errno))); + } else if (nbytes == 0) { + return Status::IOError("Encountered unexpected EOF"); + } + ARROW_CHECK(nbytes > 0); + bytesleft -= nbytes; + offset += nbytes; + } + + return Status::OK(); +} + +Status WriteMessage(int fd, int64_t type, int64_t length, uint8_t* bytes) { + int64_t version = PLASMA_PROTOCOL_VERSION; + RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast(&version), sizeof(version))); + RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast(&type), sizeof(type))); + RETURN_NOT_OK(WriteBytes(fd, reinterpret_cast(&length), sizeof(length))); + return WriteBytes(fd, bytes, length * sizeof(char)); +} + +Status ReadBytes(int fd, uint8_t* cursor, size_t length) { + ssize_t nbytes = 0; + /* Termination condition: EOF or read 'length' bytes total. 
*/ + size_t bytesleft = length; + size_t offset = 0; + while (bytesleft > 0) { + nbytes = read(fd, cursor + offset, bytesleft); + if (nbytes < 0) { + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; } + return Status::IOError(std::string(strerror(errno))); + } else if (0 == nbytes) { + return Status::IOError("Encountered unexpected EOF"); + } + ARROW_CHECK(nbytes > 0); + bytesleft -= nbytes; + offset += nbytes; + } + + return Status::OK(); +} + +Status ReadMessage(int fd, int64_t* type, std::vector* buffer) { + int64_t version; + RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(&version), sizeof(version)), + *type = DISCONNECT_CLIENT); + ARROW_CHECK(version == PLASMA_PROTOCOL_VERSION) << "version = " << version; + size_t length; + RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(type), sizeof(*type)), + *type = DISCONNECT_CLIENT); + RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(&length), sizeof(length)), + *type = DISCONNECT_CLIENT); + if (length > buffer->size()) { buffer->resize(length); } + RETURN_NOT_OK_ELSE(ReadBytes(fd, buffer->data(), length), *type = DISCONNECT_CLIENT); + return Status::OK(); +} + +int bind_ipc_sock(const std::string& pathname, bool shall_listen) { + struct sockaddr_un socket_address; + int socket_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (socket_fd < 0) { + ARROW_LOG(ERROR) << "socket() failed for pathname " << pathname; + return -1; + } + /* Tell the system to allow the port to be reused. */ + int on = 1; + if (setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&on), + sizeof(on)) < 0) { + ARROW_LOG(ERROR) << "setsockopt failed for pathname " << pathname; + close(socket_fd); + return -1; + } + + unlink(pathname.c_str()); + memset(&socket_address, 0, sizeof(socket_address)); + socket_address.sun_family = AF_UNIX; + if (pathname.size() + 1 > sizeof(socket_address.sun_path)) { + ARROW_LOG(ERROR) << "Socket pathname is too long."; + close(socket_fd); + return -1; + } + strncpy(socket_address.sun_path, pathname.c_str(), pathname.size() + 1); + + if (bind(socket_fd, (struct sockaddr*)&socket_address, sizeof(socket_address)) != 0) { + ARROW_LOG(ERROR) << "Bind failed for pathname " << pathname; + close(socket_fd); + return -1; + } + if (shall_listen && listen(socket_fd, 128) == -1) { + ARROW_LOG(ERROR) << "Could not listen to socket " << pathname; + close(socket_fd); + return -1; + } + return socket_fd; +} + +int connect_ipc_sock_retry( + const std::string& pathname, int num_retries, int64_t timeout) { + /* Pick the default values if the user did not specify. */ + if (num_retries < 0) { num_retries = NUM_CONNECT_ATTEMPTS; } + if (timeout < 0) { timeout = CONNECT_TIMEOUT_MS; } + + int fd = -1; + for (int num_attempts = 0; num_attempts < num_retries; ++num_attempts) { + fd = connect_ipc_sock(pathname); + if (fd >= 0) { break; } + if (num_attempts == 0) { + ARROW_LOG(ERROR) << "Connection to socket failed for pathname " << pathname; + } + /* Sleep for timeout milliseconds. */ + usleep(static_cast(timeout * 1000)); + } + /* If we could not connect to the socket, exit. 
*/ + if (fd == -1) { ARROW_LOG(FATAL) << "Could not connect to socket " << pathname; } + return fd; +} + +int connect_ipc_sock(const std::string& pathname) { + struct sockaddr_un socket_address; + int socket_fd; + + socket_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (socket_fd < 0) { + ARROW_LOG(ERROR) << "socket() failed for pathname " << pathname; + return -1; + } + + memset(&socket_address, 0, sizeof(socket_address)); + socket_address.sun_family = AF_UNIX; + if (pathname.size() + 1 > sizeof(socket_address.sun_path)) { + ARROW_LOG(ERROR) << "Socket pathname is too long."; + return -1; + } + strncpy(socket_address.sun_path, pathname.c_str(), pathname.size() + 1); + + if (connect(socket_fd, (struct sockaddr*)&socket_address, sizeof(socket_address)) != + 0) { + close(socket_fd); + return -1; + } + + return socket_fd; +} + +int AcceptClient(int socket_fd) { + int client_fd = accept(socket_fd, NULL, NULL); + if (client_fd < 0) { + ARROW_LOG(ERROR) << "Error reading from socket."; + return -1; + } + return client_fd; +} + +uint8_t* read_message_async(int sock) { + int64_t size; + Status s = ReadBytes(sock, reinterpret_cast(&size), sizeof(int64_t)); + if (!s.ok()) { + /* The other side has closed the socket. */ + ARROW_LOG(DEBUG) << "Socket has been closed, or some other error has occurred."; + close(sock); + return NULL; + } + uint8_t* message = reinterpret_cast(malloc(size)); + s = ReadBytes(sock, message, size); + if (!s.ok()) { + /* The other side has closed the socket. */ + ARROW_LOG(DEBUG) << "Socket has been closed, or some other error has occurred."; + close(sock); + return NULL; + } + return message; +} diff --git a/cpp/src/plasma/io.h b/cpp/src/plasma/io.h new file mode 100644 index 0000000000000..43c3fb535497f --- /dev/null +++ b/cpp/src/plasma/io.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_IO_H +#define PLASMA_IO_H + +#include +#include +#include +#include + +#include +#include + +#include "arrow/status.h" + +// TODO(pcm): Replace our own custom message header (message type, +// message length, plasma protocol verion) with one that is serialized +// using flatbuffers. 
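The custom header mentioned in the TODO above frames every message as three little-endian int64 fields, version, type, and length, followed by the flatbuffer payload, which is what WriteMessage() and ReadMessage() in io.cc implement. A sketch of a server-side consumer, assuming plasma/io.h is included and the message-type constants defined just below; HandleOneMessage is a hypothetical name.

    arrow::Status HandleOneMessage(int client_fd) {
      int64_t type;
      std::vector<uint8_t> buffer;
      // ReadMessage strips the [version][type][length] header and leaves only
      // the flatbuffer payload in `buffer`; on socket error it reports
      // DISCONNECT_CLIENT through `type`.
      RETURN_NOT_OK(ReadMessage(client_fd, &type, &buffer));
      if (type == DISCONNECT_CLIENT) {
        return arrow::Status::IOError("client disconnected");
      }
      // Dispatch on `type` and decode buffer.data() with the matching
      // Read*Request() helper from plasma/protocol.h.
      return arrow::Status::OK();
    }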
+#define PLASMA_PROTOCOL_VERSION 0x0000000000000000 +#define DISCONNECT_CLIENT 0 + +arrow::Status WriteBytes(int fd, uint8_t* cursor, size_t length); + +arrow::Status WriteMessage(int fd, int64_t type, int64_t length, uint8_t* bytes); + +arrow::Status ReadBytes(int fd, uint8_t* cursor, size_t length); + +arrow::Status ReadMessage(int fd, int64_t* type, std::vector* buffer); + +int bind_ipc_sock(const std::string& pathname, bool shall_listen); + +int connect_ipc_sock(const std::string& pathname); + +int connect_ipc_sock_retry(const std::string& pathname, int num_retries, int64_t timeout); + +int AcceptClient(int socket_fd); + +uint8_t* read_message_async(int sock); + +#endif // PLASMA_IO_H diff --git a/cpp/src/plasma/malloc.cc b/cpp/src/plasma/malloc.cc new file mode 100644 index 0000000000000..97c9a16c0c0bd --- /dev/null +++ b/cpp/src/plasma/malloc.cc @@ -0,0 +1,178 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/malloc.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "plasma/common.h" + +extern "C" { +void* fake_mmap(size_t); +int fake_munmap(void*, int64_t); + +#define MMAP(s) fake_mmap(s) +#define MUNMAP(a, s) fake_munmap(a, s) +#define DIRECT_MMAP(s) fake_mmap(s) +#define DIRECT_MUNMAP(a, s) fake_munmap(a, s) +#define USE_DL_PREFIX +#define HAVE_MORECORE 0 +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#define DEFAULT_GRANULARITY ((size_t)128U * 1024U) + +#include "thirdparty/dlmalloc.c" // NOLINT + +#undef MMAP +#undef MUNMAP +#undef DIRECT_MMAP +#undef DIRECT_MUNMAP +#undef USE_DL_PREFIX +#undef HAVE_MORECORE +#undef DEFAULT_GRANULARITY +} + +struct mmap_record { + int fd; + int64_t size; +}; + +namespace { + +/** Hashtable that contains one entry per segment that we got from the OS + * via mmap. Associates the address of that segment with its file descriptor + * and size. */ +std::unordered_map mmap_records; + +} /* namespace */ + +constexpr int GRANULARITY_MULTIPLIER = 2; + +static void* pointer_advance(void* p, ptrdiff_t n) { + return (unsigned char*)p + n; +} + +static void* pointer_retreat(void* p, ptrdiff_t n) { + return (unsigned char*)p - n; +} + +static ptrdiff_t pointer_distance(void const* pfrom, void const* pto) { + return (unsigned char const*)pto - (unsigned char const*)pfrom; +} + +/* Create a buffer. This is creating a temporary file and then + * immediately unlinking it so we do not leave traces in the system. 
*/ +int create_buffer(int64_t size) { + int fd; +#ifdef _WIN32 + if (!CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, + (DWORD)((uint64_t)size >> (CHAR_BIT * sizeof(DWORD))), (DWORD)(uint64_t)size, + NULL)) { + fd = -1; + } +#else +#ifdef __linux__ + constexpr char file_template[] = "/dev/shm/plasmaXXXXXX"; +#else + constexpr char file_template[] = "/tmp/plasmaXXXXXX"; +#endif + char file_name[32]; + strncpy(file_name, file_template, 32); + fd = mkstemp(file_name); + if (fd < 0) return -1; + FILE* file = fdopen(fd, "a+"); + if (!file) { + close(fd); + return -1; + } + if (unlink(file_name) != 0) { + ARROW_LOG(FATAL) << "unlink error"; + return -1; + } + if (ftruncate(fd, (off_t)size) != 0) { + ARROW_LOG(FATAL) << "ftruncate error"; + return -1; + } +#endif + return fd; +} + +void* fake_mmap(size_t size) { + /* Add sizeof(size_t) so that the returned pointer is deliberately not + * page-aligned. This ensures that the segments of memory returned by + * fake_mmap are never contiguous. */ + size += sizeof(size_t); + + int fd = create_buffer(size); + ARROW_CHECK(fd >= 0) << "Failed to create buffer during mmap"; + void* pointer = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (pointer == MAP_FAILED) { return pointer; } + + /* Increase dlmalloc's allocation granularity directly. */ + mparams.granularity *= GRANULARITY_MULTIPLIER; + + mmap_record& record = mmap_records[pointer]; + record.fd = fd; + record.size = size; + + /* We lie to dlmalloc about where mapped memory actually lives. */ + pointer = pointer_advance(pointer, sizeof(size_t)); + ARROW_LOG(DEBUG) << pointer << " = fake_mmap(" << size << ")"; + return pointer; +} + +int fake_munmap(void* addr, int64_t size) { + ARROW_LOG(DEBUG) << "fake_munmap(" << addr << ", " << size << ")"; + addr = pointer_retreat(addr, sizeof(size_t)); + size += sizeof(size_t); + + auto entry = mmap_records.find(addr); + + if (entry == mmap_records.end() || entry->second.size != size) { + /* Reject requests to munmap that don't directly match previous + * calls to mmap, to prevent dlmalloc from trimming. */ + return -1; + } + + int r = munmap(addr, size); + if (r == 0) { close(entry->second.fd); } + + mmap_records.erase(entry); + return r; +} + +void get_malloc_mapinfo(void* addr, int* fd, int64_t* map_size, ptrdiff_t* offset) { + /* TODO(rshin): Implement a more efficient search through mmap_records. */ + for (const auto& entry : mmap_records) { + if (addr >= entry.first && addr < pointer_advance(entry.first, entry.second.size)) { + *fd = entry.second.fd; + *map_size = entry.second.size; + *offset = pointer_distance(entry.first, addr); + return; + } + } + *fd = -1; + *map_size = 0; + *offset = 0; +} diff --git a/cpp/src/plasma/malloc.h b/cpp/src/plasma/malloc.h new file mode 100644 index 0000000000000..b4af2c826b5c9 --- /dev/null +++ b/cpp/src/plasma/malloc.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_MALLOC_H +#define PLASMA_MALLOC_H + +#include +#include + +void get_malloc_mapinfo(void* addr, int* fd, int64_t* map_length, ptrdiff_t* offset); + +#endif // MALLOC_H diff --git a/cpp/src/plasma/plasma.cc b/cpp/src/plasma/plasma.cc new file mode 100644 index 0000000000000..559d8e7f2a65e --- /dev/null +++ b/cpp/src/plasma/plasma.cc @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/plasma.h" + +#include +#include +#include + +#include "plasma/common.h" +#include "plasma/protocol.h" + +int warn_if_sigpipe(int status, int client_sock) { + if (status >= 0) { return 0; } + if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) { + ARROW_LOG(WARNING) << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when " + "sending a message to client on fd " + << client_sock << ". The client on the other end may " + "have hung up."; + return errno; + } + ARROW_LOG(FATAL) << "Failed to write message to client on fd " << client_sock << "."; + return -1; // This is never reached. +} + +/** + * This will create a new ObjectInfo buffer. The first sizeof(int64_t) bytes + * of this buffer are the length of the remaining message and the + * remaining message is a serialized version of the object info. + * + * @param object_info The object info to be serialized + * @return The object info buffer. It is the caller's responsibility to free + * this buffer with "delete" after it has been used. 
+ */ +uint8_t* create_object_info_buffer(ObjectInfoT* object_info) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreateObjectInfo(fbb, object_info); + fbb.Finish(message); + uint8_t* notification = new uint8_t[sizeof(int64_t) + fbb.GetSize()]; + *(reinterpret_cast(notification)) = fbb.GetSize(); + memcpy(notification + sizeof(int64_t), fbb.GetBufferPointer(), fbb.GetSize()); + return notification; +} + +ObjectTableEntry* get_object_table_entry( + PlasmaStoreInfo* store_info, const ObjectID& object_id) { + auto it = store_info->objects.find(object_id); + if (it == store_info->objects.end()) { return NULL; } + return it->second.get(); +} diff --git a/cpp/src/plasma/plasma.h b/cpp/src/plasma/plasma.h new file mode 100644 index 0000000000000..275d0c7a41687 --- /dev/null +++ b/cpp/src/plasma/plasma.h @@ -0,0 +1,191 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_PLASMA_H +#define PLASMA_PLASMA_H + +#include +#include +#include +#include +#include +#include +#include +#include // pid_t + +#include +#include + +#include "arrow/status.h" +#include "arrow/util/logging.h" +#include "format/common_generated.h" +#include "plasma/common.h" + +#define HANDLE_SIGPIPE(s, fd_) \ + do { \ + Status _s = (s); \ + if (!_s.ok()) { \ + if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) { \ + ARROW_LOG(WARNING) \ + << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when " \ + "sending a message to client on fd " \ + << fd_ << ". " \ + "The client on the other end may have hung up."; \ + } else { \ + return _s; \ + } \ + } \ + } while (0); + +/// Allocation granularity used in plasma for object allocation. +#define BLOCK_SIZE 64 + +/// Size of object hash digests. +constexpr int64_t kDigestSize = sizeof(uint64_t); + +struct Client; + +/// Object request data structure. Used in the plasma_wait_for_objects() +/// argument. +typedef struct { + /// The ID of the requested object. If ID_NIL request any object. + ObjectID object_id; + /// Request associated to the object. It can take one of the following values: + /// - PLASMA_QUERY_LOCAL: return if or when the object is available in the + /// local Plasma Store. + /// - PLASMA_QUERY_ANYWHERE: return if or when the object is available in + /// the system (i.e., either in the local or a remote Plasma Store). + int type; + /// Object status. Same as the status returned by plasma_status() function + /// call. This is filled in by plasma_wait_for_objects1(): + /// - ObjectStatus_Local: object is ready at the local Plasma Store. + /// - ObjectStatus_Remote: object is ready at a remote Plasma Store. + /// - ObjectStatus_Nonexistent: object does not exist in the system. 
+ /// - PLASMA_CLIENT_IN_TRANSFER, if the object is currently being scheduled + /// for being transferred or it is transferring. + int status; +} ObjectRequest; + +/// Mapping from object IDs to type and status of the request. +typedef std::unordered_map ObjectRequestMap; + +/// Handle to access memory mapped file and map it into client address space. +typedef struct { + /// The file descriptor of the memory mapped file in the store. It is used as + /// a unique identifier of the file in the client to look up the corresponding + /// file descriptor on the client's side. + int store_fd; + /// The size in bytes of the memory mapped file. + int64_t mmap_size; +} object_handle; + +// TODO(pcm): Replace this by the flatbuffers message PlasmaObjectSpec. +typedef struct { + /// Handle for memory mapped file the object is stored in. + object_handle handle; + /// The offset in bytes in the memory mapped file of the data. + ptrdiff_t data_offset; + /// The offset in bytes in the memory mapped file of the metadata. + ptrdiff_t metadata_offset; + /// The size in bytes of the data. + int64_t data_size; + /// The size in bytes of the metadata. + int64_t metadata_size; +} PlasmaObject; + +typedef enum { + /// Object was created but not sealed in the local Plasma Store. + PLASMA_CREATED = 1, + /// Object is sealed and stored in the local Plasma Store. + PLASMA_SEALED +} object_state; + +typedef enum { + /// The object was not found. + OBJECT_NOT_FOUND = 0, + /// The object was found. + OBJECT_FOUND = 1 +} object_status; + +typedef enum { + /// Query for object in the local plasma store. + PLASMA_QUERY_LOCAL = 1, + /// Query for object in the local plasma store or in a remote plasma store. + PLASMA_QUERY_ANYWHERE +} object_request_type; + +/// This type is used by the Plasma store. It is here because it is exposed to +/// the eviction policy. +struct ObjectTableEntry { + /// Object id of this object. + ObjectID object_id; + /// Object info like size, creation time and owner. + ObjectInfoT info; + /// Memory mapped file containing the object. + int fd; + /// Size of the underlying map. + int64_t map_size; + /// Offset from the base of the mmap. + ptrdiff_t offset; + /// Pointer to the object data. Needed to free the object. + uint8_t* pointer; + /// Set of clients currently using this object. + std::unordered_set clients; + /// The state of the object, e.g., whether it is open or sealed. + object_state state; + /// The digest of the object. Used to see if two objects are the same. + unsigned char digest[kDigestSize]; +}; + +/// The plasma store information that is exposed to the eviction policy. +struct PlasmaStoreInfo { + /// Objects that are in the Plasma store. + std::unordered_map, UniqueIDHasher> objects; + /// The amount of memory (in bytes) that we allow to be allocated in the + /// store. + int64_t memory_capacity; +}; + +/// Get an entry from the object table and return NULL if the object_id +/// is not present. +/// +/// @param store_info The PlasmaStoreInfo that contains the object table. +/// @param object_id The object_id of the entry we are looking for. +/// @return The entry associated with the object_id or NULL if the object_id +/// is not present. +ObjectTableEntry* get_object_table_entry( + PlasmaStoreInfo* store_info, const ObjectID& object_id); + +/// Print a warning if the status is less than zero. This should be used to check +/// the success of messages sent to plasma clients. We print a warning instead of +/// failing because the plasma clients are allowed to die. 
This is used to handle +/// situations where the store writes to a client file descriptor, and the client +/// may already have disconnected. If we have processed the disconnection and +/// closed the file descriptor, we should get a BAD FILE DESCRIPTOR error. If we +/// have not, then we should get a SIGPIPE. If we write to a TCP socket that +/// isn't connected yet, then we should get an ECONNRESET. +/// +/// @param status The status to check. If it is less less than zero, we will +/// print a warning. +/// @param client_sock The client socket. This is just used to print some extra +/// information. +/// @return The errno set. +int warn_if_sigpipe(int status, int client_sock); + +uint8_t* create_object_info_buffer(ObjectInfoT* object_info); + +#endif // PLASMA_PLASMA_H diff --git a/cpp/src/plasma/protocol.cc b/cpp/src/plasma/protocol.cc new file mode 100644 index 0000000000000..246aa29736056 --- /dev/null +++ b/cpp/src/plasma/protocol.cc @@ -0,0 +1,502 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "plasma/protocol.h" + +#include "flatbuffers/flatbuffers.h" +#include "format/plasma_generated.h" + +#include "plasma/common.h" +#include "plasma/io.h" + +using flatbuffers::uoffset_t; + +flatbuffers::Offset>> +to_flatbuffer(flatbuffers::FlatBufferBuilder* fbb, const ObjectID* object_ids, + int64_t num_objects) { + std::vector> results; + for (int64_t i = 0; i < num_objects; i++) { + results.push_back(fbb->CreateString(object_ids[i].binary())); + } + return fbb->CreateVector(results); +} + +Status PlasmaReceive(int sock, int64_t message_type, std::vector* buffer) { + int64_t type; + RETURN_NOT_OK(ReadMessage(sock, &type, buffer)); + ARROW_CHECK(type == message_type) << "type = " << type + << ", message_type = " << message_type; + return Status::OK(); +} + +template +Status PlasmaSend(int sock, int64_t message_type, flatbuffers::FlatBufferBuilder* fbb, + const Message& message) { + fbb->Finish(message); + return WriteMessage(sock, message_type, fbb->GetSize(), fbb->GetBufferPointer()); +} + +// Create messages. 
+ +Status SendCreateRequest( + int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaCreateRequest( + fbb, fbb.CreateString(object_id.binary()), data_size, metadata_size); + return PlasmaSend(sock, MessageType_PlasmaCreateRequest, &fbb, message); +} + +Status ReadCreateRequest( + uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *data_size = message->data_size(); + *metadata_size = message->metadata_size(); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return Status::OK(); +} + +Status SendCreateReply( + int sock, ObjectID object_id, PlasmaObject* object, int error_code) { + flatbuffers::FlatBufferBuilder fbb; + PlasmaObjectSpec plasma_object(object->handle.store_fd, object->handle.mmap_size, + object->data_offset, object->data_size, object->metadata_offset, + object->metadata_size); + auto message = CreatePlasmaCreateReply( + fbb, fbb.CreateString(object_id.binary()), &plasma_object, (PlasmaError)error_code); + return PlasmaSend(sock, MessageType_PlasmaCreateReply, &fbb, message); +} + +Status ReadCreateReply(uint8_t* data, ObjectID* object_id, PlasmaObject* object) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + object->handle.store_fd = message->plasma_object()->segment_index(); + object->handle.mmap_size = message->plasma_object()->mmap_size(); + object->data_offset = message->plasma_object()->data_offset(); + object->data_size = message->plasma_object()->data_size(); + object->metadata_offset = message->plasma_object()->metadata_offset(); + object->metadata_size = message->plasma_object()->metadata_size(); + return plasma_error_status(message->error()); +} + +// Seal messages. + +Status SendSealRequest(int sock, ObjectID object_id, unsigned char* digest) { + flatbuffers::FlatBufferBuilder fbb; + auto digest_string = fbb.CreateString(reinterpret_cast(digest), kDigestSize); + auto message = + CreatePlasmaSealRequest(fbb, fbb.CreateString(object_id.binary()), digest_string); + return PlasmaSend(sock, MessageType_PlasmaSealRequest, &fbb, message); +} + +Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + ARROW_CHECK(message->digest()->size() == kDigestSize); + memcpy(digest, message->digest()->data(), kDigestSize); + return Status::OK(); +} + +Status SendSealReply(int sock, ObjectID object_id, int error) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaSealReply( + fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); + return PlasmaSend(sock, MessageType_PlasmaSealReply, &fbb, message); +} + +Status ReadSealReply(uint8_t* data, ObjectID* object_id) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return plasma_error_status(message->error()); +} + +// Release messages. 
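A sketch of the client-side create round trip assembled from the Create helpers above: send the request, wait for the matching reply type, and decode it. CreateExample is a hypothetical name and store_conn is assumed to be a connected Unix-domain socket to the store.

    arrow::Status CreateExample(int store_conn, const ObjectID& object_id,
                                int64_t data_size, int64_t metadata_size,
                                PlasmaObject* result) {
      RETURN_NOT_OK(SendCreateRequest(store_conn, object_id, data_size, metadata_size));
      std::vector<uint8_t> buffer;
      RETURN_NOT_OK(PlasmaReceive(store_conn, MessageType_PlasmaCreateReply, &buffer));
      ObjectID id;
      // ReadCreateReply converts a PlasmaError carried in the reply into a
      // non-ok Status.
      return ReadCreateReply(buffer.data(), &id, result);
    }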
+ +Status SendReleaseRequest(int sock, ObjectID object_id) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaSealRequest(fbb, fbb.CreateString(object_id.binary())); + return PlasmaSend(sock, MessageType_PlasmaReleaseRequest, &fbb, message); +} + +Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return Status::OK(); +} + +Status SendReleaseReply(int sock, ObjectID object_id, int error) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaReleaseReply( + fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); + return PlasmaSend(sock, MessageType_PlasmaReleaseReply, &fbb, message); +} + +Status ReadReleaseReply(uint8_t* data, ObjectID* object_id) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return plasma_error_status(message->error()); +} + +// Delete messages. + +Status SendDeleteRequest(int sock, ObjectID object_id) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaDeleteRequest(fbb, fbb.CreateString(object_id.binary())); + return PlasmaSend(sock, MessageType_PlasmaDeleteRequest, &fbb, message); +} + +Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return Status::OK(); +} + +Status SendDeleteReply(int sock, ObjectID object_id, int error) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaDeleteReply( + fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); + return PlasmaSend(sock, MessageType_PlasmaDeleteReply, &fbb, message); +} + +Status ReadDeleteReply(uint8_t* data, ObjectID* object_id) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return plasma_error_status(message->error()); +} + +// Satus messages. 
+ +Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objects) { + flatbuffers::FlatBufferBuilder fbb; + auto message = + CreatePlasmaStatusRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects)); + return PlasmaSend(sock, MessageType_PlasmaStatusRequest, &fbb, message); +} + +Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + for (uoffset_t i = 0; i < num_objects; ++i) { + object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str()); + } + return Status::OK(); +} + +Status SendStatusReply( + int sock, ObjectID object_ids[], int object_status[], int64_t num_objects) { + flatbuffers::FlatBufferBuilder fbb; + auto message = + CreatePlasmaStatusReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), + fbb.CreateVector(object_status, num_objects)); + return PlasmaSend(sock, MessageType_PlasmaStatusReply, &fbb, message); +} + +int64_t ReadStatusReply_num_objects(uint8_t* data) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + return message->object_ids()->size(); +} + +Status ReadStatusReply( + uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + for (uoffset_t i = 0; i < num_objects; ++i) { + object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str()); + } + for (uoffset_t i = 0; i < num_objects; ++i) { + object_status[i] = message->status()->data()[i]; + } + return Status::OK(); +} + +// Contains messages. + +Status SendContainsRequest(int sock, ObjectID object_id) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaContainsRequest(fbb, fbb.CreateString(object_id.binary())); + return PlasmaSend(sock, MessageType_PlasmaContainsRequest, &fbb, message); +} + +Status ReadContainsRequest(uint8_t* data, ObjectID* object_id) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + return Status::OK(); +} + +Status SendContainsReply(int sock, ObjectID object_id, bool has_object) { + flatbuffers::FlatBufferBuilder fbb; + auto message = + CreatePlasmaContainsReply(fbb, fbb.CreateString(object_id.binary()), has_object); + return PlasmaSend(sock, MessageType_PlasmaContainsReply, &fbb, message); +} + +Status ReadContainsReply(uint8_t* data, ObjectID* object_id, bool* has_object) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + *has_object = message->has_object(); + return Status::OK(); +} + +// Connect messages. + +Status SendConnectRequest(int sock) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaConnectRequest(fbb); + return PlasmaSend(sock, MessageType_PlasmaConnectRequest, &fbb, message); +} + +Status ReadConnectRequest(uint8_t* data) { + return Status::OK(); +} + +Status SendConnectReply(int sock, int64_t memory_capacity) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaConnectReply(fbb, memory_capacity); + return PlasmaSend(sock, MessageType_PlasmaConnectReply, &fbb, message); +} + +Status ReadConnectReply(uint8_t* data, int64_t* memory_capacity) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *memory_capacity = message->memory_capacity(); + return Status::OK(); +} + +// Evict messages. 
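The Contains helpers above follow the same request/reply pattern; a sketch of the client side, with ContainsExample as a hypothetical name and store_conn assumed to be a connected store socket.

    arrow::Status ContainsExample(int store_conn, const ObjectID& object_id,
                                  bool* has_object) {
      RETURN_NOT_OK(SendContainsRequest(store_conn, object_id));
      std::vector<uint8_t> buffer;
      RETURN_NOT_OK(PlasmaReceive(store_conn, MessageType_PlasmaContainsReply, &buffer));
      ObjectID id;
      return ReadContainsReply(buffer.data(), &id, has_object);
    }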
+ +Status SendEvictRequest(int sock, int64_t num_bytes) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaEvictRequest(fbb, num_bytes); + return PlasmaSend(sock, MessageType_PlasmaEvictRequest, &fbb, message); +} + +Status ReadEvictRequest(uint8_t* data, int64_t* num_bytes) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *num_bytes = message->num_bytes(); + return Status::OK(); +} + +Status SendEvictReply(int sock, int64_t num_bytes) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaEvictReply(fbb, num_bytes); + return PlasmaSend(sock, MessageType_PlasmaEvictReply, &fbb, message); +} + +Status ReadEvictReply(uint8_t* data, int64_t& num_bytes) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + num_bytes = message->num_bytes(); + return Status::OK(); +} + +// Get messages. + +Status SendGetRequest( + int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaGetRequest( + fbb, to_flatbuffer(&fbb, object_ids, num_objects), timeout_ms); + return PlasmaSend(sock, MessageType_PlasmaGetRequest, &fbb, message); +} + +Status ReadGetRequest( + uint8_t* data, std::vector& object_ids, int64_t* timeout_ms) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) { + auto object_id = message->object_ids()->Get(i)->str(); + object_ids.push_back(ObjectID::from_binary(object_id)); + } + *timeout_ms = message->timeout_ms(); + return Status::OK(); +} + +Status SendGetReply(int sock, ObjectID object_ids[], + std::unordered_map& plasma_objects, + int64_t num_objects) { + flatbuffers::FlatBufferBuilder fbb; + std::vector objects; + + for (int i = 0; i < num_objects; ++i) { + const PlasmaObject& object = plasma_objects[object_ids[i]]; + objects.push_back(PlasmaObjectSpec(object.handle.store_fd, object.handle.mmap_size, + object.data_offset, object.data_size, object.metadata_offset, + object.metadata_size)); + } + auto message = CreatePlasmaGetReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), + fbb.CreateVectorOfStructs(objects.data(), num_objects)); + return PlasmaSend(sock, MessageType_PlasmaGetReply, &fbb, message); +} + +Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[], + int64_t num_objects) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + for (uoffset_t i = 0; i < num_objects; ++i) { + object_ids[i] = ObjectID::from_binary(message->object_ids()->Get(i)->str()); + } + for (uoffset_t i = 0; i < num_objects; ++i) { + const PlasmaObjectSpec* object = message->plasma_objects()->Get(i); + plasma_objects[i].handle.store_fd = object->segment_index(); + plasma_objects[i].handle.mmap_size = object->mmap_size(); + plasma_objects[i].data_offset = object->data_offset(); + plasma_objects[i].data_size = object->data_size(); + plasma_objects[i].metadata_offset = object->metadata_offset(); + plasma_objects[i].metadata_size = object->metadata_size(); + } + return Status::OK(); +} + +// Fetch messages. 
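A sketch of a batched get round trip built from the Get helpers above, pairing SendGetRequest with ReadGetReply. GetExample is a hypothetical name and store_conn is assumed to be a connected store socket.

    arrow::Status GetExample(int store_conn, const std::vector<ObjectID>& ids,
                             int64_t timeout_ms, std::vector<PlasmaObject>* objects) {
      RETURN_NOT_OK(SendGetRequest(store_conn, ids.data(), ids.size(), timeout_ms));
      std::vector<uint8_t> buffer;
      RETURN_NOT_OK(PlasmaReceive(store_conn, MessageType_PlasmaGetReply, &buffer));
      std::vector<ObjectID> returned_ids(ids.size());
      objects->resize(ids.size());
      // The reply lists objects in the same order as the requested IDs.
      return ReadGetReply(buffer.data(), returned_ids.data(), objects->data(), ids.size());
    }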
+ +Status SendFetchRequest(int sock, const ObjectID* object_ids, int64_t num_objects) { + flatbuffers::FlatBufferBuilder fbb; + auto message = + CreatePlasmaFetchRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects)); + return PlasmaSend(sock, MessageType_PlasmaFetchRequest, &fbb, message); +} + +Status ReadFetchRequest(uint8_t* data, std::vector& object_ids) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) { + object_ids.push_back(ObjectID::from_binary(message->object_ids()->Get(i)->str())); + } + return Status::OK(); +} + +// Wait messages. + +Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests, + int num_ready_objects, int64_t timeout_ms) { + flatbuffers::FlatBufferBuilder fbb; + + std::vector> object_request_specs; + for (int i = 0; i < num_requests; i++) { + object_request_specs.push_back(CreateObjectRequestSpec(fbb, + fbb.CreateString(object_requests[i].object_id.binary()), + object_requests[i].type)); + } + + auto message = CreatePlasmaWaitRequest( + fbb, fbb.CreateVector(object_request_specs), num_ready_objects, timeout_ms); + return PlasmaSend(sock, MessageType_PlasmaWaitRequest, &fbb, message); +} + +Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests, + int64_t* timeout_ms, int* num_ready_objects) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *num_ready_objects = message->num_ready_objects(); + *timeout_ms = message->timeout(); + + for (uoffset_t i = 0; i < message->object_requests()->size(); i++) { + ObjectID object_id = + ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str()); + ObjectRequest object_request({object_id, message->object_requests()->Get(i)->type(), + ObjectStatus_Nonexistent}); + object_requests[object_id] = object_request; + } + return Status::OK(); +} + +Status SendWaitReply( + int sock, const ObjectRequestMap& object_requests, int num_ready_objects) { + flatbuffers::FlatBufferBuilder fbb; + + std::vector> object_replies; + for (const auto& entry : object_requests) { + const auto& object_request = entry.second; + object_replies.push_back(CreateObjectReply( + fbb, fbb.CreateString(object_request.object_id.binary()), object_request.status)); + } + + auto message = CreatePlasmaWaitReply( + fbb, fbb.CreateVector(object_replies.data(), num_ready_objects), num_ready_objects); + return PlasmaSend(sock, MessageType_PlasmaWaitReply, &fbb, message); +} + +Status ReadWaitReply( + uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects) { + DCHECK(data); + + auto message = flatbuffers::GetRoot(data); + *num_ready_objects = message->num_ready_objects(); + for (int i = 0; i < *num_ready_objects; i++) { + object_requests[i].object_id = + ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str()); + object_requests[i].status = message->object_requests()->Get(i)->status(); + } + return Status::OK(); +} + +// Subscribe messages. + +Status SendSubscribeRequest(int sock) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaSubscribeRequest(fbb); + return PlasmaSend(sock, MessageType_PlasmaSubscribeRequest, &fbb, message); +} + +// Data messages. 
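A sketch of issuing a wait for two objects and decoding the reply with the Wait helpers above. WaitExample is a hypothetical name, store_conn is assumed to be a connected store socket, and PLASMA_QUERY_LOCAL comes from plasma/plasma.h.

    arrow::Status WaitExample(int store_conn, const ObjectID& id1, const ObjectID& id2) {
      ObjectRequest requests[2] = {{id1, PLASMA_QUERY_LOCAL, 0},
                                   {id2, PLASMA_QUERY_LOCAL, 0}};
      RETURN_NOT_OK(SendWaitRequest(store_conn, requests, 2, /*num_ready_objects=*/1,
                                    /*timeout_ms=*/1000));
      std::vector<uint8_t> buffer;
      RETURN_NOT_OK(PlasmaReceive(store_conn, MessageType_PlasmaWaitReply, &buffer));
      int num_ready = 0;
      RETURN_NOT_OK(ReadWaitReply(buffer.data(), requests, &num_ready));
      // requests[i].status now holds ObjectStatus_Local, ObjectStatus_Remote, etc.
      return arrow::Status::OK();
    }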
+ +Status SendDataRequest(int sock, ObjectID object_id, const char* address, int port) { + flatbuffers::FlatBufferBuilder fbb; + auto addr = fbb.CreateString(address, strlen(address)); + auto message = + CreatePlasmaDataRequest(fbb, fbb.CreateString(object_id.binary()), addr, port); + return PlasmaSend(sock, MessageType_PlasmaDataRequest, &fbb, message); +} + +Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + DCHECK(message->object_id()->size() == sizeof(ObjectID)); + *object_id = ObjectID::from_binary(message->object_id()->str()); + *address = strdup(message->address()->c_str()); + *port = message->port(); + return Status::OK(); +} + +Status SendDataReply( + int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size) { + flatbuffers::FlatBufferBuilder fbb; + auto message = CreatePlasmaDataReply( + fbb, fbb.CreateString(object_id.binary()), object_size, metadata_size); + return PlasmaSend(sock, MessageType_PlasmaDataReply, &fbb, message); +} + +Status ReadDataReply( + uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size) { + DCHECK(data); + auto message = flatbuffers::GetRoot(data); + *object_id = ObjectID::from_binary(message->object_id()->str()); + *object_size = (int64_t)message->object_size(); + *metadata_size = (int64_t)message->metadata_size(); + return Status::OK(); +} diff --git a/cpp/src/plasma/protocol.h b/cpp/src/plasma/protocol.h new file mode 100644 index 0000000000000..5d9d13675144f --- /dev/null +++ b/cpp/src/plasma/protocol.h @@ -0,0 +1,170 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_PROTOCOL_H +#define PLASMA_PROTOCOL_H + +#include + +#include "arrow/status.h" +#include "format/plasma_generated.h" +#include "plasma/plasma.h" + +using arrow::Status; + +/* Plasma receive message. */ + +Status PlasmaReceive(int sock, int64_t message_type, std::vector* buffer); + +/* Plasma Create message functions. */ + +Status SendCreateRequest( + int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size); + +Status ReadCreateRequest( + uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size); + +Status SendCreateReply(int sock, ObjectID object_id, PlasmaObject* object, int error); + +Status ReadCreateReply(uint8_t* data, ObjectID* object_id, PlasmaObject* object); + +/* Plasma Seal message functions. */ + +Status SendSealRequest(int sock, ObjectID object_id, unsigned char* digest); + +Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest); + +Status SendSealReply(int sock, ObjectID object_id, int error); + +Status ReadSealReply(uint8_t* data, ObjectID* object_id); + +/* Plasma Get message functions. 
 */
+
+Status SendGetRequest(
+    int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms);
+
+Status ReadGetRequest(
+    uint8_t* data, std::vector<ObjectID>& object_ids, int64_t* timeout_ms);
+
+Status SendGetReply(int sock, ObjectID object_ids[],
+    std::unordered_map<ObjectID, PlasmaObject, UniqueIDHasher>& plasma_objects,
+    int64_t num_objects);
+
+Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[],
+    int64_t num_objects);
+
+/* Plasma Release message functions. */
+
+Status SendReleaseRequest(int sock, ObjectID object_id);
+
+Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id);
+
+Status SendReleaseReply(int sock, ObjectID object_id, int error);
+
+Status ReadReleaseReply(uint8_t* data, ObjectID* object_id);
+
+/* Plasma Delete message functions. */
+
+Status SendDeleteRequest(int sock, ObjectID object_id);
+
+Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id);
+
+Status SendDeleteReply(int sock, ObjectID object_id, int error);
+
+Status ReadDeleteReply(uint8_t* data, ObjectID* object_id);
+
+/* Plasma Status message functions. */
+
+Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objects);
+
+Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects);
+
+Status SendStatusReply(
+    int sock, ObjectID object_ids[], int object_status[], int64_t num_objects);
+
+int64_t ReadStatusReply_num_objects(uint8_t* data);
+
+Status ReadStatusReply(
+    uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects);
+
+/* Plasma Contains message functions. */
+
+Status SendContainsRequest(int sock, ObjectID object_id);
+
+Status ReadContainsRequest(uint8_t* data, ObjectID* object_id);
+
+Status SendContainsReply(int sock, ObjectID object_id, bool has_object);
+
+Status ReadContainsReply(uint8_t* data, ObjectID* object_id, bool* has_object);
+
+/* Plasma Connect message functions. */
+
+Status SendConnectRequest(int sock);
+
+Status ReadConnectRequest(uint8_t* data);
+
+Status SendConnectReply(int sock, int64_t memory_capacity);
+
+Status ReadConnectReply(uint8_t* data, int64_t* memory_capacity);
+
+/* Plasma Evict message functions. */
+
+Status SendEvictRequest(int sock, int64_t num_bytes);
+
+Status ReadEvictRequest(uint8_t* data, int64_t* num_bytes);
+
+Status SendEvictReply(int sock, int64_t num_bytes);
+
+Status ReadEvictReply(uint8_t* data, int64_t& num_bytes);
+
+/* Plasma Fetch Remote message functions. */
+
+Status SendFetchRequest(int sock, const ObjectID* object_ids, int64_t num_objects);
+
+Status ReadFetchRequest(uint8_t* data, std::vector<ObjectID>& object_ids);
+
+/* Plasma Wait message functions. */
+
+Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests,
+    int num_ready_objects, int64_t timeout_ms);
+
+Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests,
+    int64_t* timeout_ms, int* num_ready_objects);
+
+Status SendWaitReply(
+    int sock, const ObjectRequestMap& object_requests, int num_ready_objects);
+
+Status ReadWaitReply(
+    uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects);
+
+/* Plasma Subscribe message functions. */
+
+Status SendSubscribeRequest(int sock);
+
+/* Data messages.
*/ + +Status SendDataRequest(int sock, ObjectID object_id, const char* address, int port); + +Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port); + +Status SendDataReply( + int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size); + +Status ReadDataReply( + uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size); + +#endif /* PLASMA_PROTOCOL */ diff --git a/cpp/src/plasma/store.cc b/cpp/src/plasma/store.cc new file mode 100644 index 0000000000000..9394e3de310b2 --- /dev/null +++ b/cpp/src/plasma/store.cc @@ -0,0 +1,683 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// PLASMA STORE: This is a simple object store server process +// +// It accepts incoming client connections on a unix domain socket +// (name passed in via the -s option of the executable) and uses a +// single thread to serve the clients. Each client establishes a +// connection and can create objects, wait for objects and seal +// objects through that connection. +// +// It keeps a hash table that maps object_ids (which are 20 byte long, +// just enough to store and SHA1 hash) to memory mapped files. + +#include "plasma/store.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "format/common_generated.h" +#include "plasma/common.h" +#include "plasma/fling.h" +#include "plasma/io.h" +#include "plasma/malloc.h" + +extern "C" { +void* dlmalloc(size_t bytes); +void* dlmemalign(size_t alignment, size_t bytes); +void dlfree(void* mem); +size_t dlmalloc_set_footprint_limit(size_t bytes); +} + +struct GetRequest { + GetRequest(Client* client, const std::vector& object_ids); + /// The client that called get. + Client* client; + /// The ID of the timer that will time out and cause this wait to return to + /// the client if it hasn't already returned. + int64_t timer; + /// The object IDs involved in this request. This is used in the reply. + std::vector object_ids; + /// The object information for the objects in this request. This is used in + /// the reply. + std::unordered_map objects; + /// The minimum number of objects to wait for in this request. + int64_t num_objects_to_wait_for; + /// The number of object requests in this wait request that are already + /// satisfied. 
+ int64_t num_satisfied; +}; + +GetRequest::GetRequest(Client* client, const std::vector& object_ids) + : client(client), + timer(-1), + object_ids(object_ids.begin(), object_ids.end()), + objects(object_ids.size()), + num_satisfied(0) { + std::unordered_set unique_ids( + object_ids.begin(), object_ids.end()); + num_objects_to_wait_for = unique_ids.size(); +} + +Client::Client(int fd) : fd(fd) {} + +PlasmaStore::PlasmaStore(EventLoop* loop, int64_t system_memory) + : loop_(loop), eviction_policy_(&store_info_) { + store_info_.memory_capacity = system_memory; +} + +// TODO(pcm): Get rid of this destructor by using RAII to clean up data. +PlasmaStore::~PlasmaStore() { + for (const auto& element : pending_notifications_) { + auto object_notifications = element.second.object_notifications; + for (size_t i = 0; i < object_notifications.size(); ++i) { + uint8_t* notification = reinterpret_cast(object_notifications.at(i)); + uint8_t* data = notification; + // TODO(pcm): Get rid of this delete. + delete[] data; + } + } +} + +// If this client is not already using the object, add the client to the +// object's list of clients, otherwise do nothing. +void PlasmaStore::add_client_to_object_clients(ObjectTableEntry* entry, Client* client) { + // Check if this client is already using the object. + if (entry->clients.find(client) != entry->clients.end()) { return; } + // If there are no other clients using this object, notify the eviction policy + // that the object is being used. + if (entry->clients.size() == 0) { + // Tell the eviction policy that this object is being used. + std::vector objects_to_evict; + eviction_policy_.begin_object_access(entry->object_id, &objects_to_evict); + delete_objects(objects_to_evict); + } + // Add the client pointer to the list of clients using this object. + entry->clients.insert(client); +} + +// Create a new object buffer in the hash table. +int PlasmaStore::create_object(const ObjectID& object_id, int64_t data_size, + int64_t metadata_size, Client* client, PlasmaObject* result) { + ARROW_LOG(DEBUG) << "creating object " << object_id.hex(); + if (store_info_.objects.count(object_id) != 0) { + // There is already an object with the same ID in the Plasma Store, so + // ignore this requst. + return PlasmaError_ObjectExists; + } + // Try to evict objects until there is enough space. + uint8_t* pointer; + do { + // Allocate space for the new object. We use dlmemalign instead of dlmalloc + // in order to align the allocated region to a 64-byte boundary. This is not + // strictly necessary, but it is an optimization that could speed up the + // computation of a hash of the data (see compute_object_hash_parallel in + // plasma_client.cc). Note that even though this pointer is 64-byte aligned, + // it is not guaranteed that the corresponding pointer in the client will be + // 64-byte aligned, but in practice it often will be. + pointer = + reinterpret_cast(dlmemalign(BLOCK_SIZE, data_size + metadata_size)); + if (pointer == NULL) { + // Tell the eviction policy how much space we need to create this object. + std::vector objects_to_evict; + bool success = + eviction_policy_.require_space(data_size + metadata_size, &objects_to_evict); + delete_objects(objects_to_evict); + // Return an error to the client if not enough space could be freed to + // create the object. 
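+      // Note that the loop terminates: either dlmemalign() succeeds on a
+      // later iteration because delete_objects() has dlfree()d the evicted
+      // objects, or require_space() reports failure and we return below.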
+ if (!success) { return PlasmaError_OutOfMemory; } + } + } while (pointer == NULL); + int fd; + int64_t map_size; + ptrdiff_t offset; + get_malloc_mapinfo(pointer, &fd, &map_size, &offset); + assert(fd != -1); + + auto entry = std::unique_ptr(new ObjectTableEntry()); + entry->object_id = object_id; + entry->info.object_id = object_id.binary(); + entry->info.data_size = data_size; + entry->info.metadata_size = metadata_size; + entry->pointer = pointer; + // TODO(pcm): Set the other fields. + entry->fd = fd; + entry->map_size = map_size; + entry->offset = offset; + entry->state = PLASMA_CREATED; + + store_info_.objects[object_id] = std::move(entry); + result->handle.store_fd = fd; + result->handle.mmap_size = map_size; + result->data_offset = offset; + result->metadata_offset = offset + data_size; + result->data_size = data_size; + result->metadata_size = metadata_size; + // Notify the eviction policy that this object was created. This must be done + // immediately before the call to add_client_to_object_clients so that the + // eviction policy does not have an opportunity to evict the object. + eviction_policy_.object_created(object_id); + // Record that this client is using this object. + add_client_to_object_clients(store_info_.objects[object_id].get(), client); + return PlasmaError_OK; +} + +void PlasmaObject_init(PlasmaObject* object, ObjectTableEntry* entry) { + DCHECK(object != NULL); + DCHECK(entry != NULL); + DCHECK(entry->state == PLASMA_SEALED); + object->handle.store_fd = entry->fd; + object->handle.mmap_size = entry->map_size; + object->data_offset = entry->offset; + object->metadata_offset = entry->offset + entry->info.data_size; + object->data_size = entry->info.data_size; + object->metadata_size = entry->info.metadata_size; +} + +void PlasmaStore::return_from_get(GetRequest* get_req) { + // Send the get reply to the client. + Status s = SendGetReply(get_req->client->fd, &get_req->object_ids[0], get_req->objects, + get_req->object_ids.size()); + warn_if_sigpipe(s.ok() ? 0 : -1, get_req->client->fd); + // If we successfully sent the get reply message to the client, then also send + // the file descriptors. + if (s.ok()) { + // Send all of the file descriptors for the present objects. + for (const auto& object_id : get_req->object_ids) { + PlasmaObject& object = get_req->objects[object_id]; + // We use the data size to indicate whether the object is present or not. + if (object.data_size != -1) { + int error_code = send_fd(get_req->client->fd, object.handle.store_fd); + // If we failed to send the file descriptor, loop until we have sent it + // successfully. TODO(rkn): This is problematic for two reasons. First + // of all, sending the file descriptor should just succeed without any + // errors, but sometimes I see a "Message too long" error number. + // Second, looping like this allows a client to potentially block the + // plasma store event loop which should never happen. + while (error_code < 0) { + if (errno == EMSGSIZE) { + ARROW_LOG(WARNING) << "Failed to send file descriptor, retrying."; + error_code = send_fd(get_req->client->fd, object.handle.store_fd); + continue; + } + warn_if_sigpipe(error_code, get_req->client->fd); + break; + } + } + } + } + + // Remove the get request from each of the relevant object_get_requests hash + // tables if it is present there. It should only be present there if the get + // request timed out. + for (ObjectID& object_id : get_req->object_ids) { + auto& get_requests = object_get_requests_[object_id]; + // Erase get_req from the vector. 
+ auto it = std::find(get_requests.begin(), get_requests.end(), get_req); + if (it != get_requests.end()) { get_requests.erase(it); } + } + // Remove the get request. + if (get_req->timer != -1) { ARROW_CHECK(loop_->remove_timer(get_req->timer) == AE_OK); } + delete get_req; +} + +void PlasmaStore::update_object_get_requests(const ObjectID& object_id) { + std::vector& get_requests = object_get_requests_[object_id]; + size_t index = 0; + size_t num_requests = get_requests.size(); + for (size_t i = 0; i < num_requests; ++i) { + GetRequest* get_req = get_requests[index]; + auto entry = get_object_table_entry(&store_info_, object_id); + ARROW_CHECK(entry != NULL); + + PlasmaObject_init(&get_req->objects[object_id], entry); + get_req->num_satisfied += 1; + // Record the fact that this client will be using this object and will + // be responsible for releasing this object. + add_client_to_object_clients(entry, get_req->client); + + // If this get request is done, reply to the client. + if (get_req->num_satisfied == get_req->num_objects_to_wait_for) { + return_from_get(get_req); + } else { + // The call to return_from_get will remove the current element in the + // array, so we only increment the counter in the else branch. + index += 1; + } + } + + DCHECK(index == get_requests.size()); + // Remove the array of get requests for this object, since no one should be + // waiting for this object anymore. + object_get_requests_.erase(object_id); +} + +void PlasmaStore::process_get_request( + Client* client, const std::vector& object_ids, int64_t timeout_ms) { + // Create a get request for this object. + GetRequest* get_req = new GetRequest(client, object_ids); + + for (auto object_id : object_ids) { + // Check if this object is already present locally. If so, record that the + // object is being used and mark it as accounted for. + auto entry = get_object_table_entry(&store_info_, object_id); + if (entry && entry->state == PLASMA_SEALED) { + // Update the get request to take into account the present object. + PlasmaObject_init(&get_req->objects[object_id], entry); + get_req->num_satisfied += 1; + // If necessary, record that this client is using this object. In the case + // where entry == NULL, this will be called from seal_object. + add_client_to_object_clients(entry, client); + } else { + // Add a placeholder plasma object to the get request to indicate that the + // object is not present. This will be parsed by the client. We set the + // data size to -1 to indicate that the object is not present. + get_req->objects[object_id].data_size = -1; + // Add the get request to the relevant data structures. + object_get_requests_[object_id].push_back(get_req); + } + } + + // If all of the objects are present already or if the timeout is 0, return to + // the client. + if (get_req->num_satisfied == get_req->num_objects_to_wait_for || timeout_ms == 0) { + return_from_get(get_req); + } else if (timeout_ms != -1) { + // Set a timer that will cause the get request to return to the client. Note + // that a timeout of -1 is used to indicate that no timer should be set. 
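+    // The timer fires once after timeout_ms; returning kEventLoopTimerDone
+    // from the callback tells the event loop not to reschedule it.
+    // return_from_get() then replies with whatever subset of the requested
+    // objects has been satisfied so far (absent objects keep data_size == -1)
+    // and frees the GetRequest.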
+ get_req->timer = loop_->add_timer(timeout_ms, [this, get_req](int64_t timer_id) { + return_from_get(get_req); + return kEventLoopTimerDone; + }); + } +} + +int PlasmaStore::remove_client_from_object_clients( + ObjectTableEntry* entry, Client* client) { + auto it = entry->clients.find(client); + if (it != entry->clients.end()) { + entry->clients.erase(it); + // If no more clients are using this object, notify the eviction policy + // that the object is no longer being used. + if (entry->clients.size() == 0) { + // Tell the eviction policy that this object is no longer being used. + std::vector objects_to_evict; + eviction_policy_.end_object_access(entry->object_id, &objects_to_evict); + delete_objects(objects_to_evict); + } + // Return 1 to indicate that the client was removed. + return 1; + } else { + // Return 0 to indicate that the client was not removed. + return 0; + } +} + +void PlasmaStore::release_object(const ObjectID& object_id, Client* client) { + auto entry = get_object_table_entry(&store_info_, object_id); + ARROW_CHECK(entry != NULL); + // Remove the client from the object's array of clients. + ARROW_CHECK(remove_client_from_object_clients(entry, client) == 1); +} + +// Check if an object is present. +int PlasmaStore::contains_object(const ObjectID& object_id) { + auto entry = get_object_table_entry(&store_info_, object_id); + return entry && (entry->state == PLASMA_SEALED) ? OBJECT_FOUND : OBJECT_NOT_FOUND; +} + +// Seal an object that has been created in the hash table. +void PlasmaStore::seal_object(const ObjectID& object_id, unsigned char digest[]) { + ARROW_LOG(DEBUG) << "sealing object " << object_id.hex(); + auto entry = get_object_table_entry(&store_info_, object_id); + ARROW_CHECK(entry != NULL); + ARROW_CHECK(entry->state == PLASMA_CREATED); + // Set the state of object to SEALED. + entry->state = PLASMA_SEALED; + // Set the object digest. + entry->info.digest = std::string(reinterpret_cast(&digest[0]), kDigestSize); + // Inform all subscribers that a new object has been sealed. + push_notification(&entry->info); + + // Update all get requests that involve this object. + update_object_get_requests(object_id); +} + +void PlasmaStore::delete_objects(const std::vector& object_ids) { + for (const auto& object_id : object_ids) { + ARROW_LOG(DEBUG) << "deleting object " << object_id.hex(); + auto entry = get_object_table_entry(&store_info_, object_id); + // TODO(rkn): This should probably not fail, but should instead throw an + // error. Maybe we should also support deleting objects that have been + // created but not sealed. + ARROW_CHECK(entry != NULL) << "To delete an object it must be in the object table."; + ARROW_CHECK(entry->state == PLASMA_SEALED) + << "To delete an object it must have been sealed."; + ARROW_CHECK(entry->clients.size() == 0) + << "To delete an object, there must be no clients currently using it."; + dlfree(entry->pointer); + store_info_.objects.erase(object_id); + // Inform all subscribers that the object has been deleted. + ObjectInfoT notification; + notification.object_id = object_id.binary(); + notification.is_deletion = true; + push_notification(¬ification); + } +} + +void PlasmaStore::connect_client(int listener_sock) { + int client_fd = AcceptClient(listener_sock); + // This is freed in disconnect_client. + Client* client = new Client(client_fd); + // Add a callback to handle events on this socket. + // TODO(pcm): Check return value. 
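+  // The callback captures the raw Client* and runs process_message() whenever
+  // the socket becomes readable. A read failure (the client went away) is
+  // surfaced by ReadMessage() as the DISCONNECT_CLIENT message type, which
+  // process_message() handles by calling disconnect_client().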
+ loop_->add_file_event(client_fd, kEventLoopRead, [this, client](int events) { + Status s = process_message(client); + if (!s.ok()) { ARROW_LOG(FATAL) << "Failed to process file event: " << s; } + }); + ARROW_LOG(DEBUG) << "New connection with fd " << client_fd; +} + +void PlasmaStore::disconnect_client(Client* client) { + ARROW_CHECK(client != NULL); + ARROW_CHECK(client->fd > 0); + loop_->remove_file_event(client->fd); + // Close the socket. + close(client->fd); + ARROW_LOG(INFO) << "Disconnecting client on fd " << client->fd; + // If this client was using any objects, remove it from the appropriate + // lists. + for (const auto& entry : store_info_.objects) { + remove_client_from_object_clients(entry.second.get(), client); + } + // Note, the store may still attempt to send a message to the disconnected + // client (for example, when an object ID that the client was waiting for + // is ready). In these cases, the attempt to send the message will fail, but + // the store should just ignore the failure. + delete client; +} + +/// Send notifications about sealed objects to the subscribers. This is called +/// in seal_object. If the socket's send buffer is full, the notification will +/// be +/// buffered, and this will be called again when the send buffer has room. +/// +/// @param client The client to send the notification to. +/// @return Void. +void PlasmaStore::send_notifications(int client_fd) { + auto it = pending_notifications_.find(client_fd); + + int num_processed = 0; + bool closed = false; + // Loop over the array of pending notifications and send as many of them as + // possible. + for (size_t i = 0; i < it->second.object_notifications.size(); ++i) { + uint8_t* notification = + reinterpret_cast(it->second.object_notifications.at(i)); + // Decode the length, which is the first bytes of the message. + int64_t size = *(reinterpret_cast(notification)); + + // Attempt to send a notification about this object ID. + ssize_t nbytes = send(client_fd, notification, sizeof(int64_t) + size, 0); + if (nbytes >= 0) { + ARROW_CHECK(nbytes == static_cast(sizeof(int64_t)) + size); + } else if (nbytes == -1 && + (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { + ARROW_LOG(DEBUG) << "The socket's send buffer is full, so we are caching this " + "notification and will send it later."; + // Add a callback to the event loop to send queued notifications whenever + // there is room in the socket's send buffer. Callbacks can be added + // more than once here and will be overwritten. The callback is removed + // at the end of the method. + // TODO(pcm): Introduce status codes and check in case the file descriptor + // is added twice. + loop_->add_file_event(client_fd, kEventLoopWrite, + [this, client_fd](int events) { send_notifications(client_fd); }); + break; + } else { + ARROW_LOG(WARNING) << "Failed to send notification to client on fd " << client_fd; + if (errno == EPIPE) { + closed = true; + break; + } + } + num_processed += 1; + // The corresponding malloc happened in create_object_info_buffer + // within push_notification. + delete[] notification; + } + // Remove the sent notifications from the array. + it->second.object_notifications.erase(it->second.object_notifications.begin(), + it->second.object_notifications.begin() + num_processed); + + // Stop sending notifications if the pipe was broken. + if (closed) { + close(client_fd); + pending_notifications_.erase(client_fd); + } + + // If we have sent all notifications, remove the fd from the event loop. 
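+  // send_notifications() is re-entered from the kEventLoopWrite callback
+  // registered above each time the send buffer drains; once the backlog is
+  // empty, the callback is removed here so the loop does not keep firing on a
+  // writable socket with nothing left to send.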
+ if (it->second.object_notifications.empty()) { loop_->remove_file_event(client_fd); } +} + +void PlasmaStore::push_notification(ObjectInfoT* object_info) { + for (auto& element : pending_notifications_) { + uint8_t* notification = create_object_info_buffer(object_info); + element.second.object_notifications.push_back(notification); + send_notifications(element.first); + // The notification gets freed in send_notifications when the notification + // is sent over the socket. + } +} + +// Subscribe to notifications about sealed objects. +void PlasmaStore::subscribe_to_updates(Client* client) { + ARROW_LOG(DEBUG) << "subscribing to updates on fd " << client->fd; + // TODO(rkn): The store could block here if the client doesn't send a file + // descriptor. + int fd = recv_fd(client->fd); + if (fd < 0) { + // This may mean that the client died before sending the file descriptor. + ARROW_LOG(WARNING) << "Failed to receive file descriptor from client on fd " + << client->fd << "."; + return; + } + + // Create a new array to buffer notifications that can't be sent to the + // subscriber yet because the socket send buffer is full. TODO(rkn): the queue + // never gets freed. + // TODO(pcm): Is the following neccessary? + pending_notifications_[fd]; + + // Push notifications to the new subscriber about existing objects. + for (const auto& entry : store_info_.objects) { + push_notification(&entry.second->info); + } + send_notifications(fd); +} + +Status PlasmaStore::process_message(Client* client) { + int64_t type; + Status s = ReadMessage(client->fd, &type, &input_buffer_); + ARROW_CHECK(s.ok() || s.IsIOError()); + + uint8_t* input = input_buffer_.data(); + ObjectID object_id; + PlasmaObject object; + // TODO(pcm): Get rid of the following. + memset(&object, 0, sizeof(object)); + + // Process the different types of requests. + switch (type) { + case MessageType_PlasmaCreateRequest: { + int64_t data_size; + int64_t metadata_size; + RETURN_NOT_OK(ReadCreateRequest(input, &object_id, &data_size, &metadata_size)); + int error_code = + create_object(object_id, data_size, metadata_size, client, &object); + HANDLE_SIGPIPE( + SendCreateReply(client->fd, object_id, &object, error_code), client->fd); + if (error_code == PlasmaError_OK) { + warn_if_sigpipe(send_fd(client->fd, object.handle.store_fd), client->fd); + } + } break; + case MessageType_PlasmaGetRequest: { + std::vector object_ids_to_get; + int64_t timeout_ms; + RETURN_NOT_OK(ReadGetRequest(input, object_ids_to_get, &timeout_ms)); + process_get_request(client, object_ids_to_get, timeout_ms); + } break; + case MessageType_PlasmaReleaseRequest: + RETURN_NOT_OK(ReadReleaseRequest(input, &object_id)); + release_object(object_id, client); + break; + case MessageType_PlasmaContainsRequest: + RETURN_NOT_OK(ReadContainsRequest(input, &object_id)); + if (contains_object(object_id) == OBJECT_FOUND) { + HANDLE_SIGPIPE(SendContainsReply(client->fd, object_id, 1), client->fd); + } else { + HANDLE_SIGPIPE(SendContainsReply(client->fd, object_id, 0), client->fd); + } + break; + case MessageType_PlasmaSealRequest: { + unsigned char digest[kDigestSize]; + RETURN_NOT_OK(ReadSealRequest(input, &object_id, &digest[0])); + seal_object(object_id, &digest[0]); + } break; + case MessageType_PlasmaEvictRequest: { + // This code path should only be used for testing. 
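+      // The client names a byte budget; the eviction policy picks victims,
+      // delete_objects() frees them, and the reply reports how many bytes
+      // were actually evicted (possibly fewer than requested).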
+ int64_t num_bytes; + RETURN_NOT_OK(ReadEvictRequest(input, &num_bytes)); + std::vector objects_to_evict; + int64_t num_bytes_evicted = + eviction_policy_.choose_objects_to_evict(num_bytes, &objects_to_evict); + delete_objects(objects_to_evict); + HANDLE_SIGPIPE(SendEvictReply(client->fd, num_bytes_evicted), client->fd); + } break; + case MessageType_PlasmaSubscribeRequest: + subscribe_to_updates(client); + break; + case MessageType_PlasmaConnectRequest: { + HANDLE_SIGPIPE( + SendConnectReply(client->fd, store_info_.memory_capacity), client->fd); + } break; + case DISCONNECT_CLIENT: + ARROW_LOG(DEBUG) << "Disconnecting client on fd " << client->fd; + disconnect_client(client); + break; + default: + // This code should be unreachable. + ARROW_CHECK(0); + } + return Status::OK(); +} + +// Report "success" to valgrind. +void signal_handler(int signal) { + if (signal == SIGTERM) { exit(0); } +} + +void start_server(char* socket_name, int64_t system_memory) { + // Ignore SIGPIPE signals. If we don't do this, then when we attempt to write + // to a client that has already died, the store could die. + signal(SIGPIPE, SIG_IGN); + // Create the event loop. + EventLoop loop; + PlasmaStore store(&loop, system_memory); + int socket = bind_ipc_sock(socket_name, true); + ARROW_CHECK(socket >= 0); + // TODO(pcm): Check return value. + loop.add_file_event(socket, kEventLoopRead, + [&store, socket](int events) { store.connect_client(socket); }); + loop.run(); +} + +int main(int argc, char* argv[]) { + signal(SIGTERM, signal_handler); + char* socket_name = NULL; + int64_t system_memory = -1; + int c; + while ((c = getopt(argc, argv, "s:m:")) != -1) { + switch (c) { + case 's': + socket_name = optarg; + break; + case 'm': { + char extra; + int scanned = sscanf(optarg, "%" SCNd64 "%c", &system_memory, &extra); + ARROW_CHECK(scanned == 1); + ARROW_LOG(INFO) << "Allowing the Plasma store to use up to " + << static_cast(system_memory) / 1000000000 + << "GB of memory."; + break; + } + default: + exit(-1); + } + } + if (!socket_name) { + ARROW_LOG(FATAL) << "please specify socket for incoming connections with -s switch"; + } + if (system_memory == -1) { + ARROW_LOG(FATAL) << "please specify the amount of system memory with -m switch"; + } +#ifdef __linux__ + // On Linux, check that the amount of memory available in /dev/shm is large + // enough to accommodate the request. If it isn't, then fail. + int shm_fd = open("/dev/shm", O_RDONLY); + struct statvfs shm_vfs_stats; + fstatvfs(shm_fd, &shm_vfs_stats); + // The value shm_vfs_stats.f_bsize is the block size, and the value + // shm_vfs_stats.f_bavail is the number of available blocks. + int64_t shm_mem_avail = shm_vfs_stats.f_bsize * shm_vfs_stats.f_bavail; + close(shm_fd); + if (system_memory > shm_mem_avail) { + ARROW_LOG(FATAL) << "System memory request exceeds memory available in /dev/shm. The " + "request is for " + << system_memory << " bytes, and the amount available is " + << shm_mem_avail + << " bytes. You may be able to free up space by deleting files in " + "/dev/shm. If you are inside a Docker container, you may need to " + "pass " + "an argument with the flag '--shm-size' to 'docker run'."; + } +#endif + // Make it so dlmalloc fails if we try to request more memory than is + // available. 
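+  // With the footprint limit in place, dlmemalign() in create_object()
+  // returns NULL once an allocation would push the store past -m bytes,
+  // which is what drives the eviction loop there. A typical invocation
+  // (taken from test/run_tests.sh):
+  //
+  //   ./src/plasma/plasma_store -s /tmp/store1 -m 1000000000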
+ dlmalloc_set_footprint_limit((size_t)system_memory); + ARROW_LOG(DEBUG) << "starting server listening on " << socket_name; + start_server(socket_name, system_memory); +} diff --git a/cpp/src/plasma/store.h b/cpp/src/plasma/store.h new file mode 100644 index 0000000000000..8bd94265410f6 --- /dev/null +++ b/cpp/src/plasma/store.h @@ -0,0 +1,169 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PLASMA_STORE_H +#define PLASMA_STORE_H + +#include +#include + +#include "plasma/common.h" +#include "plasma/events.h" +#include "plasma/eviction_policy.h" +#include "plasma/plasma.h" +#include "plasma/protocol.h" + +struct GetRequest; + +struct NotificationQueue { + /// The object notifications for clients. We notify the client about the + /// objects in the order that the objects were sealed or deleted. + std::deque object_notifications; +}; + +/// Contains all information that is associated with a Plasma store client. +struct Client { + explicit Client(int fd); + + /// The file descriptor used to communicate with the client. + int fd; +}; + +class PlasmaStore { + public: + PlasmaStore(EventLoop* loop, int64_t system_memory); + + ~PlasmaStore(); + + /// Create a new object. The client must do a call to release_object to tell + /// the store when it is done with the object. + /// + /// @param object_id Object ID of the object to be created. + /// @param data_size Size in bytes of the object to be created. + /// @param metadata_size Size in bytes of the object metadata. + /// @return One of the following error codes: + /// - PlasmaError_OK, if the object was created successfully. + /// - PlasmaError_ObjectExists, if an object with this ID is already + /// present in the store. In this case, the client should not call + /// plasma_release. + /// - PlasmaError_OutOfMemory, if the store is out of memory and + /// cannot create the object. In this case, the client should not call + /// plasma_release. + int create_object(const ObjectID& object_id, int64_t data_size, int64_t metadata_size, + Client* client, PlasmaObject* result); + + /// Delete objects that have been created in the hash table. This should only + /// be called on objects that are returned by the eviction policy to evict. + /// + /// @param object_ids Object IDs of the objects to be deleted. + /// @return Void. + void delete_objects(const std::vector& object_ids); + + /// Process a get request from a client. This method assumes that we will + /// eventually have these objects sealed. If one of the objects has not yet + /// been sealed, the client that requested the object will be notified when it + /// is sealed. + /// + /// For each object, the client must do a call to release_object to tell the + /// store when it is done with the object. 
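+  ///
+  /// Objects that are still unsealed when the timeout expires are reported
+  /// back with data_size set to -1, so the client can distinguish them from
+  /// objects that were found.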
+ /// + /// @param client The client making this request. + /// @param object_ids Object IDs of the objects to be gotten. + /// @param timeout_ms The timeout for the get request in milliseconds. + /// @return Void. + void process_get_request( + Client* client, const std::vector& object_ids, int64_t timeout_ms); + + /// Seal an object. The object is now immutable and can be accessed with get. + /// + /// @param object_id Object ID of the object to be sealed. + /// @param digest The digest of the object. This is used to tell if two + /// objects + /// with the same object ID are the same. + /// @return Void. + void seal_object(const ObjectID& object_id, unsigned char digest[]); + + /// Check if the plasma store contains an object: + /// + /// @param object_id Object ID that will be checked. + /// @return OBJECT_FOUND if the object is in the store, OBJECT_NOT_FOUND if + /// not + int contains_object(const ObjectID& object_id); + + /// Record the fact that a particular client is no longer using an object. + /// + /// @param object_id The object ID of the object that is being released. + /// @param client The client making this request. + /// @param Void. + void release_object(const ObjectID& object_id, Client* client); + + /// Subscribe a file descriptor to updates about new sealed objects. + /// + /// @param client The client making this request. + /// @return Void. + void subscribe_to_updates(Client* client); + + /// Connect a new client to the PlasmaStore. + /// + /// @param listener_sock The socket that is listening to incoming connections. + /// @return Void. + void connect_client(int listener_sock); + + /// Disconnect a client from the PlasmaStore. + /// + /// @param client The client that is disconnected. + /// @return Void. + void disconnect_client(Client* client); + + void send_notifications(int client_fd); + + Status process_message(Client* client); + + private: + void push_notification(ObjectInfoT* object_notification); + + void add_client_to_object_clients(ObjectTableEntry* entry, Client* client); + + void return_from_get(GetRequest* get_req); + + void update_object_get_requests(const ObjectID& object_id); + + int remove_client_from_object_clients(ObjectTableEntry* entry, Client* client); + + /// Event loop of the plasma store. + EventLoop* loop_; + /// The plasma store information, including the object tables, that is exposed + /// to the eviction policy. + PlasmaStoreInfo store_info_; + /// The state that is managed by the eviction policy. + EvictionPolicy eviction_policy_; + /// Input buffer. This is allocated only once to avoid mallocs for every + /// call to process_message. + std::vector input_buffer_; + /// A hash table mapping object IDs to a vector of the get requests that are + /// waiting for the object to arrive. + std::unordered_map, UniqueIDHasher> + object_get_requests_; + /// The pending notifications that have not been sent to subscribers because + /// the socket send buffers were full. This is a hash table from client file + /// descriptor to an array of object_ids to send to that client. + /// TODO(pcm): Consider putting this into the Client data structure and + /// reorganize the code slightly. 
+ std::unordered_map pending_notifications_; +}; + +#endif // PLASMA_STORE_H diff --git a/cpp/src/plasma/test/client_tests.cc b/cpp/src/plasma/test/client_tests.cc new file mode 100644 index 0000000000000..29b5b135144c3 --- /dev/null +++ b/cpp/src/plasma/test/client_tests.cc @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "gtest/gtest.h" + +#include +#include +#include +#include +#include +#include + +#include "plasma/client.h" +#include "plasma/common.h" +#include "plasma/plasma.h" +#include "plasma/protocol.h" + +std::string g_test_executable; // NOLINT + +class TestPlasmaStore : public ::testing::Test { + public: + // TODO(pcm): At the moment, stdout of the test gets mixed up with + // stdout of the object store. Consider changing that. + void SetUp() { + std::string plasma_directory = + g_test_executable.substr(0, g_test_executable.find_last_of("/")); + std::string plasma_command = + plasma_directory + + "/plasma_store -m 1000000000 -s /tmp/store 1> /dev/null 2> /dev/null &"; + system(plasma_command.c_str()); + ARROW_CHECK_OK(client_.Connect("/tmp/store", "", PLASMA_DEFAULT_RELEASE_DELAY)); + } + virtual void Finish() { + ARROW_CHECK_OK(client_.Disconnect()); + system("killall plasma_store &"); + } + + protected: + PlasmaClient client_; +}; + +TEST_F(TestPlasmaStore, ContainsTest) { + ObjectID object_id = ObjectID::from_random(); + + // Test for object non-existence. + bool has_object; + ARROW_CHECK_OK(client_.Contains(object_id, &has_object)); + ASSERT_EQ(has_object, false); + + // Test for the object being in local Plasma store. + // First create object. + int64_t data_size = 100; + uint8_t metadata[] = {5}; + int64_t metadata_size = sizeof(metadata); + uint8_t* data; + ARROW_CHECK_OK(client_.Create(object_id, data_size, metadata, metadata_size, &data)); + ARROW_CHECK_OK(client_.Seal(object_id)); + // Avoid race condition of Plasma Manager waiting for notification. + ObjectBuffer object_buffer; + ARROW_CHECK_OK(client_.Get(&object_id, 1, -1, &object_buffer)); + ARROW_CHECK_OK(client_.Contains(object_id, &has_object)); + ASSERT_EQ(has_object, true); +} + +TEST_F(TestPlasmaStore, GetTest) { + ObjectID object_id = ObjectID::from_random(); + ObjectBuffer object_buffer; + + // Test for object non-existence. + ARROW_CHECK_OK(client_.Get(&object_id, 1, 0, &object_buffer)); + ASSERT_EQ(object_buffer.data_size, -1); + + // Test for the object being in local Plasma store. + // First create object. 
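+  // Create() returns a pointer into store-managed shared memory that the
+  // client writes into directly; Seal() then makes the object immutable, and
+  // Get() returns a buffer backed by that same memory.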
+ int64_t data_size = 4; + uint8_t metadata[] = {5}; + int64_t metadata_size = sizeof(metadata); + uint8_t* data; + ARROW_CHECK_OK(client_.Create(object_id, data_size, metadata, metadata_size, &data)); + for (int64_t i = 0; i < data_size; i++) { + data[i] = static_cast(i % 4); + } + ARROW_CHECK_OK(client_.Seal(object_id)); + + ARROW_CHECK_OK(client_.Get(&object_id, 1, -1, &object_buffer)); + for (int64_t i = 0; i < data_size; i++) { + ASSERT_EQ(data[i], object_buffer.data[i]); + } +} + +TEST_F(TestPlasmaStore, MultipleGetTest) { + ObjectID object_id1 = ObjectID::from_random(); + ObjectID object_id2 = ObjectID::from_random(); + ObjectID object_ids[2] = {object_id1, object_id2}; + ObjectBuffer object_buffer[2]; + + int64_t data_size = 4; + uint8_t metadata[] = {5}; + int64_t metadata_size = sizeof(metadata); + uint8_t* data; + ARROW_CHECK_OK(client_.Create(object_id1, data_size, metadata, metadata_size, &data)); + data[0] = 1; + ARROW_CHECK_OK(client_.Seal(object_id1)); + + ARROW_CHECK_OK(client_.Create(object_id2, data_size, metadata, metadata_size, &data)); + data[0] = 2; + ARROW_CHECK_OK(client_.Seal(object_id2)); + + ARROW_CHECK_OK(client_.Get(object_ids, 2, -1, object_buffer)); + ASSERT_EQ(object_buffer[0].data[0], 1); + ASSERT_EQ(object_buffer[1].data[0], 2); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + g_test_executable = std::string(argv[0]); + return RUN_ALL_TESTS(); +} diff --git a/cpp/src/plasma/test/run_tests.sh b/cpp/src/plasma/test/run_tests.sh new file mode 100644 index 0000000000000..958bd08398e23 --- /dev/null +++ b/cpp/src/plasma/test/run_tests.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Cause the script to exit if a single command fails. +set -e + +./src/plasma/plasma_store -s /tmp/plasma_store_socket_1 -m 0 & +sleep 1 +./src/plasma/manager_tests +killall plasma_store +./src/plasma/serialization_tests + +# Start the Redis shards. +./src/common/thirdparty/redis/src/redis-server --loglevel warning --loadmodule ./src/common/redis_module/libray_redis_module.so --port 6379 & +redis_pid1=$! +./src/common/thirdparty/redis/src/redis-server --loglevel warning --loadmodule ./src/common/redis_module/libray_redis_module.so --port 6380 & +redis_pid2=$! +sleep 1 + +# Flush the redis server +./src/common/thirdparty/redis/src/redis-cli flushall +# Register the shard location with the primary shard. +./src/common/thirdparty/redis/src/redis-cli set NumRedisShards 1 +./src/common/thirdparty/redis/src/redis-cli rpush RedisShards 127.0.0.1:6380 +sleep 1 +./src/plasma/plasma_store -s /tmp/store1 -m 1000000000 & +plasma1_pid=$! +./src/plasma/plasma_manager -m /tmp/manager1 -s /tmp/store1 -h 127.0.0.1 -p 11111 -r 127.0.0.1:6379 & +plasma2_pid=$! 
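+# Bring up a second store/manager pair on its own socket and port
+# (/tmp/store2, /tmp/manager2, 22222) before the client tests run.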
+./src/plasma/plasma_store -s /tmp/store2 -m 1000000000 & +plasma3_pid=$! +./src/plasma/plasma_manager -m /tmp/manager2 -s /tmp/store2 -h 127.0.0.1 -p 22222 -r 127.0.0.1:6379 & +plasma4_pid=$! +sleep 1 + +./src/plasma/client_tests + +kill $plasma4_pid +kill $plasma3_pid +kill $plasma2_pid +kill $plasma1_pid +kill $redis_pid1 +wait $redis_pid1 +kill $redis_pid2 +wait $redis_pid2 diff --git a/cpp/src/plasma/test/run_valgrind.sh b/cpp/src/plasma/test/run_valgrind.sh new file mode 100644 index 0000000000000..0472194128679 --- /dev/null +++ b/cpp/src/plasma/test/run_valgrind.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Cause the script to exit if a single command fails. +set -e + +./src/plasma/plasma_store -s /tmp/plasma_store_socket_1 -m 0 & +sleep 1 +valgrind --leak-check=full --error-exitcode=1 ./src/plasma/manager_tests +killall plasma_store +valgrind --leak-check=full --error-exitcode=1 ./src/plasma/serialization_tests diff --git a/cpp/src/plasma/test/serialization_tests.cc b/cpp/src/plasma/test/serialization_tests.cc new file mode 100644 index 0000000000000..325cead06e770 --- /dev/null +++ b/cpp/src/plasma/test/serialization_tests.cc @@ -0,0 +1,388 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "gtest/gtest.h" + +#include +#include + +#include "plasma/common.h" +#include "plasma/io.h" +#include "plasma/plasma.h" +#include "plasma/protocol.h" + +/** + * Create a temporary file. Needs to be closed by the caller. + * + * @return File descriptor of the file. + */ +int create_temp_file(void) { + static char temp[] = "/tmp/tempfileXXXXXX"; + char file_name[32]; + strncpy(file_name, temp, 32); + return mkstemp(file_name); +} + +/** + * Seek to the beginning of a file and read a message from it. + * + * @param fd File descriptor of the file. + * @param message type Message type that we expect in the file. + * + * @return Pointer to the content of the message. Needs to be freed by the + * caller. 
+ */ +std::vector read_message_from_file(int fd, int message_type) { + /* Go to the beginning of the file. */ + lseek(fd, 0, SEEK_SET); + int64_t type; + std::vector data; + ARROW_CHECK_OK(ReadMessage(fd, &type, &data)); + ARROW_CHECK(type == message_type); + return data; +} + +PlasmaObject random_plasma_object(void) { + unsigned int seed = static_cast(time(NULL)); + int random = rand_r(&seed); + PlasmaObject object; + memset(&object, 0, sizeof(object)); + object.handle.store_fd = random + 7; + object.handle.mmap_size = random + 42; + object.data_offset = random + 1; + object.metadata_offset = random + 2; + object.data_size = random + 3; + object.metadata_size = random + 4; + return object; +} + +TEST(PlasmaSerialization, CreateRequest) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + int64_t data_size1 = 42; + int64_t metadata_size1 = 11; + ARROW_CHECK_OK(SendCreateRequest(fd, object_id1, data_size1, metadata_size1)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaCreateRequest); + ObjectID object_id2; + int64_t data_size2; + int64_t metadata_size2; + ARROW_CHECK_OK( + ReadCreateRequest(data.data(), &object_id2, &data_size2, &metadata_size2)); + ASSERT_EQ(data_size1, data_size2); + ASSERT_EQ(metadata_size1, metadata_size2); + ASSERT_EQ(object_id1, object_id2); + close(fd); +} + +TEST(PlasmaSerialization, CreateReply) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + PlasmaObject object1 = random_plasma_object(); + ARROW_CHECK_OK(SendCreateReply(fd, object_id1, &object1, 0)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaCreateReply); + ObjectID object_id2; + PlasmaObject object2; + memset(&object2, 0, sizeof(object2)); + ARROW_CHECK_OK(ReadCreateReply(data.data(), &object_id2, &object2)); + ASSERT_EQ(object_id1, object_id2); + ASSERT_EQ(memcmp(&object1, &object2, sizeof(object1)), 0); + close(fd); +} + +TEST(PlasmaSerialization, SealRequest) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + unsigned char digest1[kDigestSize]; + memset(&digest1[0], 7, kDigestSize); + ARROW_CHECK_OK(SendSealRequest(fd, object_id1, &digest1[0])); + std::vector data = read_message_from_file(fd, MessageType_PlasmaSealRequest); + ObjectID object_id2; + unsigned char digest2[kDigestSize]; + ARROW_CHECK_OK(ReadSealRequest(data.data(), &object_id2, &digest2[0])); + ASSERT_EQ(object_id1, object_id2); + ASSERT_EQ(memcmp(&digest1[0], &digest2[0], kDigestSize), 0); + close(fd); +} + +TEST(PlasmaSerialization, SealReply) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + ARROW_CHECK_OK(SendSealReply(fd, object_id1, PlasmaError_ObjectExists)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaSealReply); + ObjectID object_id2; + Status s = ReadSealReply(data.data(), &object_id2); + ASSERT_EQ(object_id1, object_id2); + ASSERT_TRUE(s.IsPlasmaObjectExists()); + close(fd); +} + +TEST(PlasmaSerialization, GetRequest) { + int fd = create_temp_file(); + ObjectID object_ids[2]; + object_ids[0] = ObjectID::from_random(); + object_ids[1] = ObjectID::from_random(); + int64_t timeout_ms = 1234; + ARROW_CHECK_OK(SendGetRequest(fd, object_ids, 2, timeout_ms)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaGetRequest); + std::vector object_ids_return; + int64_t timeout_ms_return; + ARROW_CHECK_OK(ReadGetRequest(data.data(), object_ids_return, &timeout_ms_return)); + ASSERT_EQ(object_ids[0], object_ids_return[0]); + 
ASSERT_EQ(object_ids[1], object_ids_return[1]); + ASSERT_EQ(timeout_ms, timeout_ms_return); + close(fd); +} + +TEST(PlasmaSerialization, GetReply) { + int fd = create_temp_file(); + ObjectID object_ids[2]; + object_ids[0] = ObjectID::from_random(); + object_ids[1] = ObjectID::from_random(); + std::unordered_map plasma_objects; + plasma_objects[object_ids[0]] = random_plasma_object(); + plasma_objects[object_ids[1]] = random_plasma_object(); + ARROW_CHECK_OK(SendGetReply(fd, object_ids, plasma_objects, 2)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaGetReply); + ObjectID object_ids_return[2]; + PlasmaObject plasma_objects_return[2]; + memset(&plasma_objects_return, 0, sizeof(plasma_objects_return)); + ARROW_CHECK_OK( + ReadGetReply(data.data(), object_ids_return, &plasma_objects_return[0], 2)); + ASSERT_EQ(object_ids[0], object_ids_return[0]); + ASSERT_EQ(object_ids[1], object_ids_return[1]); + ASSERT_EQ(memcmp(&plasma_objects[object_ids[0]], &plasma_objects_return[0], + sizeof(PlasmaObject)), + 0); + ASSERT_EQ(memcmp(&plasma_objects[object_ids[1]], &plasma_objects_return[1], + sizeof(PlasmaObject)), + 0); + close(fd); +} + +TEST(PlasmaSerialization, ReleaseRequest) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + ARROW_CHECK_OK(SendReleaseRequest(fd, object_id1)); + std::vector data = + read_message_from_file(fd, MessageType_PlasmaReleaseRequest); + ObjectID object_id2; + ARROW_CHECK_OK(ReadReleaseRequest(data.data(), &object_id2)); + ASSERT_EQ(object_id1, object_id2); + close(fd); +} + +TEST(PlasmaSerialization, ReleaseReply) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + ARROW_CHECK_OK(SendReleaseReply(fd, object_id1, PlasmaError_ObjectExists)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaReleaseReply); + ObjectID object_id2; + Status s = ReadReleaseReply(data.data(), &object_id2); + ASSERT_EQ(object_id1, object_id2); + ASSERT_TRUE(s.IsPlasmaObjectExists()); + close(fd); +} + +TEST(PlasmaSerialization, DeleteRequest) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + ARROW_CHECK_OK(SendDeleteRequest(fd, object_id1)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaDeleteRequest); + ObjectID object_id2; + ARROW_CHECK_OK(ReadDeleteRequest(data.data(), &object_id2)); + ASSERT_EQ(object_id1, object_id2); + close(fd); +} + +TEST(PlasmaSerialization, DeleteReply) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + int error1 = PlasmaError_ObjectExists; + ARROW_CHECK_OK(SendDeleteReply(fd, object_id1, error1)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaDeleteReply); + ObjectID object_id2; + Status s = ReadDeleteReply(data.data(), &object_id2); + ASSERT_EQ(object_id1, object_id2); + ASSERT_TRUE(s.IsPlasmaObjectExists()); + close(fd); +} + +TEST(PlasmaSerialization, StatusRequest) { + int fd = create_temp_file(); + int64_t num_objects = 2; + ObjectID object_ids[num_objects]; + object_ids[0] = ObjectID::from_random(); + object_ids[1] = ObjectID::from_random(); + ARROW_CHECK_OK(SendStatusRequest(fd, object_ids, num_objects)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaStatusRequest); + ObjectID object_ids_read[num_objects]; + ARROW_CHECK_OK(ReadStatusRequest(data.data(), object_ids_read, num_objects)); + ASSERT_EQ(object_ids[0], object_ids_read[0]); + ASSERT_EQ(object_ids[1], object_ids_read[1]); + close(fd); +} + +TEST(PlasmaSerialization, StatusReply) { 
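+  // Same round-trip pattern as the tests above: write the message to a temp
+  // file, read it back with read_message_from_file(), and check every field.
+  // The reply is variable-length, so ReadStatusReply_num_objects() is called
+  // first to learn how many entries to allocate for.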
+ int fd = create_temp_file(); + ObjectID object_ids[2]; + object_ids[0] = ObjectID::from_random(); + object_ids[1] = ObjectID::from_random(); + int object_statuses[2] = {42, 43}; + ARROW_CHECK_OK(SendStatusReply(fd, object_ids, object_statuses, 2)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaStatusReply); + int64_t num_objects = ReadStatusReply_num_objects(data.data()); + ObjectID object_ids_read[num_objects]; + int object_statuses_read[num_objects]; + ARROW_CHECK_OK( + ReadStatusReply(data.data(), object_ids_read, object_statuses_read, num_objects)); + ASSERT_EQ(object_ids[0], object_ids_read[0]); + ASSERT_EQ(object_ids[1], object_ids_read[1]); + ASSERT_EQ(object_statuses[0], object_statuses_read[0]); + ASSERT_EQ(object_statuses[1], object_statuses_read[1]); + close(fd); +} + +TEST(PlasmaSerialization, EvictRequest) { + int fd = create_temp_file(); + int64_t num_bytes = 111; + ARROW_CHECK_OK(SendEvictRequest(fd, num_bytes)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaEvictRequest); + int64_t num_bytes_received; + ARROW_CHECK_OK(ReadEvictRequest(data.data(), &num_bytes_received)); + ASSERT_EQ(num_bytes, num_bytes_received); + close(fd); +} + +TEST(PlasmaSerialization, EvictReply) { + int fd = create_temp_file(); + int64_t num_bytes = 111; + ARROW_CHECK_OK(SendEvictReply(fd, num_bytes)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaEvictReply); + int64_t num_bytes_received; + ARROW_CHECK_OK(ReadEvictReply(data.data(), num_bytes_received)); + ASSERT_EQ(num_bytes, num_bytes_received); + close(fd); +} + +TEST(PlasmaSerialization, FetchRequest) { + int fd = create_temp_file(); + ObjectID object_ids[2]; + object_ids[0] = ObjectID::from_random(); + object_ids[1] = ObjectID::from_random(); + ARROW_CHECK_OK(SendFetchRequest(fd, object_ids, 2)); + std::vector data = read_message_from_file(fd, MessageType_PlasmaFetchRequest); + std::vector object_ids_read; + ARROW_CHECK_OK(ReadFetchRequest(data.data(), object_ids_read)); + ASSERT_EQ(object_ids[0], object_ids_read[0]); + ASSERT_EQ(object_ids[1], object_ids_read[1]); + close(fd); +} + +TEST(PlasmaSerialization, WaitRequest) { + int fd = create_temp_file(); + const int num_objects_in = 2; + ObjectRequest object_requests_in[num_objects_in] = { + ObjectRequest({ObjectID::from_random(), PLASMA_QUERY_ANYWHERE, 0}), + ObjectRequest({ObjectID::from_random(), PLASMA_QUERY_LOCAL, 0})}; + const int num_ready_objects_in = 1; + int64_t timeout_ms = 1000; + + ARROW_CHECK_OK(SendWaitRequest( + fd, &object_requests_in[0], num_objects_in, num_ready_objects_in, timeout_ms)); + /* Read message back. 
*/ + std::vector data = read_message_from_file(fd, MessageType_PlasmaWaitRequest); + int num_ready_objects_out; + int64_t timeout_ms_read; + ObjectRequestMap object_requests_out; + ARROW_CHECK_OK(ReadWaitRequest( + data.data(), object_requests_out, &timeout_ms_read, &num_ready_objects_out)); + ASSERT_EQ(num_objects_in, object_requests_out.size()); + ASSERT_EQ(num_ready_objects_out, num_ready_objects_in); + for (int i = 0; i < num_objects_in; i++) { + const ObjectID& object_id = object_requests_in[i].object_id; + ASSERT_EQ(1, object_requests_out.count(object_id)); + const auto& entry = object_requests_out.find(object_id); + ASSERT_TRUE(entry != object_requests_out.end()); + ASSERT_EQ(entry->second.object_id, object_requests_in[i].object_id); + ASSERT_EQ(entry->second.type, object_requests_in[i].type); + } + close(fd); +} + +TEST(PlasmaSerialization, WaitReply) { + int fd = create_temp_file(); + const int num_objects_in = 2; + /* Create a map with two ObjectRequests in it. */ + ObjectRequestMap objects_in(num_objects_in); + ObjectID id1 = ObjectID::from_random(); + objects_in[id1] = ObjectRequest({id1, 0, ObjectStatus_Local}); + ObjectID id2 = ObjectID::from_random(); + objects_in[id2] = ObjectRequest({id2, 0, ObjectStatus_Nonexistent}); + + ARROW_CHECK_OK(SendWaitReply(fd, objects_in, num_objects_in)); + /* Read message back. */ + std::vector data = read_message_from_file(fd, MessageType_PlasmaWaitReply); + ObjectRequest objects_out[2]; + int num_objects_out; + ARROW_CHECK_OK(ReadWaitReply(data.data(), &objects_out[0], &num_objects_out)); + ASSERT_EQ(num_objects_in, num_objects_out); + for (int i = 0; i < num_objects_out; i++) { + /* Each object request must appear exactly once. */ + ASSERT_EQ(objects_in.count(objects_out[i].object_id), 1); + const auto& entry = objects_in.find(objects_out[i].object_id); + ASSERT_TRUE(entry != objects_in.end()); + ASSERT_EQ(entry->second.object_id, objects_out[i].object_id); + ASSERT_EQ(entry->second.status, objects_out[i].status); + } + close(fd); +} + +TEST(PlasmaSerialization, DataRequest) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + const char* address1 = "address1"; + int port1 = 12345; + ARROW_CHECK_OK(SendDataRequest(fd, object_id1, address1, port1)); + /* Reading message back. */ + std::vector data = read_message_from_file(fd, MessageType_PlasmaDataRequest); + ObjectID object_id2; + char* address2; + int port2; + ARROW_CHECK_OK(ReadDataRequest(data.data(), &object_id2, &address2, &port2)); + ASSERT_EQ(object_id1, object_id2); + ASSERT_EQ(strcmp(address1, address2), 0); + ASSERT_EQ(port1, port2); + free(address2); + close(fd); +} + +TEST(PlasmaSerialization, DataReply) { + int fd = create_temp_file(); + ObjectID object_id1 = ObjectID::from_random(); + int64_t object_size1 = 146; + int64_t metadata_size1 = 198; + ARROW_CHECK_OK(SendDataReply(fd, object_id1, object_size1, metadata_size1)); + /* Reading message back. 
*/ + std::vector data = read_message_from_file(fd, MessageType_PlasmaDataReply); + ObjectID object_id2; + int64_t object_size2; + int64_t metadata_size2; + ARROW_CHECK_OK(ReadDataReply(data.data(), &object_id2, &object_size2, &metadata_size2)); + ASSERT_EQ(object_id1, object_id2); + ASSERT_EQ(object_size1, object_size2); + ASSERT_EQ(metadata_size1, metadata_size2); +} diff --git a/cpp/src/plasma/thirdparty/ae/ae.c b/cpp/src/plasma/thirdparty/ae/ae.c new file mode 100644 index 0000000000000..e66808a81466d --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/ae.c @@ -0,0 +1,465 @@ +/* A simple event-driven programming library. Originally I wrote this code + * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated + * it in form of a library for easy reuse. + * + * Copyright (c) 2006-2010, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ae.h" +#include "zmalloc.h" +#include "config.h" + +/* Include the best multiplexing layer supported by this system. + * The following should be ordered by performances, descending. */ +#ifdef HAVE_EVPORT +#include "ae_evport.c" +#else + #ifdef HAVE_EPOLL + #include "ae_epoll.c" + #else + #ifdef HAVE_KQUEUE + #include "ae_kqueue.c" + #else + #include "ae_select.c" + #endif + #endif +#endif + +aeEventLoop *aeCreateEventLoop(int setsize) { + aeEventLoop *eventLoop; + int i; + + if ((eventLoop = zmalloc(sizeof(*eventLoop))) == NULL) goto err; + eventLoop->events = zmalloc(sizeof(aeFileEvent)*setsize); + eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize); + if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err; + eventLoop->setsize = setsize; + eventLoop->lastTime = time(NULL); + eventLoop->timeEventHead = NULL; + eventLoop->timeEventNextId = 0; + eventLoop->stop = 0; + eventLoop->maxfd = -1; + eventLoop->beforesleep = NULL; + if (aeApiCreate(eventLoop) == -1) goto err; + /* Events with mask == AE_NONE are not set. 
So let's initialize the + * vector with it. */ + for (i = 0; i < setsize; i++) + eventLoop->events[i].mask = AE_NONE; + return eventLoop; + +err: + if (eventLoop) { + zfree(eventLoop->events); + zfree(eventLoop->fired); + zfree(eventLoop); + } + return NULL; +} + +/* Return the current set size. */ +int aeGetSetSize(aeEventLoop *eventLoop) { + return eventLoop->setsize; +} + +/* Resize the maximum set size of the event loop. + * If the requested set size is smaller than the current set size, but + * there is already a file descriptor in use that is >= the requested + * set size minus one, AE_ERR is returned and the operation is not + * performed at all. + * + * Otherwise AE_OK is returned and the operation is successful. */ +int aeResizeSetSize(aeEventLoop *eventLoop, int setsize) { + int i; + + if (setsize == eventLoop->setsize) return AE_OK; + if (eventLoop->maxfd >= setsize) return AE_ERR; + if (aeApiResize(eventLoop,setsize) == -1) return AE_ERR; + + eventLoop->events = zrealloc(eventLoop->events,sizeof(aeFileEvent)*setsize); + eventLoop->fired = zrealloc(eventLoop->fired,sizeof(aeFiredEvent)*setsize); + eventLoop->setsize = setsize; + + /* Make sure that if we created new slots, they are initialized with + * an AE_NONE mask. */ + for (i = eventLoop->maxfd+1; i < setsize; i++) + eventLoop->events[i].mask = AE_NONE; + return AE_OK; +} + +void aeDeleteEventLoop(aeEventLoop *eventLoop) { + aeApiFree(eventLoop); + zfree(eventLoop->events); + zfree(eventLoop->fired); + zfree(eventLoop); +} + +void aeStop(aeEventLoop *eventLoop) { + eventLoop->stop = 1; +} + +int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, + aeFileProc *proc, void *clientData) +{ + if (fd >= eventLoop->setsize) { + errno = ERANGE; + return AE_ERR; + } + aeFileEvent *fe = &eventLoop->events[fd]; + + if (aeApiAddEvent(eventLoop, fd, mask) == -1) + return AE_ERR; + fe->mask |= mask; + if (mask & AE_READABLE) fe->rfileProc = proc; + if (mask & AE_WRITABLE) fe->wfileProc = proc; + fe->clientData = clientData; + if (fd > eventLoop->maxfd) + eventLoop->maxfd = fd; + return AE_OK; +} + +void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask) +{ + if (fd >= eventLoop->setsize) return; + aeFileEvent *fe = &eventLoop->events[fd]; + if (fe->mask == AE_NONE) return; + + aeApiDelEvent(eventLoop, fd, mask); + fe->mask = fe->mask & (~mask); + if (fd == eventLoop->maxfd && fe->mask == AE_NONE) { + /* Update the max fd */ + int j; + + for (j = eventLoop->maxfd-1; j >= 0; j--) + if (eventLoop->events[j].mask != AE_NONE) break; + eventLoop->maxfd = j; + } +} + +int aeGetFileEvents(aeEventLoop *eventLoop, int fd) { + if (fd >= eventLoop->setsize) return 0; + aeFileEvent *fe = &eventLoop->events[fd]; + + return fe->mask; +} + +static void aeGetTime(long *seconds, long *milliseconds) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + *seconds = tv.tv_sec; + *milliseconds = tv.tv_usec/1000; +} + +static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) { + long cur_sec, cur_ms, when_sec, when_ms; + + aeGetTime(&cur_sec, &cur_ms); + when_sec = cur_sec + milliseconds/1000; + when_ms = cur_ms + milliseconds%1000; + if (when_ms >= 1000) { + when_sec ++; + when_ms -= 1000; + } + *sec = when_sec; + *ms = when_ms; +} + +long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, + aeTimeProc *proc, void *clientData, + aeEventFinalizerProc *finalizerProc) +{ + long long id = eventLoop->timeEventNextId++; + aeTimeEvent *te; + + te = zmalloc(sizeof(*te)); + if (te == NULL) return AE_ERR; + 
te->id = id; + aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms); + te->timeProc = proc; + te->finalizerProc = finalizerProc; + te->clientData = clientData; + te->next = eventLoop->timeEventHead; + eventLoop->timeEventHead = te; + return id; +} + +int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id) +{ + aeTimeEvent *te = eventLoop->timeEventHead; + while(te) { + if (te->id == id) { + te->id = AE_DELETED_EVENT_ID; + return AE_OK; + } + te = te->next; + } + return AE_ERR; /* NO event with the specified ID found */ +} + +/* Search the first timer to fire. + * This operation is useful to know how many time the select can be + * put in sleep without to delay any event. + * If there are no timers NULL is returned. + * + * Note that's O(N) since time events are unsorted. + * Possible optimizations (not needed by Redis so far, but...): + * 1) Insert the event in order, so that the nearest is just the head. + * Much better but still insertion or deletion of timers is O(N). + * 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)). + */ +static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) +{ + aeTimeEvent *te = eventLoop->timeEventHead; + aeTimeEvent *nearest = NULL; + + while(te) { + if (!nearest || te->when_sec < nearest->when_sec || + (te->when_sec == nearest->when_sec && + te->when_ms < nearest->when_ms)) + nearest = te; + te = te->next; + } + return nearest; +} + +/* Process time events */ +static int processTimeEvents(aeEventLoop *eventLoop) { + int processed = 0; + aeTimeEvent *te, *prev; + long long maxId; + time_t now = time(NULL); + + /* If the system clock is moved to the future, and then set back to the + * right value, time events may be delayed in a random way. Often this + * means that scheduled operations will not be performed soon enough. + * + * Here we try to detect system clock skews, and force all the time + * events to be processed ASAP when this happens: the idea is that + * processing events earlier is less dangerous than delaying them + * indefinitely, and practice suggests it is. */ + if (now < eventLoop->lastTime) { + te = eventLoop->timeEventHead; + while(te) { + te->when_sec = 0; + te = te->next; + } + } + eventLoop->lastTime = now; + + prev = NULL; + te = eventLoop->timeEventHead; + maxId = eventLoop->timeEventNextId-1; + while(te) { + long now_sec, now_ms; + long long id; + + /* Remove events scheduled for deletion. */ + if (te->id == AE_DELETED_EVENT_ID) { + aeTimeEvent *next = te->next; + if (prev == NULL) + eventLoop->timeEventHead = te->next; + else + prev->next = te->next; + if (te->finalizerProc) + te->finalizerProc(eventLoop, te->clientData); + zfree(te); + te = next; + continue; + } + + /* Make sure we don't process time events created by time events in + * this iteration. Note that this check is currently useless: we always + * add new timers on the head, however if we change the implementation + * detail, this check may be useful again: we keep it here for future + * defense. 
*/ + if (te->id > maxId) { + te = te->next; + continue; + } + aeGetTime(&now_sec, &now_ms); + if (now_sec > te->when_sec || + (now_sec == te->when_sec && now_ms >= te->when_ms)) + { + int retval; + + id = te->id; + retval = te->timeProc(eventLoop, id, te->clientData); + processed++; + if (retval != AE_NOMORE) { + aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms); + } else { + te->id = AE_DELETED_EVENT_ID; + } + } + prev = te; + te = te->next; + } + return processed; +} + +/* Process every pending time event, then every pending file event + * (that may be registered by time event callbacks just processed). + * Without special flags the function sleeps until some file event + * fires, or when the next time event occurs (if any). + * + * If flags is 0, the function does nothing and returns. + * if flags has AE_ALL_EVENTS set, all the kind of events are processed. + * if flags has AE_FILE_EVENTS set, file events are processed. + * if flags has AE_TIME_EVENTS set, time events are processed. + * if flags has AE_DONT_WAIT set the function returns ASAP until all + * the events that's possible to process without to wait are processed. + * + * The function returns the number of events processed. */ +int aeProcessEvents(aeEventLoop *eventLoop, int flags) +{ + int processed = 0, numevents; + + /* Nothing to do? return ASAP */ + if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0; + + /* Note that we want call select() even if there are no + * file events to process as long as we want to process time + * events, in order to sleep until the next time event is ready + * to fire. */ + if (eventLoop->maxfd != -1 || + ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) { + int j; + aeTimeEvent *shortest = NULL; + struct timeval tv, *tvp; + + if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT)) + shortest = aeSearchNearestTimer(eventLoop); + if (shortest) { + long now_sec, now_ms; + + aeGetTime(&now_sec, &now_ms); + tvp = &tv; + + /* How many milliseconds we need to wait for the next + * time event to fire? */ + long long ms = + (shortest->when_sec - now_sec)*1000 + + shortest->when_ms - now_ms; + + if (ms > 0) { + tvp->tv_sec = ms/1000; + tvp->tv_usec = (ms % 1000)*1000; + } else { + tvp->tv_sec = 0; + tvp->tv_usec = 0; + } + } else { + /* If we have to check for events but need to return + * ASAP because of AE_DONT_WAIT we need to set the timeout + * to zero */ + if (flags & AE_DONT_WAIT) { + tv.tv_sec = tv.tv_usec = 0; + tvp = &tv; + } else { + /* Otherwise we can block */ + tvp = NULL; /* wait forever */ + } + } + + numevents = aeApiPoll(eventLoop, tvp); + for (j = 0; j < numevents; j++) { + aeFileEvent *fe = &eventLoop->events[eventLoop->fired[j].fd]; + int mask = eventLoop->fired[j].mask; + int fd = eventLoop->fired[j].fd; + int rfired = 0; + + /* note the fe->mask & mask & ... code: maybe an already processed + * event removed an element that fired and we still didn't + * processed, so we check if the event is still valid. 
*/ + if (fe->mask & mask & AE_READABLE) { + rfired = 1; + fe->rfileProc(eventLoop,fd,fe->clientData,mask); + } + if (fe->mask & mask & AE_WRITABLE) { + if (!rfired || fe->wfileProc != fe->rfileProc) + fe->wfileProc(eventLoop,fd,fe->clientData,mask); + } + processed++; + } + } + /* Check time events */ + if (flags & AE_TIME_EVENTS) + processed += processTimeEvents(eventLoop); + + return processed; /* return the number of processed file/time events */ +} + +/* Wait for milliseconds until the given file descriptor becomes + * writable/readable/exception */ +int aeWait(int fd, int mask, long long milliseconds) { + struct pollfd pfd; + int retmask = 0, retval; + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = fd; + if (mask & AE_READABLE) pfd.events |= POLLIN; + if (mask & AE_WRITABLE) pfd.events |= POLLOUT; + + if ((retval = poll(&pfd, 1, milliseconds))== 1) { + if (pfd.revents & POLLIN) retmask |= AE_READABLE; + if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE; + if (pfd.revents & POLLERR) retmask |= AE_WRITABLE; + if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE; + return retmask; + } else { + return retval; + } +} + +void aeMain(aeEventLoop *eventLoop) { + eventLoop->stop = 0; + while (!eventLoop->stop) { + if (eventLoop->beforesleep != NULL) + eventLoop->beforesleep(eventLoop); + aeProcessEvents(eventLoop, AE_ALL_EVENTS); + } +} + +char *aeGetApiName(void) { + return aeApiName(); +} + +void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep) { + eventLoop->beforesleep = beforesleep; +} diff --git a/cpp/src/plasma/thirdparty/ae/ae.h b/cpp/src/plasma/thirdparty/ae/ae.h new file mode 100644 index 0000000000000..827c4c9e4e59e --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/ae.h @@ -0,0 +1,123 @@ +/* A simple event-driven programming library. Originally I wrote this code + * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated + * it in form of a library for easy reuse. + * + * Copyright (c) 2006-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __AE_H__ +#define __AE_H__ + +#include + +#define AE_OK 0 +#define AE_ERR -1 + +#define AE_NONE 0 +#define AE_READABLE 1 +#define AE_WRITABLE 2 + +#define AE_FILE_EVENTS 1 +#define AE_TIME_EVENTS 2 +#define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS) +#define AE_DONT_WAIT 4 + +#define AE_NOMORE -1 +#define AE_DELETED_EVENT_ID -1 + +/* Macros */ +#define AE_NOTUSED(V) ((void) V) + +struct aeEventLoop; + +/* Types and data structures */ +typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); +typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData); +typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData); +typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop); + +/* File event structure */ +typedef struct aeFileEvent { + int mask; /* one of AE_(READABLE|WRITABLE) */ + aeFileProc *rfileProc; + aeFileProc *wfileProc; + void *clientData; +} aeFileEvent; + +/* Time event structure */ +typedef struct aeTimeEvent { + long long id; /* time event identifier. */ + long when_sec; /* seconds */ + long when_ms; /* milliseconds */ + aeTimeProc *timeProc; + aeEventFinalizerProc *finalizerProc; + void *clientData; + struct aeTimeEvent *next; +} aeTimeEvent; + +/* A fired event */ +typedef struct aeFiredEvent { + int fd; + int mask; +} aeFiredEvent; + +/* State of an event based program */ +typedef struct aeEventLoop { + int maxfd; /* highest file descriptor currently registered */ + int setsize; /* max number of file descriptors tracked */ + long long timeEventNextId; + time_t lastTime; /* Used to detect system clock skew */ + aeFileEvent *events; /* Registered events */ + aeFiredEvent *fired; /* Fired events */ + aeTimeEvent *timeEventHead; + int stop; + void *apidata; /* This is used for polling API specific data */ + aeBeforeSleepProc *beforesleep; +} aeEventLoop; + +/* Prototypes */ +aeEventLoop *aeCreateEventLoop(int setsize); +void aeDeleteEventLoop(aeEventLoop *eventLoop); +void aeStop(aeEventLoop *eventLoop); +int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, + aeFileProc *proc, void *clientData); +void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask); +int aeGetFileEvents(aeEventLoop *eventLoop, int fd); +long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, + aeTimeProc *proc, void *clientData, + aeEventFinalizerProc *finalizerProc); +int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id); +int aeProcessEvents(aeEventLoop *eventLoop, int flags); +int aeWait(int fd, int mask, long long milliseconds); +void aeMain(aeEventLoop *eventLoop); +char *aeGetApiName(void); +void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep); +int aeGetSetSize(aeEventLoop *eventLoop); +int aeResizeSetSize(aeEventLoop *eventLoop, int setsize); + +#endif diff --git a/cpp/src/plasma/thirdparty/ae/ae_epoll.c b/cpp/src/plasma/thirdparty/ae/ae_epoll.c new file mode 100644 index 0000000000000..410aac70dc5af --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/ae_epoll.c @@ -0,0 +1,135 @@ +/* Linux epoll(2) based ae.c module + * + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
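(Reviewer note, not part of the patch: the header above declares the full public API of the vendored ae event loop. A minimal standalone usage sketch is shown below; the choice of STDIN_FILENO, the 64-descriptor set size, and the file/timer callbacks are illustrative assumptions only, and it presumes the program is compiled together with ae.c.)

#include <stdio.h>
#include <unistd.h>
#include "ae.h"

/* File callback: drain stdin; stop the loop on EOF or error. */
static void on_stdin_readable(aeEventLoop *loop, int fd, void *clientData, int mask) {
    char buf[256];
    ssize_t n = read(fd, buf, sizeof(buf));
    (void)clientData; (void)mask;
    if (n <= 0) {
        aeStop(loop);                 /* leave aeMain() below */
    } else {
        printf("read %zd bytes\n", n);
    }
}

/* Timer callback: returning an interval in ms reschedules the event;
 * returning AE_NOMORE would delete it after this run. */
static int every_second(aeEventLoop *loop, long long id, void *clientData) {
    (void)loop; (void)id; (void)clientData;
    printf("tick\n");
    return 1000;
}

int main(void) {
    aeEventLoop *loop = aeCreateEventLoop(64);      /* track at most 64 descriptors */
    if (loop == NULL) return 1;
    printf("multiplexing backend: %s\n", aeGetApiName());
    aeCreateFileEvent(loop, STDIN_FILENO, AE_READABLE, on_stdin_readable, NULL);
    aeCreateTimeEvent(loop, 1000, every_second, NULL, NULL);
    aeMain(loop);                                   /* runs until aeStop() is called */
    aeDeleteEventLoop(loop);
    return 0;
}

(A single non-blocking iteration can be driven with aeProcessEvents(loop, AE_ALL_EVENTS | AE_DONT_WAIT) instead of aeMain, per the flag semantics documented in ae.c above.)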
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include + +typedef struct aeApiState { + int epfd; + struct epoll_event *events; +} aeApiState; + +static int aeApiCreate(aeEventLoop *eventLoop) { + aeApiState *state = zmalloc(sizeof(aeApiState)); + + if (!state) return -1; + state->events = zmalloc(sizeof(struct epoll_event)*eventLoop->setsize); + if (!state->events) { + zfree(state); + return -1; + } + state->epfd = epoll_create(1024); /* 1024 is just a hint for the kernel */ + if (state->epfd == -1) { + zfree(state->events); + zfree(state); + return -1; + } + eventLoop->apidata = state; + return 0; +} + +static int aeApiResize(aeEventLoop *eventLoop, int setsize) { + aeApiState *state = eventLoop->apidata; + + state->events = zrealloc(state->events, sizeof(struct epoll_event)*setsize); + return 0; +} + +static void aeApiFree(aeEventLoop *eventLoop) { + aeApiState *state = eventLoop->apidata; + + close(state->epfd); + zfree(state->events); + zfree(state); +} + +static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + struct epoll_event ee = {0}; /* avoid valgrind warning */ + /* If the fd was already monitored for some event, we need a MOD + * operation. Otherwise we need an ADD operation. */ + int op = eventLoop->events[fd].mask == AE_NONE ? + EPOLL_CTL_ADD : EPOLL_CTL_MOD; + + ee.events = 0; + mask |= eventLoop->events[fd].mask; /* Merge old events */ + if (mask & AE_READABLE) ee.events |= EPOLLIN; + if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; + ee.data.fd = fd; + if (epoll_ctl(state->epfd,op,fd,&ee) == -1) return -1; + return 0; +} + +static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int delmask) { + aeApiState *state = eventLoop->apidata; + struct epoll_event ee = {0}; /* avoid valgrind warning */ + int mask = eventLoop->events[fd].mask & (~delmask); + + ee.events = 0; + if (mask & AE_READABLE) ee.events |= EPOLLIN; + if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; + ee.data.fd = fd; + if (mask != AE_NONE) { + epoll_ctl(state->epfd,EPOLL_CTL_MOD,fd,&ee); + } else { + /* Note, Kernel < 2.6.9 requires a non null event pointer even for + * EPOLL_CTL_DEL. 
*/ + epoll_ctl(state->epfd,EPOLL_CTL_DEL,fd,&ee); + } +} + +static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { + aeApiState *state = eventLoop->apidata; + int retval, numevents = 0; + + retval = epoll_wait(state->epfd,state->events,eventLoop->setsize, + tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1); + if (retval > 0) { + int j; + + numevents = retval; + for (j = 0; j < numevents; j++) { + int mask = 0; + struct epoll_event *e = state->events+j; + + if (e->events & EPOLLIN) mask |= AE_READABLE; + if (e->events & EPOLLOUT) mask |= AE_WRITABLE; + if (e->events & EPOLLERR) mask |= AE_WRITABLE; + if (e->events & EPOLLHUP) mask |= AE_WRITABLE; + eventLoop->fired[j].fd = e->data.fd; + eventLoop->fired[j].mask = mask; + } + } + return numevents; +} + +static char *aeApiName(void) { + return "epoll"; +} diff --git a/cpp/src/plasma/thirdparty/ae/ae_evport.c b/cpp/src/plasma/thirdparty/ae/ae_evport.c new file mode 100644 index 0000000000000..5c317becb6f7d --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/ae_evport.c @@ -0,0 +1,320 @@ +/* ae.c module for illumos event ports. + * + * Copyright (c) 2012, Joyent, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include +#include +#include +#include + +#include +#include + +#include + +static int evport_debug = 0; + +/* + * This file implements the ae API using event ports, present on Solaris-based + * systems since Solaris 10. Using the event port interface, we associate file + * descriptors with the port. Each association also includes the set of poll(2) + * events that the consumer is interested in (e.g., POLLIN and POLLOUT). + * + * There's one tricky piece to this implementation: when we return events via + * aeApiPoll, the corresponding file descriptors become dissociated from the + * port. This is necessary because poll events are level-triggered, so if the + * fd didn't become dissociated, it would immediately fire another event since + * the underlying state hasn't changed yet. 
We must re-associate the file + * descriptor, but only after we know that our caller has actually read from it. + * The ae API does not tell us exactly when that happens, but we do know that + * it must happen by the time aeApiPoll is called again. Our solution is to + * keep track of the last fds returned by aeApiPoll and re-associate them next + * time aeApiPoll is invoked. + * + * To summarize, in this module, each fd association is EITHER (a) represented + * only via the in-kernel association OR (b) represented by pending_fds and + * pending_masks. (b) is only true for the last fds we returned from aeApiPoll, + * and only until we enter aeApiPoll again (at which point we restore the + * in-kernel association). + */ +#define MAX_EVENT_BATCHSZ 512 + +typedef struct aeApiState { + int portfd; /* event port */ + int npending; /* # of pending fds */ + int pending_fds[MAX_EVENT_BATCHSZ]; /* pending fds */ + int pending_masks[MAX_EVENT_BATCHSZ]; /* pending fds' masks */ +} aeApiState; + +static int aeApiCreate(aeEventLoop *eventLoop) { + int i; + aeApiState *state = zmalloc(sizeof(aeApiState)); + if (!state) return -1; + + state->portfd = port_create(); + if (state->portfd == -1) { + zfree(state); + return -1; + } + + state->npending = 0; + + for (i = 0; i < MAX_EVENT_BATCHSZ; i++) { + state->pending_fds[i] = -1; + state->pending_masks[i] = AE_NONE; + } + + eventLoop->apidata = state; + return 0; +} + +static int aeApiResize(aeEventLoop *eventLoop, int setsize) { + /* Nothing to resize here. */ + return 0; +} + +static void aeApiFree(aeEventLoop *eventLoop) { + aeApiState *state = eventLoop->apidata; + + close(state->portfd); + zfree(state); +} + +static int aeApiLookupPending(aeApiState *state, int fd) { + int i; + + for (i = 0; i < state->npending; i++) { + if (state->pending_fds[i] == fd) + return (i); + } + + return (-1); +} + +/* + * Helper function to invoke port_associate for the given fd and mask. + */ +static int aeApiAssociate(const char *where, int portfd, int fd, int mask) { + int events = 0; + int rv, err; + + if (mask & AE_READABLE) + events |= POLLIN; + if (mask & AE_WRITABLE) + events |= POLLOUT; + + if (evport_debug) + fprintf(stderr, "%s: port_associate(%d, 0x%x) = ", where, fd, events); + + rv = port_associate(portfd, PORT_SOURCE_FD, fd, events, + (void *)(uintptr_t)mask); + err = errno; + + if (evport_debug) + fprintf(stderr, "%d (%s)\n", rv, rv == 0 ? "no error" : strerror(err)); + + if (rv == -1) { + fprintf(stderr, "%s: port_associate: %s\n", where, strerror(err)); + + if (err == EAGAIN) + fprintf(stderr, "aeApiAssociate: event port limit exceeded."); + } + + return rv; +} + +static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + int fullmask, pfd; + + if (evport_debug) + fprintf(stderr, "aeApiAddEvent: fd %d mask 0x%x\n", fd, mask); + + /* + * Since port_associate's "events" argument replaces any existing events, we + * must be sure to include whatever events are already associated when + * we call port_associate() again. + */ + fullmask = mask | eventLoop->events[fd].mask; + pfd = aeApiLookupPending(state, fd); + + if (pfd != -1) { + /* + * This fd was recently returned from aeApiPoll. It should be safe to + * assume that the consumer has processed that poll event, but we play + * it safer by simply updating pending_mask. The fd will be + * re-associated as usual when aeApiPoll is called again. 
+ */ + if (evport_debug) + fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd); + state->pending_masks[pfd] |= fullmask; + return 0; + } + + return (aeApiAssociate("aeApiAddEvent", state->portfd, fd, fullmask)); +} + +static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + int fullmask, pfd; + + if (evport_debug) + fprintf(stderr, "del fd %d mask 0x%x\n", fd, mask); + + pfd = aeApiLookupPending(state, fd); + + if (pfd != -1) { + if (evport_debug) + fprintf(stderr, "deleting event from pending fd %d\n", fd); + + /* + * This fd was just returned from aeApiPoll, so it's not currently + * associated with the port. All we need to do is update + * pending_mask appropriately. + */ + state->pending_masks[pfd] &= ~mask; + + if (state->pending_masks[pfd] == AE_NONE) + state->pending_fds[pfd] = -1; + + return; + } + + /* + * The fd is currently associated with the port. Like with the add case + * above, we must look at the full mask for the file descriptor before + * updating that association. We don't have a good way of knowing what the + * events are without looking into the eventLoop state directly. We rely on + * the fact that our caller has already updated the mask in the eventLoop. + */ + + fullmask = eventLoop->events[fd].mask; + if (fullmask == AE_NONE) { + /* + * We're removing *all* events, so use port_dissociate to remove the + * association completely. Failure here indicates a bug. + */ + if (evport_debug) + fprintf(stderr, "aeApiDelEvent: port_dissociate(%d)\n", fd); + + if (port_dissociate(state->portfd, PORT_SOURCE_FD, fd) != 0) { + perror("aeApiDelEvent: port_dissociate"); + abort(); /* will not return */ + } + } else if (aeApiAssociate("aeApiDelEvent", state->portfd, fd, + fullmask) != 0) { + /* + * ENOMEM is a potentially transient condition, but the kernel won't + * generally return it unless things are really bad. EAGAIN indicates + * we've reached an resource limit, for which it doesn't make sense to + * retry (counter-intuitively). All other errors indicate a bug. In any + * of these cases, the best we can do is to abort. + */ + abort(); /* will not return */ + } +} + +static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { + aeApiState *state = eventLoop->apidata; + struct timespec timeout, *tsp; + int mask, i; + uint_t nevents; + port_event_t event[MAX_EVENT_BATCHSZ]; + + /* + * If we've returned fd events before, we must re-associate them with the + * port now, before calling port_get(). See the block comment at the top of + * this file for an explanation of why. + */ + for (i = 0; i < state->npending; i++) { + if (state->pending_fds[i] == -1) + /* This fd has since been deleted. */ + continue; + + if (aeApiAssociate("aeApiPoll", state->portfd, + state->pending_fds[i], state->pending_masks[i]) != 0) { + /* See aeApiDelEvent for why this case is fatal. */ + abort(); + } + + state->pending_masks[i] = AE_NONE; + state->pending_fds[i] = -1; + } + + state->npending = 0; + + if (tvp != NULL) { + timeout.tv_sec = tvp->tv_sec; + timeout.tv_nsec = tvp->tv_usec * 1000; + tsp = &timeout; + } else { + tsp = NULL; + } + + /* + * port_getn can return with errno == ETIME having returned some events (!). + * So if we get ETIME, we check nevents, too. + */ + nevents = 1; + if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents, + tsp) == -1 && (errno != ETIME || nevents == 0)) { + if (errno == ETIME || errno == EINTR) + return 0; + + /* Any other error indicates a bug. 
*/ + perror("aeApiPoll: port_get"); + abort(); + } + + state->npending = nevents; + + for (i = 0; i < nevents; i++) { + mask = 0; + if (event[i].portev_events & POLLIN) + mask |= AE_READABLE; + if (event[i].portev_events & POLLOUT) + mask |= AE_WRITABLE; + + eventLoop->fired[i].fd = event[i].portev_object; + eventLoop->fired[i].mask = mask; + + if (evport_debug) + fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n", + (int)event[i].portev_object, mask); + + state->pending_fds[i] = event[i].portev_object; + state->pending_masks[i] = (uintptr_t)event[i].portev_user; + } + + return nevents; +} + +static char *aeApiName(void) { + return "evport"; +} diff --git a/cpp/src/plasma/thirdparty/ae/ae_kqueue.c b/cpp/src/plasma/thirdparty/ae/ae_kqueue.c new file mode 100644 index 0000000000000..6796f4ceb5939 --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/ae_kqueue.c @@ -0,0 +1,138 @@ +/* Kqueue(2)-based ae.c module + * + * Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +#include +#include +#include + +typedef struct aeApiState { + int kqfd; + struct kevent *events; +} aeApiState; + +static int aeApiCreate(aeEventLoop *eventLoop) { + aeApiState *state = zmalloc(sizeof(aeApiState)); + + if (!state) return -1; + state->events = zmalloc(sizeof(struct kevent)*eventLoop->setsize); + if (!state->events) { + zfree(state); + return -1; + } + state->kqfd = kqueue(); + if (state->kqfd == -1) { + zfree(state->events); + zfree(state); + return -1; + } + eventLoop->apidata = state; + return 0; +} + +static int aeApiResize(aeEventLoop *eventLoop, int setsize) { + aeApiState *state = eventLoop->apidata; + + state->events = zrealloc(state->events, sizeof(struct kevent)*setsize); + return 0; +} + +static void aeApiFree(aeEventLoop *eventLoop) { + aeApiState *state = eventLoop->apidata; + + close(state->kqfd); + zfree(state->events); + zfree(state); +} + +static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + struct kevent ke; + + if (mask & AE_READABLE) { + EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL); + if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; + } + if (mask & AE_WRITABLE) { + EV_SET(&ke, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); + if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; + } + return 0; +} + +static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + struct kevent ke; + + if (mask & AE_READABLE) { + EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); + kevent(state->kqfd, &ke, 1, NULL, 0, NULL); + } + if (mask & AE_WRITABLE) { + EV_SET(&ke, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); + kevent(state->kqfd, &ke, 1, NULL, 0, NULL); + } +} + +static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { + aeApiState *state = eventLoop->apidata; + int retval, numevents = 0; + + if (tvp != NULL) { + struct timespec timeout; + timeout.tv_sec = tvp->tv_sec; + timeout.tv_nsec = tvp->tv_usec * 1000; + retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, + &timeout); + } else { + retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, + NULL); + } + + if (retval > 0) { + int j; + + numevents = retval; + for(j = 0; j < numevents; j++) { + int mask = 0; + struct kevent *e = state->events+j; + + if (e->filter == EVFILT_READ) mask |= AE_READABLE; + if (e->filter == EVFILT_WRITE) mask |= AE_WRITABLE; + eventLoop->fired[j].fd = e->ident; + eventLoop->fired[j].mask = mask; + } + } + return numevents; +} + +static char *aeApiName(void) { + return "kqueue"; +} diff --git a/cpp/src/plasma/thirdparty/ae/ae_select.c b/cpp/src/plasma/thirdparty/ae/ae_select.c new file mode 100644 index 0000000000000..c039a8ea3128d --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/ae_select.c @@ -0,0 +1,106 @@ +/* Select()-based ae.c module. + * + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include +#include + +typedef struct aeApiState { + fd_set rfds, wfds; + /* We need to have a copy of the fd sets as it's not safe to reuse + * FD sets after select(). */ + fd_set _rfds, _wfds; +} aeApiState; + +static int aeApiCreate(aeEventLoop *eventLoop) { + aeApiState *state = zmalloc(sizeof(aeApiState)); + + if (!state) return -1; + FD_ZERO(&state->rfds); + FD_ZERO(&state->wfds); + eventLoop->apidata = state; + return 0; +} + +static int aeApiResize(aeEventLoop *eventLoop, int setsize) { + /* Just ensure we have enough room in the fd_set type. */ + if (setsize >= FD_SETSIZE) return -1; + return 0; +} + +static void aeApiFree(aeEventLoop *eventLoop) { + zfree(eventLoop->apidata); +} + +static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + + if (mask & AE_READABLE) FD_SET(fd,&state->rfds); + if (mask & AE_WRITABLE) FD_SET(fd,&state->wfds); + return 0; +} + +static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { + aeApiState *state = eventLoop->apidata; + + if (mask & AE_READABLE) FD_CLR(fd,&state->rfds); + if (mask & AE_WRITABLE) FD_CLR(fd,&state->wfds); +} + +static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { + aeApiState *state = eventLoop->apidata; + int retval, j, numevents = 0; + + memcpy(&state->_rfds,&state->rfds,sizeof(fd_set)); + memcpy(&state->_wfds,&state->wfds,sizeof(fd_set)); + + retval = select(eventLoop->maxfd+1, + &state->_rfds,&state->_wfds,NULL,tvp); + if (retval > 0) { + for (j = 0; j <= eventLoop->maxfd; j++) { + int mask = 0; + aeFileEvent *fe = &eventLoop->events[j]; + + if (fe->mask == AE_NONE) continue; + if (fe->mask & AE_READABLE && FD_ISSET(j,&state->_rfds)) + mask |= AE_READABLE; + if (fe->mask & AE_WRITABLE && FD_ISSET(j,&state->_wfds)) + mask |= AE_WRITABLE; + eventLoop->fired[numevents].fd = j; + eventLoop->fired[numevents].mask = mask; + numevents++; + } + } + return numevents; +} + +static char *aeApiName(void) { + return "select"; +} diff --git a/cpp/src/plasma/thirdparty/ae/config.h b/cpp/src/plasma/thirdparty/ae/config.h new file mode 100644 index 0000000000000..4f8e1ea1bc38c --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/config.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CONFIG_H +#define __CONFIG_H + +#ifdef __APPLE__ +#include +#endif + +/* Test for polling API */ +#ifdef __linux__ +#define HAVE_EPOLL 1 +#endif + +#if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__) +#define HAVE_KQUEUE 1 +#endif + +#ifdef __sun +#include +#ifdef _DTRACE_VERSION +#define HAVE_EVPORT 1 +#endif +#endif + + +#endif diff --git a/cpp/src/plasma/thirdparty/ae/zmalloc.h b/cpp/src/plasma/thirdparty/ae/zmalloc.h new file mode 100644 index 0000000000000..6c27dd4e5c3d3 --- /dev/null +++ b/cpp/src/plasma/thirdparty/ae/zmalloc.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ZMALLOC_H +#define _ZMALLOC_H + +#ifndef zmalloc +#define zmalloc malloc +#endif + +#ifndef zfree +#define zfree free +#endif + +#ifndef zrealloc +#define zrealloc realloc +#endif + +#endif /* _ZMALLOC_H */ diff --git a/cpp/src/plasma/thirdparty/dlmalloc.c b/cpp/src/plasma/thirdparty/dlmalloc.c new file mode 100644 index 0000000000000..84ccbd28fc4ec --- /dev/null +++ b/cpp/src/plasma/thirdparty/dlmalloc.c @@ -0,0 +1,6281 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/publicdomain/zero/1.0/ Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (minimum) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. 
+ + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. (Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + It is also possible to limit the maximum total allocatable + space, using malloc_set_footprint_limit. This is not + designed as a security feature in itself (calls to set limits + are not screened or privileged), but may be useful as one + aspect of a secure implementation. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with a lock. By default, this uses a plain + pthread mutex, win32 critical section, or a spin-lock if if + available for the platform and not disabled by setting + USE_SPIN_LOCKS=0. 
However, if USE_RECURSIVE_LOCKS is defined, + recursive versions are used instead (which are not required for + base functionality but may be needed in layered extensions). + Using a global lock is not especially fast, and can be a major + bottleneck. It is designed only to provide minimal protection + in concurrent environments, and to provide a basis for + extensions. If you are using malloc in a concurrent program, + consider instead using nedmalloc + (http://www.nedprod.com/programs/portable/nedmalloc/) or + ptmalloc (See http://www.malloc.de), which are derived from + versions of this malloc. + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. In real-time + applications, you can optionally suppress segment traversals using + NO_SEGMENT_TRAVERSAL, which assures bounded execution even when + system allocators return non-contiguous spaces, at the typical + expense of carrying around more memory and increased fragmentation. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. 
+ + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. Normally, this requires use of locks. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. You can also +use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. Beware that there seem to be some + cases where this malloc might not be a pure drop-in replacement for + Win32 malloc: Random-looking failures from Win32 GDI API's (eg; + SetDIBits()) may be due to bugs in some video driver implementations + when pixel buffers are malloc()ed, and the region spans more than + one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) + default granularity, pixel buffers may straddle virtual allocation + regions more often than when using the Microsoft allocator. You can + avoid this by using VirtualAlloc() and VirtualFree() for all pixel + buffers rather than using malloc(). If this is not possible, + recompile this malloc with a larger DEFAULT_GRANULARITY. Note: + in cases where MSC and gcc (cygwin) are known to differ on WIN32, + conditions use _MSC_VER to distinguish them. + +DLMALLOC_EXPORT default: extern + Defines how public APIs are declared. If you want to export via a + Windows DLL, you might define this as + #define DLMALLOC_EXPORT extern __declspec(dllexport) + If you want a POSIX ELF shared object, you might use + #define DLMALLOC_EXPORT extern __attribute__((visibility("default"))) + +MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *)) + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. 
Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. (If set true, this can be + overridden on a per-mspace basis for mspace versions.) If set to a + non-zero value other than 1, locks are used, but their + implementation is left out, so lock functions must be supplied manually, + as described below. + +USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available + If true, uses custom spin locks for locking. This is currently + supported only gcc >= 4.1, older gccs on x86 platforms, and recent + MS compilers. Otherwise, posix locks or win32 critical sections are + used. + +USE_RECURSIVE_LOCKS default: not defined + If defined nonzero, uses recursive (aka reentrant) locks, otherwise + uses plain mutexes. This is not required for malloc proper, but may + be needed for layered allocators such as nedmalloc. + +LOCK_AT_FORK default: not defined + If defined nonzero, performs pthread_atfork upon initialization + to initialize child lock while holding parent lock. The implementation + assumes that pthread locks (not custom locks) are being used. In other + cases, you may need to customize the implementation. + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +MALLOC_INSPECT_ALL default: NOT defined + If defined, compiles malloc_inspect_all and mspace_inspect_all, that + perform traversal of all heap space. Unless access to these + functions is otherwise restricted, you probably do not want to + include them in secure implementations. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. + +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. 
This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. + However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. + +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. + +MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +NO_SEGMENT_TRAVERSAL default: 0 + If non-zero, suppresses traversals of memory segments + returned by either MORECORE or CALL_MMAP. This disables + merging of segments that are contiguous, and selectively + releasing them to the OS if unused, but bounds execution times. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. 
+ +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 except on WINCE. + True if mmap clears memory so calloc doesn't need to. This is true + for standard unix mmap using /dev/zero and on WIN32 except for WINCE. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. Similarly for Win32 under recent MS compilers. + (On most x86s, the asm version is only slightly faster than the C version.) + +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +NO_MALLOC_STATS default: 0 + If defined, don't compile "malloc_stats". This avoids calls to + fprintf and bringing in stdio dependencies you might not want. + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). + +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). 
Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP + The number of consolidated frees between checks to release + unused segments when freeing. When using non-contiguous segments, + especially with multiple mspaces, checking only for topmost space + doesn't always suffice to trigger trimming. To compensate for this, + free() will, with a period of MAX_RELEASE_CHECK_RATE (or the + current number of segments, if greater) try to release unused + segments to the OS when freeing chunks that result in + consolidation. The best value for this parameter is a compromise + between slowing down frees with relatively costly checks that + rarely trigger versus holding on to unused memory. To effectively + disable, set to MAX_SIZE_T. This may lead to a very slight speed + improvement at the expense of carrying around more memory. 
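  For illustration, a minimal sketch of tuning the three mallopt-settable values above at program startup. It assumes -DUSE_DL_PREFIX, so the entry point is dlmallopt; the parameter numbers repeat the definitions given just below this comment, and the particular values are arbitrary:

    extern int dlmallopt(int, int);
    #define M_TRIM_THRESHOLD (-1)   /* same parameter numbers as defined below */
    #define M_GRANULARITY    (-2)
    #define M_MMAP_THRESHOLD (-3)

    void tune_allocator(void) {
      dlmallopt(M_TRIM_THRESHOLD, 1024 * 1024); /* trim once >= 1Mb of top space is unused */
      dlmallopt(M_MMAP_THRESHOLD, 512 * 1024);  /* mmap requests of 512Kb and larger */
      dlmallopt(M_GRANULARITY,    128 * 1024);  /* obtain system memory in 128Kb units */
      dlmallopt(M_TRIM_THRESHOLD, -1);          /* -1 is treated as MAX_SIZE_T: disables trimming */
    }

  And a sketch of the kind of hand-crafted MORECORE mentioned above, serving memory from a fixed static arena. The name my_morecore and the 1Mb arena size are hypothetical; a real port would pick its own memory source and compile with something like -DMORECORE=my_morecore -DHAVE_MMAP=0 -DMORECORE_CANNOT_TRIM. The failure value follows the sbrk convention of (void*)-1:

    #include <stddef.h>

    static char   my_arena[1024 * 1024];
    static size_t my_brk = 0;

    void* my_morecore(ptrdiff_t increment) {
      if (increment == 0)
        return my_arena + my_brk;               /* report the current break */
      if (increment < 0)
        return (void*)(~(size_t)0);             /* cannot trim: fail */
      if ((size_t)increment > sizeof(my_arena) - my_brk)
        return (void*)(~(size_t)0);             /* arena exhausted: fail */
      void* prev = my_arena + my_brk;           /* contiguous, increasing addresses */
      my_brk += (size_t)increment;
      return prev;
    }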
+*/ + +/* Version identifier to allow people to support multiple versions */ +#ifndef DLMALLOC_VERSION +#define DLMALLOC_VERSION 20806 +#endif /* DLMALLOC_VERSION */ + +#ifndef DLMALLOC_EXPORT +#define DLMALLOC_EXPORT extern +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#ifdef _WIN32_WCE +#define LACKS_FCNTL_H +#define WIN32 1 +#endif /* _WIN32_WCE */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include <windows.h> +#include <tchar.h> +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define LACKS_SCHED_H +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef MMAP_CLEARS +#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ +#define MMAP_CLEARS 0 +#else +#define MMAP_CLEARS 1 +#endif /* _WIN32_WCE */ +#endif /*MMAP_CLEARS */ +#endif /* WIN32 */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +/* OSX allocators provide 16 byte alignment */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)16U) +#endif +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include <sys/types.h> /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ +#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ + (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) +#endif /* USE_LOCKS */ + +#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ +#if ((defined(__GNUC__) && \ + ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ + defined(__i386__) || defined(__x86_64__))) || \ + (defined(_MSC_VER) && _MSC_VER>=1310)) +#ifndef USE_SPIN_LOCKS +#define USE_SPIN_LOCKS 1 +#endif /* USE_SPIN_LOCKS */ +#elif USE_SPIN_LOCKS +#error "USE_SPIN_LOCKS defined without implementation" +#endif /* ... locks available...
*/ +#elif !defined(USE_SPIN_LOCKS) +#define USE_SPIN_LOCKS 0 +#endif /* USE_LOCKS */ + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ + +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef MALLOC_INSPECT_ALL +#define MALLOC_INSPECT_ALL 0 +#endif /* MALLOC_INSPECT_ALL */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#define _GNU_SOURCE /* Turns on mremap() definition */ +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#define MORECORE_DEFAULT sbrk +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if (MORECORE_CONTIGUOUS || defined(WIN32)) +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef MAX_RELEASE_CHECK_RATE +#if HAVE_MMAP +#define MAX_RELEASE_CHECK_RATE 4095 +#else +#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* MAX_RELEASE_CHECK_RATE */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ +#ifndef NO_MALLOC_STATS +#define NO_MALLOC_STATS 0 +#endif /* NO_MALLOC_STATS */ +#ifndef NO_SEGMENT_TRAVERSAL +#define NO_SEGMENT_TRAVERSAL 0 +#endif /* NO_SEGMENT_TRAVERSAL */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. 
+*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. +*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ +#ifndef STRUCT_MALLINFO_DECLARED +/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO +#define STRUCT_MALLINFO_DECLARED 1 +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; +#endif /* STRUCT_MALLINFO_DECLARED */ +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +/* + Try to persuade compilers to inline. The most critical functions for + inlining are defined as macros, so these aren't used for them. 
+*/ + +#ifndef FORCEINLINE + #if defined(__GNUC__) +#define FORCEINLINE __inline __attribute__ ((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif +#endif +#ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__ ((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#ifndef FORCEINLINE + #define FORCEINLINE inline +#endif +#endif /* __cplusplus */ +#ifndef FORCEINLINE + #define FORCEINLINE +#endif + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlposix_memalign posix_memalign +#define dlrealloc realloc +#define dlrealloc_in_place realloc_in_place +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlmalloc_footprint_limit malloc_footprint_limit +#define dlmalloc_set_footprint_limit malloc_set_footprint_limit +#define dlmalloc_inspect_all malloc_inspect_all +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#define dlbulk_free bulk_free +#endif /* USE_DL_PREFIX */ + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +DLMALLOC_EXPORT void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +DLMALLOC_EXPORT void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. realloc with a size + argument of zero (re)allocates a minimum-sized chunk. 
+ + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ +DLMALLOC_EXPORT void* dlrealloc(void*, size_t); + +/* + realloc_in_place(void* p, size_t n) + Resizes the space allocated for p to size n, only if this can be + done without moving p (i.e., only if there is adjacent space + available if n is greater than p's current allocated size, or n is + less than or equal to p's size). This may be used instead of plain + realloc if an alternative allocation strategy is needed upon failure + to expand space; for example, reallocation of a buffer that must be + memory-aligned or cleared. You can use realloc_in_place to trigger + these alternatives only when needed. + + Returns p if successful; otherwise null. +*/ +DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); + +/* + int posix_memalign(void** pp, size_t alignment, size_t n); + Allocates a chunk of n bytes, aligned in accord with the alignment + argument. Differs from memalign only in that it (1) assigns the + allocated memory to *pp rather than returning it, (2) fails and + returns EINVAL if the alignment is not a power of two (3) fails and + returns ENOMEM if memory cannot be allocated. +*/ +DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +DLMALLOC_EXPORT void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. To workaround the fact that mallopt is specified to use int, + not size_t parameters, the value -1 is specially treated as the + maximum unsigned size_t value. + + SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +DLMALLOC_EXPORT int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. 
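  For illustration, a minimal sketch of the kind of lightweight monitoring this enables (assuming -DUSE_DL_PREFIX; report_footprint is a hypothetical helper, not part of this file):

    #include <stdio.h>
    #include <stddef.h>

    extern size_t dlmalloc_footprint(void);
    extern size_t dlmalloc_max_footprint(void);

    /* Cheap to call often: both functions return precomputed values. */
    static void report_footprint(const char* tag) {
      printf("[%s] current=%zu bytes, peak=%zu bytes\n",
             tag, dlmalloc_footprint(), dlmalloc_max_footprint());
    }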
+*/ +DLMALLOC_EXPORT size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void); + +/* + malloc_footprint_limit(); + Returns the number of bytes that the heap is allowed to obtain from + the system, returning the last value returned by + malloc_set_footprint_limit, or the maximum size_t value if + never set. The returned value reflects a permission. There is no + guarantee that this number of bytes can actually be obtained from + the system. +*/ +DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); + +/* + malloc_set_footprint_limit(); + Sets the maximum number of bytes to obtain from the system, causing + failure returns from malloc and related functions upon attempts to + exceed this value. The argument value may be subject to page + rounding to an enforceable limit; this actual value is returned. + Using an argument of the maximum possible size_t effectively + disables checks. If the argument is less than or equal to the + current malloc_footprint, then all future allocations that require + additional system memory will fail. However, invocation cannot + retroactively deallocate existing used memory. +*/ +DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); + +#if MALLOC_INSPECT_ALL +/* + malloc_inspect_all(void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg); + Traverses the heap and calls the given handler for each managed + region, skipping all bytes that are (or may be) used for bookkeeping + purposes. Traversal does not include include chunks that have been + directly memory mapped. Each reported region begins at the start + address, and continues up to but not including the end address. The + first used_bytes of the region contain allocated data. If + used_bytes is zero, the region is unallocated. The handler is + invoked with the given callback argument. If locks are defined, they + are held during the entire traversal. It is a bad idea to invoke + other malloc functions from within the handler. + + For example, to count the number of in-use chunks with size greater + than 1000, you could write: + static int count = 0; + void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: + malloc_inspect_all(count_chunks, NULL); + + malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. +*/ +DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), + void* arg); + +#endif /* MALLOC_INSPECT_ALL */ + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. 
+ fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be freed when it is no longer needed. This can be + done all at once using bulk_free. + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. 
+ + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be freed when it is no longer needed. This can be + done all at once using bulk_free. + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); + +/* + bulk_free(void* array[], size_t n_elements) + Frees and clears (sets to null) each non-null pointer in the given + array. This is likely to be faster than freeing them one-by-one. + If footers are used, pointers that have been allocated in different + mspaces are not freed or cleared, and the count of all such pointers + is returned. For large arrays of pointers with poor locality, it + may be worthwhile to sort this array before calling bulk_free. +*/ +DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +DLMALLOC_EXPORT void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. 
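  As an illustration of the pattern described above, a sketch (assuming -DUSE_DL_PREFIX; the sizes and the burst_then_release name are arbitrary) of handing memory back to the system after a burst of ordinary, non-mmapped allocations:

    #include <stddef.h>

    extern void* dlmalloc(size_t);
    extern void  dlfree(void*);
    extern int   dlmalloc_trim(size_t);

    void burst_then_release(void) {
      enum { N = 1024 };
      void* bufs[N];
      for (int i = 0; i < N; i++)
        bufs[i] = dlmalloc(8 * 1024);   /* ~8Mb of ordinary (non-mmapped) chunks */
      /* ... use the buffers ... */
      for (int i = 0; i < N; i++)
        dlfree(bufs[i]);
      /* Keep 64Kb of trailing slack; return the rest to the system if possible. */
      dlmalloc_trim(64 * 1024);
    }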
+*/ +DLMALLOC_EXPORT int dlmalloc_trim(size_t); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +DLMALLOC_EXPORT void dlmalloc_stats(void); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_track_large_chunks controls whether requests for large chunks + are allocated in their own untracked mmapped regions, separate from + others in this mspace. By default large chunks are not tracked, + which reduces fragmentation. 
However, such chunks are not + necessarily released to the system upon destroy_mspace. Enabling + tracking by setting to true may increase fragmentation, but avoids + leakage when relying on destroy_mspace to release all memory + allocated using this space. The function returns the previous + setting. +*/ +DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); + + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. +*/ +DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + malloc_usable_size(void* p) behaves the same as malloc_usable_size; +*/ +DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +DLMALLOC_EXPORT int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. 
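  As a usage illustration of the mspace routines declared above, a sketch of a typical lifecycle. It assumes the file is compiled with -DMSPACES=1 and that the declarations above are available through such a cut-down malloc.h; the arena_example name is arbitrary:

    #include <stddef.h>

    void arena_example(void) {
      mspace arena = create_mspace(0, 0);        /* default capacity, no locking */
      if (arena == 0) return;

      void*   a = mspace_malloc(arena, 128);
      double* b = (double*)mspace_calloc(arena, 16, sizeof(double));
      b = (double*)mspace_realloc(arena, b, 32 * sizeof(double));
      mspace_free(arena, a);

      size_t obtained = mspace_footprint(arena); /* bytes obtained from the system */
      (void)obtained; (void)b;

      destroy_mspace(arena);                     /* returns all of the space, including b */
    }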
+ ======================================================================== +*/ + +/* #include "malloc.h" */ + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef _MSC_VER +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ +#if !NO_MALLOC_STATS +#include <stdio.h> /* for printing in malloc_stats */ +#endif /* NO_MALLOC_STATS */ +#ifndef LACKS_ERRNO_H +#include <errno.h> /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#undef assert +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include <assert.h> +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#ifndef assert +#define assert(x) +#endif +#define DEBUG 0 +#endif /* DEBUG */ +#if !defined(WIN32) && !defined(LACKS_TIME_H) +#include <time.h> /* for magic initialization */ +#endif /* WIN32 */ +#ifndef LACKS_STDLIB_H +#include <stdlib.h> /* for abort() */ +#endif /* LACKS_STDLIB_H */ +#ifndef LACKS_STRING_H +#include <string.h> /* for memset etc */ +#endif /* LACKS_STRING_H */ +#if USE_BUILTIN_FFS +#ifndef LACKS_STRINGS_H +#include <strings.h> /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ +#if HAVE_MMAP +#ifndef LACKS_SYS_MMAN_H +/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ +#if (defined(linux) && !defined(__USE_GNU)) +#define __USE_GNU 1 +#include <sys/mman.h> /* for mmap */ +#undef __USE_GNU +#else +#include <sys/mman.h> /* for mmap */ +#endif /* linux */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include <fcntl.h> +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ +#ifndef LACKS_UNISTD_H +#include <unistd.h> /* for sbrk, sysconf */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ + +/* Declarations for locking */ +#if USE_LOCKS +#ifndef WIN32 +#if defined (__SVR4) && defined (__sun) /* solaris */ +#include <thread.h> +#elif !defined(LACKS_SCHED_H) +#include <sched.h> +#endif /* solaris or LACKS_SCHED_H */ +#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS +#include <pthread.h> +#endif /* USE_RECURSIVE_LOCKS ...
*/ +#elif defined(_MSC_VER) +#ifndef _M_AMD64 +/* These are already defined on AMD64 builds */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); +LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _M_AMD64 */ +#pragma intrinsic (_InterlockedCompareExchange) +#pragma intrinsic (_InterlockedExchange) +#define interlockedcompareexchange _InterlockedCompareExchange +#define interlockedexchange _InterlockedExchange +#elif defined(WIN32) && defined(__GNUC__) +#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) +#define interlockedexchange __sync_lock_test_and_set +#endif /* Win32 */ +#else /* USE_LOCKS */ +#endif /* USE_LOCKS */ + +#ifndef LOCK_AT_FORK +#define LOCK_AT_FORK 0 +#endif + +/* Declarations for bit scanning on win32 */ +#if defined(_MSC_VER) && _MSC_VER>=1300 +#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +unsigned char _BitScanForward(unsigned long *index, unsigned long mask); +unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#define BitScanForward _BitScanForward +#define BitScanReverse _BitScanReverse +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) +#endif /* BitScanForward */ +#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + +#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define SIZE_T_FOUR ((size_t)4) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + 
((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if HAVE_MMAP + +#ifndef WIN32 +#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static FORCEINLINE void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static FORCEINLINE void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL; +} + +/* This function supports releasing coalesed segments */ +static FORCEINLINE int win32munmap(void* ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = (char*)ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + return 0; +} + +#define MMAP_DEFAULT(s) win32mmap(s) +#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ + +#if HAVE_MREMAP +#ifndef WIN32 +#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) +#endif /* WIN32 */ +#endif /* HAVE_MREMAP */ + +/** + * Define CALL_MORECORE + */ +#if HAVE_MORECORE + #ifdef MORECORE + #define CALL_MORECORE(S) MORECORE(S) + #else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ +#else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE */ + +/** + * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP + */ +#if HAVE_MMAP + #define USE_MMAP_BIT (SIZE_T_ONE) + + #ifdef MMAP + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ + #ifdef MUNMAP + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ + #ifdef DIRECT_MMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ +#else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) +#endif /* HAVE_MMAP */ + +/** + * Define CALL_MREMAP + */ +#if HAVE_MMAP && HAVE_MREMAP + #ifdef MREMAP + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ +#else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ + +/* mstate bit set if continguous morecore disabled or failed */ +#define USE_NONCONTIGUOUS_BIT (4U) + +/* segment bit set in create_mspace_with_base */ +#define EXTERN_BIT (8U) + + +/* --------------------------- Lock preliminaries ------------------------ */ + +/* + When locks are defined, there is one global lock, plus + one per-mspace lock. + + The global lock_ensures that mparams.magic and other unique + mparams values are initialized only once. It also protects + sequences of calls to MORECORE. In many cases sys_alloc requires + two calls, that should not be interleaved with calls by other + threads. This does not protect against direct calls to MORECORE + by other threads not using this lock, so there is still code to + cope the best we can on interference. + + Per-mspace locks surround calls to malloc, free, etc. + By default, locks are simple non-reentrant mutexes. + + Because lock-protected regions generally have bounded times, it is + OK to use the supplied simple spinlocks. 
Spinlocks are likely to + improve performance for lightly contended applications, but worsen + performance under heavy contention. + + If USE_LOCKS is > 1, the definitions of lock routines here are + bypassed, in which case you will need to define the type MLOCK_T, + and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK + and TRY_LOCK. You must also declare a + static MLOCK_T malloc_global_mutex = { initialization values };. + +*/ + +#if !USE_LOCKS +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) (0) +#define DESTROY_LOCK(l) (0) +#define ACQUIRE_MALLOC_GLOBAL_LOCK() +#define RELEASE_MALLOC_GLOBAL_LOCK() + +#else +#if USE_LOCKS > 1 +/* ----------------------- User-defined locks ------------------------ */ +/* Define your own lock implementation here */ +/* #define INITIAL_LOCK(lk) ... */ +/* #define DESTROY_LOCK(lk) ... */ +/* #define ACQUIRE_LOCK(lk) ... */ +/* #define RELEASE_LOCK(lk) ... */ +/* #define TRY_LOCK(lk) ... */ +/* static MLOCK_T malloc_global_mutex = ... */ + +#elif USE_SPIN_LOCKS + +/* First, define CAS_LOCK and CLEAR_LOCK on ints */ +/* Note CAS_LOCK defined to return 0 on success */ + +#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) +#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) +#define CLEAR_LOCK(sl) __sync_lock_release(sl) + +#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) +/* Custom spin locks for older gcc on x86 */ +static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; + int val = 1; + int cmp = 0; + __asm__ __volatile__ ("lock; cmpxchgl %1, %2" + : "=a" (ret) + : "r" (val), "m" (*(sl)), "0"(cmp) + : "memory", "cc"); + return ret; +} + +static FORCEINLINE void x86_clear_lock(int* sl) { + assert(*sl != 0); + int prev = 0; + int ret; + __asm__ __volatile__ ("lock; xchgl %0, %1" + : "=r" (ret) + : "m" (*(sl)), "0"(prev) + : "memory"); +} + +#define CAS_LOCK(sl) x86_cas_lock(sl) +#define CLEAR_LOCK(sl) x86_clear_lock(sl) + +#else /* Win32 MSC */ +#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1) +#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0) + +#endif /* ... gcc spins locks ... */ + +/* How to yield for a spin lock */ +#define SPINS_PER_YIELD 63 +#if defined(_MSC_VER) +#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ +#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) +#elif defined (__SVR4) && defined (__sun) /* solaris */ +#define SPIN_LOCK_YIELD thr_yield(); +#elif !defined(LACKS_SCHED_H) +#define SPIN_LOCK_YIELD sched_yield(); +#else +#define SPIN_LOCK_YIELD +#endif /* ... yield ... */ + +#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 +/* Plain spin locks use single word (embedded in malloc_states) */ +static int spin_acquire_lock(int *sl) { + int spins = 0; + while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { + if ((++spins & SPINS_PER_YIELD) == 0) { + SPIN_LOCK_YIELD; + } + } + return 0; +} + +#define MLOCK_T int +#define TRY_LOCK(sl) !CAS_LOCK(sl) +#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) +#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) +#define INITIAL_LOCK(sl) (*sl = 0) +#define DESTROY_LOCK(sl) (0) +static MLOCK_T malloc_global_mutex = 0; + +#else /* USE_RECURSIVE_LOCKS */ +/* types for lock owners */ +#ifdef WIN32 +#define THREAD_ID_T DWORD +#define CURRENT_THREAD GetCurrentThreadId() +#define EQ_OWNER(X,Y) ((X) == (Y)) +#else +/* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need to + somehow redefine these or not use spin locks. 
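A short illustrative call sequence for the recursive flavour (not part of dlmalloc; lk stands for any MLOCK_T, and the behaviour follows from recursive_acquire_lock / recursive_release_lock defined just below). The first acquire records the owning thread and sets the count to 1, a nested acquire by the same owner only increments the count, and the lock word is cleared only when the count drops back to 0:

  ACQUIRE_LOCK(&lk);   /* c == 1, threadid = calling thread       */
  ACQUIRE_LOCK(&lk);   /* same owner, so just c == 2, no spinning */
  RELEASE_LOCK(&lk);   /* c == 1, lock word still set             */
  RELEASE_LOCK(&lk);   /* c == 0, CLEAR_LOCK releases the word    */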
+*/ +#define THREAD_ID_T pthread_t +#define CURRENT_THREAD pthread_self() +#define EQ_OWNER(X,Y) pthread_equal(X, Y) +#endif + +struct malloc_recursive_lock { + int sl; + unsigned int c; + THREAD_ID_T threadid; +}; + +#define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + +static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); + if (--lk->c == 0) { + CLEAR_LOCK(&lk->sl); + } +} + +static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; + int spins = 0; + for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; + lk->c = 1; + return 0; + } + } + else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; + return 0; + } + if ((++spins & SPINS_PER_YIELD) == 0) { + SPIN_LOCK_YIELD; + } + } +} + +static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; + lk->c = 1; + return 1; + } + } + else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; + return 1; + } + return 0; +} + +#define RELEASE_LOCK(lk) recursive_release_lock(lk) +#define TRY_LOCK(lk) recursive_try_lock(lk) +#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) +#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) +#define DESTROY_LOCK(lk) (0) +#endif /* USE_RECURSIVE_LOCKS */ + +#elif defined(WIN32) /* Win32 critical sections */ +#define MLOCK_T CRITICAL_SECTION +#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) +#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) +#define TRY_LOCK(lk) TryEnterCriticalSection(lk) +#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) +#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) +#define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; +static volatile LONG malloc_global_mutex_status; + +/* Use spin loop to initialize global lock */ +static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; + if (stat > 0) + return; + /* transition to < 0 while initializing, then to > 0) */ + if (stat == 0 && + interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); + interlockedexchange(&malloc_global_mutex_status, (LONG)1); + return; + } + SleepEx(0, FALSE); + } +} + +#else /* pthreads-based locks */ +#define MLOCK_T pthread_mutex_t +#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) +#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) +#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) +#define INITIAL_LOCK(lk) pthread_init_lock(lk) +#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + +#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) +/* Cope with old-style linux recursive lock initialization by adding */ +/* skipped internal declaration from pthread.h */ +extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, + int __kind)); +#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP +#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) +#endif /* USE_RECURSIVE_LOCKS ... 
*/ + +static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; + +static int pthread_init_lock (MLOCK_T *lk) { + pthread_mutexattr_t attr; + if (pthread_mutexattr_init(&attr)) return 1; +#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; +#endif + if (pthread_mutex_init(lk, &attr)) return 1; + if (pthread_mutexattr_destroy(&attr)) return 1; + return 0; +} + +#endif /* ... lock types ... */ + +/* Common code for all lock types */ +#define USE_LOCK_BIT (2U) + +#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK +#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); +#endif + +#ifndef RELEASE_MALLOC_GLOBAL_LOCK +#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); +#endif + +#endif /* USE_LOCKS */ + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 0) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse (unless the chunk is mmapped). This redundancy enables usage + checks within free and realloc, and reduces indirection when freeing + and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). 
In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, have both cinuse and pinuse bits + cleared in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* MMapped chunks need a second word of overhead ... */ +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use, unless mmapped, in which case both bits are cleared. + + FLAG4_BIT is not used by this malloc, but might be useful in extensions. 
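As a concrete reading of the bit layout described above (a sketch, not dlmalloc code; p stands for any mchunkptr, and the bit values match the PINUSE_BIT / CINUSE_BIT / FLAG4_BIT definitions just below):

  size_t head        = p->head;
  size_t size        = head & ~(size_t)7;   /* chunksize(p): strip the three flag bits     */
  int    prev_in_use = (head & 1) != 0;     /* pinuse(p)                                   */
  int    this_in_use = (head & 3) != 1;     /* is_inuse(p): anything but "only PINUSE" set */
  int    mmapped     = (head & 3) == 0;     /* is_mmapped(p): both inuse bits clear        */

Relatedly, request2size above rounds a request up to a usable chunk size; on a typical 64-bit, non-FOOTERS build (CHUNK_OVERHEAD == 8, CHUNK_ALIGN_MASK == 15), request2size(100) is (100 + 8 + 15) & ~15 == 112, and requests below MIN_REQUEST are bumped to MIN_CHUNK_SIZE.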
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define FLAG4_BIT (SIZE_T_FOUR) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) +#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define flag4inuse(p) ((p)->head & FLAG4_BIT) +#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) +#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) + +#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define set_flag4(p) ((p)->head |= FLAG4_BIT) +#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
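The mapping from a chunk size to one of these treebins can be restated as a standalone helper (illustrative only; it mirrors the compute_tree_index macro further down, assuming TREEBIN_SHIFT == 8 and NTREEBINS == 32 as defined below, and GCC's __builtin_clz):

  static unsigned tree_index_for(size_t s) {
    size_t x = s >> 8;                      /* TREEBIN_SHIFT */
    if (x == 0)     return 0;
    if (x > 0xFFFF) return 31;              /* NTREEBINS - 1 */
    unsigned k = 31 - (unsigned)__builtin_clz((unsigned)x);   /* floor(log2(x))            */
    return (k << 1) + (unsigned)((s >> (k + 7)) & 1);         /* two bins per power of two */
  }

So sizes 0x100..0x17F land in bin 0, 0x180..0x1FF in bin 1, 0x200..0x2FF in bin 2, 0x300..0x3FF in bin 3, and so on, matching the halving-by-bit scheme described above.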
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If USE_MMAP_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
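As a concrete picture of that list structure: each mstate embeds the head record (its seg field), the record for every other segment lives at the tail of that segment, and records are chained through next. A minimal traversal sketch (illustrative only; total_segment_bytes is not a dlmalloc function, and the msegment fields are the ones defined just below):

  static size_t total_segment_bytes(mstate m) {
    size_t total = 0;
    for (msegmentptr s = &m->seg; s != 0; s = s->next)
      total += s->size;                 /* each record covers base .. base + size */
    return total;
  }

The same linear walk appears later in segment_holding and has_segment_link; as noted above, segments are deliberately not indexed, so every lookup is a scan of this list.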
+*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ +}; + +#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) +#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. 
+ + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; + void* extp; /* Unused but available for extensions */ + size_t exts; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. Note that the non-zeroness of "magic" + also serves as an initialization flag. 
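For orientation, the tunable fields above are the ones reachable through mallopt-style calls, which change_mparam further down services. A usage sketch (assuming the public entry point is named mallopt, or dlmallopt when a dl prefix is configured; the M_* constants are defined elsewhere in this file):

  mallopt(M_TRIM_THRESHOLD, 1 << 20);     /* trim top back to the system above 1 MiB               */
  mallopt(M_MMAP_THRESHOLD, 256 * 1024);  /* service large requests via direct mmap                */
  mallopt(M_GRANULARITY,    64 * 1024);   /* must be a power of two >= the page size               */
  mallopt(M_TRIM_THRESHOLD, -1);          /* -1 maps to MAX_SIZE_T, effectively disabling trimming */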
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* Ensure mparams initialized */ +#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) + +#if !ONLY_MSPACES + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) + +#endif /* !ONLY_MSPACES */ + +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#if USE_LOCKS +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) +#else +#define disable_lock(M) +#endif + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#if HAVE_MMAP +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) +#else +#define disable_mmap(M) +#endif + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity - SIZE_T_ONE))\ + & ~(mparams.granularity - SIZE_T_ONE)) + + +/* For mmap, use granularity alignment on windows, else page-align */ +#ifdef WIN32 +#define mmap_align(S) granularity_align(S) +#else +#define mmap_align(S) page_align(S) +#endif + +/* For sys_alloc, enough padding to ensure can malloc request on success */ +#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. 
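Concretely, every public entry point brackets its work with these two hooks; the same pattern is visible in internal_mallinfo and internal_malloc_stats later in this file:

  if (!PREACTION(m)) {      /* takes the mspace lock when locking is enabled */
    /* ... operate on the mstate m ... */
    POSTACTION(m);          /* releases the lock */
  }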
If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS +#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. + USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I. 
Use x86 asm if possible */ +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define compute_tree_index(S, I)\ +{\ + unsigned int X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined (__INTEL_COMPILER) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = _bit_scan_reverse (X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + _BitScanReverse((DWORD *) &K, (DWORD) X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + +/* index corresponding to given bit. 
Use x86 asm if possible */ + +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + J = __builtin_ctz(X); \ + I = (bindex_t)J;\ +} + +#elif defined (__INTEL_COMPILER) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + J = _bit_scan_forward (X); \ + I = (bindex_t)J;\ +} + +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + _BitScanForward((DWORD *) &J, X);\ + I = (bindex_t)J;\ +} + +#elif USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = ffs(X)-1 + +#else +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* GNUC */ + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probabalistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. 
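To make the footer scheme concrete: with FOOTERS enabled, the prev_foot of the chunk that physically follows an in-use chunk holds the owning mstate xor'ed with mparams.magic, so free and realloc can recover and verify the owner. A sketch of that check using the mem2chunk, get_mstate_for, ok_magic and USAGE_ERROR_ACTION macros in this file (the shape of the check, not a verbatim excerpt from free):

  mchunkptr p  = mem2chunk(mem);          /* chunk header from the user pointer        */
  mstate    fm = get_mstate_for(p);       /* (next chunk's prev_foot) ^ mparams.magic  */
  if (!ok_magic(fm))                      /* mismatch: corrupted footer or foreign ptr */
    USAGE_ERROR_ACTION(fm, p);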
+*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has inuse status */ +#define ok_inuse(p) is_inuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_inuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Macros for setting head/foot of non-mmapped chunks */ + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +#if LOCK_AT_FORK +static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } +static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } +static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } +#endif /* LOCK_AT_FORK */ + +/* Initialize mparams */ +static int init_mparams(void) { +#ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) + init_malloc_global_mutex(); +#endif + + ACQUIRE_MALLOC_GLOBAL_LOCK(); + if (mparams.magic == 0) { + size_t magic; + size_t psize; + size_t gsize; + +#ifndef WIN32 + psize = malloc_getpagesize; + gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + psize = system_info.dwPageSize; + gsize = ((DEFAULT_GRANULARITY != 0)? 
+ DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((gsize & (gsize-SIZE_T_ONE)) != 0) || + ((psize & (psize-SIZE_T_ONE)) != 0)) + ABORT; + mparams.granularity = gsize; + mparams.page_size = psize; + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if !ONLY_MSPACES + /* Set up lock for main malloc area */ + gm->mflags = mparams.default_mflags; + (void)INITIAL_LOCK(&gm->mutex); +#endif +#if LOCK_AT_FORK + pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); +#endif + + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + magic = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ +#ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); +#elif defined(LACKS_TIME_H) + magic = (size_t)&magic ^ (size_t)0x55555555U; +#else + magic = (size_t)(time(0) ^ (size_t)0x55555555U); +#endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + /* Until memory modes commonly available, use volatile-write */ + (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + return 1; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val; + ensure_initialization(); + val = (value == -1)? MAX_SIZE_T : (size_t)value; + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!pinuse(chunk_plus_offset(p, sz))); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(is_inuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!is_inuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || is_inuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~INUSE_BITS; + do_check_inuse_chunk(m, p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. 
*/ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!is_inuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (is_inuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + /*assert(m->topsize == chunksize(m->top)); redundant */ + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + ensure_initialization(); + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!is_inuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +#if !NO_MALLOC_STATS +static void internal_malloc_stats(mstate m) { + ensure_initialization(); + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + 
maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!is_inuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + POSTACTION(m); /* drop lock */ + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } +} +#endif /* NO_MALLOC_STATS */ + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ + if (B == F) {\ + clear_smallmap(M, I);\ + }\ + else if (RTCHECK(B == smallbin_at(M,I) ||\ + (ok_address(M, B) && B->fd == P))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F) {\ + clear_smallmap(M, I);\ + }\ + else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + assert(is_small(DVS));\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk 
= T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendents + correspond properly to bit masks. We use the rightmost descendent + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. 
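+  (Concretely, mmap_alloc below places the chunk at mm + offset, where
+  offset = align_offset(chunk2mem(mm)), and records p->prev_foot = offset.)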
This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). +*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; + if (fp <= m->footprint || fp > m->footprint_limit) + return 0; + } + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset; + p->head = psize; + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (m->least_addr == 0 || mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, flags); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = psize; + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallmap = m->treemap = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + 
m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. */ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.sflags = mmapped; + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + size_t asize; /* allocation size */ + + ensure_initialization(); + + /* Directly map large chunks, but only if already initialized */ + if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + asize = granularity_align(nb + SYS_ALLOC_PADDING); + if (asize <= nb) + return 0; /* wraparound */ + if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; + if (fp <= m->footprint || fp > m->footprint_limit) + return 0; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. + (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + + In all cases, we need to request enough bytes from system to ensure + we can malloc nb bytes upon success, so pad with enough space for + top_foot, plus alignment-pad to make sure we don't lose bytes if + not on boundary, and round this up to a granularity unit. + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0)? 
0 : segment_holding(m, (char*)m->top); + ACQUIRE_MALLOC_GLOBAL_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + size_t fp; + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + ssize += (page_align((size_t)base) - (size_t)base); + fp = m->footprint + ssize; /* recheck limits */ + if (ssize > nb && ssize < HALF_MAX_SIZE_T && + (m->footprint_limit == 0 || + (fp > m->footprint && fp <= m->footprint_limit)) && + (br = (char*)(CALL_MORECORE(ssize))) == base) { + tbase = base; + tsize = ssize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); + /* Use mem here only if it did continuously extend old space */ + if (ssize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + tbase = br; + tsize = ssize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && + ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + ssize += esize; + else { /* Can't use; try to release */ + (void) CALL_MORECORE(-ssize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = ssize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char* mp = (char*)(CALL_MMAP(asize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = asize; + mmap_flag = USE_MMAP_BIT; + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MALLOC_GLOBAL_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) + m->least_addr = tbase; + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.sflags = mmap_flag; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + init_bins(m); +#if !ONLY_MSPACES + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else +#endif + { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + /* Only consider most recent segment if traversal suppressed */ + while (sp != 0 && tbase != sp->base + sp->size) + sp = (NO_SEGMENT_TRAVERSAL) ? 
0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & USE_MMAP_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & USE_MMAP_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + int nsegs = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + ++nsegs; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + break; + pred = sp; + sp = next; + } + /* Reset check counter */ + m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? 
+ (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + ensure_initialization(); + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + (void)newsize; /* placate people compiling -Wunused-variable */ + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if (HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* Consolidate and bin a chunk. Differs from exported versions + of free mainly in that the chunk need not be marked as inuse. 
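+   For example, try_realloc_chunk, internal_memalign, and internal_bulk_free
+   below use it to hand split-off pieces (leading or trailing remainders)
+   back to the free lists.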
+*/ +static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + mchunkptr prev; + size_t prevsize = p->prev_foot; + if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + m->footprint -= psize; + return; + } + prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (p != m->dv) { + unlink_chunk(m, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; + set_free_with_pinuse(p, psize, next); + return; + } + } + else { + CORRUPTION_ERROR_ACTION(m); + return; + } + } + if (RTCHECK(ok_address(m, next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == m->top) { + size_t tsize = m->topsize += psize; + m->top = p; + p->head = tsize | PINUSE_BIT; + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + return; + } + else if (next == m->dv) { + size_t dsize = m->dvsize += psize; + m->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + return; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(m, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == m->dv) { + m->dvsize = psize; + return; + } + } + } + else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); + } + else { + CORRUPTION_ERROR_ACTION(m); + } +} + +/* ---------------------------- malloc --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t 
nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + +#if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ +#endif + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +/* ---------------------------- free --------------------------- */ + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceeding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
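+     The consolidation logic parallels dispose_chunk above, with the addition
+     of trimming top (sys_trim) and periodically releasing unused mmapped
+     segments (release_unused_segments).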
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +#endif /* !ONLY_MSPACES */ + +/* ------------ Internal support for realloc, memalign, etc -------------- */ + +/* Try to realloc; only in-place unless can_move true */ +static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, + int can_move) { + mchunkptr newp = 0; + size_t oldsize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, oldsize); + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && + ok_next(p, next) && ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); + } + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + mchunkptr r = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, r, rsize); + dispose_chunk(m, r, rsize); + } + newp = p; + } + else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { + size_t newsize = oldsize + 
m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = p; + } + } + else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; + if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; + if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); + mchunkptr n = chunk_plus_offset(r, dsize); + set_inuse(m, p, nb); + set_size_and_pinuse_of_free_chunk(r, dsize); + clear_pinuse(n); + m->dvsize = dsize; + m->dv = r; + } + else { /* exhaust dv */ + size_t newsize = oldsize + dvs; + set_inuse(m, p, newsize); + m->dvsize = 0; + m->dv = 0; + } + newp = p; + } + } + else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); + if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; + unlink_chunk(m, next, nextsize); + if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; + set_inuse(m, p, newsize); + } + else { + mchunkptr r = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, r, rsize); + dispose_chunk(m, r, rsize); + } + newp = p; + } + } + } + else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; +} + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + void* mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + mem = internal_malloc(m, req); + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (PREACTION(m)) + return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
+ br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = newsize; + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + dispose_chunk(m, p, leadsize); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + dispose_chunk(m, remainder, remainder_size); + } + } + + mem = chunk2mem(p); + assert (chunksize(p) >= nb); + assert(((size_t)mem & (alignment - 1)) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + } + } + return mem; +} + +/* + Common support for independent_X routines, handling + all of the combinations that can result. + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed +*/ +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + ensure_initialization(); + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + +/* Try to free all pointers in the given array. + Note: this could be made faster, by delaying consolidation, + at the price of disabling some user integrity checks, We + still optimize some consolidations by combining adjacent + chunks before freeing, which will occur often if allocated + with ialloc or the array is sorted. 
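+   It is exposed as dlbulk_free and mspace_bulk_free below; an illustrative
+   sketch of that interface (not part of the allocator itself):
+
+     void* ptrs[8];
+     size_t i;
+     for (i = 0; i < 8; ++i)
+       ptrs[i] = dlmalloc(32);
+     dlbulk_free(ptrs, 8);  /* frees all eight; adjacent chunks are merged
+                               before disposal when possible */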
+*/ +static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { + size_t unfreed = 0; + if (!PREACTION(m)) { + void** a; + void** fence = &(array[nelem]); + for (a = array; a != fence; ++a) { + void* mem = *a; + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t psize = chunksize(p); +#if FOOTERS + if (get_mstate_for(p) != m) { + ++unfreed; + continue; + } +#endif + check_inuse_chunk(m, p); + *a = 0; + if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { + void ** b = a + 1; /* try to merge with next chunk */ + mchunkptr next = next_chunk(p); + if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; + set_inuse(m, p, newsize); + *b = chunk2mem(p); + } + else + dispose_chunk(m, p, psize); + } + else { + CORRUPTION_ERROR_ACTION(m); + break; + } + } + } + if (should_trim(m, m->topsize)) + sys_trim(m, 0); + POSTACTION(m); + } + return unfreed; +} + +/* Traversal */ +#if MALLOC_INSPECT_ALL +static void internal_inspect_all(mstate m, + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { + if (is_initialized(m)) { + mchunkptr top = m->top; + msegmentptr s; + for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); + size_t sz = chunksize(q); + size_t used; + void* start; + if (is_inuse(q)) { + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + start = chunk2mem(q); + } + else { + used = 0; + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void*)((char*)q + sizeof(struct malloc_chunk)); + } + else { + start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + } + } + if (start < (void*)next) /* skip if all space is bookkeeping */ + handler(start, next, used, arg); + if (q == top) + break; + q = next; + } + } + } +} +#endif /* MALLOC_INSPECT_ALL */ + +/* ------------------ Exported realloc, memalign, etc -------------------- */ + +#if !ONLY_MSPACES + +void* dlrealloc(void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem == 0) { + mem = dlmalloc(bytes); + } + else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } +#ifdef REALLOC_ZERO_BYTES_FREES + else if (bytes == 0) { + dlfree(oldmem); + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); + POSTACTION(m); + if (newp != 0) { + check_inuse_chunk(m, newp); + mem = chunk2mem(newp); + } + else { + mem = internal_malloc(m, bytes); + if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); + memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + } + } + } + return mem; +} + +void* dlrealloc_in_place(void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); + POSTACTION(m); + if (newp == oldp) { + check_inuse_chunk(m, newp); + mem = oldmem; + } + } + } + } + return mem; +} + +void* dlmemalign(size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) { + return dlmalloc(bytes); + } + return internal_memalign(gm, alignment, bytes); +} + +int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { + void* mem = 0; + if (alignment == MALLOC_ALIGNMENT) + mem = dlmalloc(bytes); + else { + size_t d = alignment / sizeof(void*); + size_t r = alignment % sizeof(void*); + if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + return EINVAL; + else if (bytes <= MAX_REQUEST - alignment) { + if (alignment < MIN_CHUNK_SIZE) + alignment = MIN_CHUNK_SIZE; + mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) + return ENOMEM; + else { + *pp = mem; + return 0; + } +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +size_t dlbulk_free(void* array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); +} + +#if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { + ensure_initialization(); + if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); + POSTACTION(gm); + } +} +#endif /* MALLOC_INSPECT_ALL */ + +int dlmalloc_trim(size_t pad) { + int result = 0; + ensure_initialization(); + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; + return maf == 0 ? 
MAX_SIZE_T : maf; +} + +size_t dlmalloc_set_footprint_limit(size_t bytes) { + ensure_initialization(); + size_t result; /* invert sense of 0 */ + if (bytes == 0) + result = granularity_align(1); /* Use minimal size */ + if (bytes == MAX_SIZE_T) + result = 0; /* disable */ + else + result = granularity_align(bytes); + return gm->footprint_limit = result; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +#if !NO_MALLOC_STATS +void dlmalloc_stats() { + internal_malloc_stats(gm); +} +#endif /* NO_MALLOC_STATS */ + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (is_inuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + (void)INITIAL_LOCK(&m->mutex); + msp->head = (msize|INUSE_BITS); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + m->mflags = mparams.default_mflags; + m->extp = 0; + m->exts = 0; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + m->seg.sflags = USE_MMAP_BIT; + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + m->seg.sflags = EXTERN_BIT; + set_lock(m, locked); + } + return (mspace)m; +} + +int mspace_track_large_chunks(mspace msp, int enable) { + int ret = 0; + mstate ms = (mstate)msp; + if (!PREACTION(ms)) { + if (!use_mmap(ms)) { + ret = 1; + } + if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = sp->sflags; + (void)base; /* placate people compiling -Wunused-variable */ + sp = sp->next; + if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + (void)msp; /* placate people compiling -Wunused */ +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if 
((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); + } + else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } +#ifdef REALLOC_ZERO_BYTES_FREES + else if (bytes == 0) { + mspace_free(msp, oldmem); + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! FOOTERS + mstate m = (mstate)msp; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); + POSTACTION(m); + if (newp != 0) { + check_inuse_chunk(m, newp); + mem = chunk2mem(newp); + } + else { + mem = mspace_malloc(m, bytes); + if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); + memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + mspace_free(m, oldmem); + } + } + } + } + return mem; +} + +void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! 
FOOTERS + mstate m = (mstate)msp; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + (void)msp; /* placate people compiling -Wunused */ + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); + POSTACTION(m); + if (newp == oldp) { + check_inuse_chunk(m, newp); + mem = oldmem; + } + } + } + } + return mem; +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (alignment <= MALLOC_ALIGNMENT) + return mspace_malloc(msp, bytes); + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); +} + +#if MALLOC_INSPECT_ALL +void mspace_inspect_all(mspace msp, + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + internal_inspect_all(ms, handler, arg); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} +#endif /* MALLOC_INSPECT_ALL */ + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +#if !NO_MALLOC_STATS +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} +#endif /* NO_MALLOC_STATS */ + +size_t mspace_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +size_t mspace_max_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; + result = (maf == 0) ? 
MAX_SIZE_T : maf; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (bytes == 0) + result = granularity_align(1); /* Use minimal size */ + if (bytes == MAX_SIZE_T) + result = 0; /* disable */ + else + result = granularity_align(bytes); + ms->footprint_limit = result; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (is_inuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. + * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. 
+ + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea + * fix bad comparison in dlposix_memalign + * don't reuse adjusted asize in sys_alloc + * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion + * reduce compiler warnings -- thanks to all who reported/suggested these + + v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee) + * Always perform unlink checks unless INSECURE + * Add posix_memalign. + * Improve realloc to expand in more cases; expose realloc_in_place. + Thanks to Peter Buhr for the suggestion. + * Add footprint_limit, inspect_all, bulk_free. Thanks + to Barry Hayes and others for the suggestions. + * Internal refactorings to avoid calls while holding locks + * Use non-reentrant locks by default. Thanks to Roland McGrath + for the suggestion. + * Small fixes to mspace_destroy, reset_on_error. + * Various configuration extensions/changes. Thanks + to all who contributed these. + + V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu) + * Update Creative Commons URL + + V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee) + * Use zeros instead of prev foot for is_mmapped + * Add mspace_track_large_chunks; thanks to Jean Brouwers + * Fix set_inuse in internal_realloc; thanks to Jean Brouwers + * Fix insufficient sys_alloc padding when using 16byte alignment + * Fix bad error check in mspace_footprint + * Adaptations for ptmalloc; thanks to Wolfram Gloger. + * Reentrant spin locks; thanks to Earl Chew and others + * Win32 improvements; thanks to Niall Douglas and Earl Chew + * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options + * Extension hook in malloc_state + * Various small adjustments to reduce warnings on some compilers + * Various configuration extensions/changes for more platforms. Thanks + to all who contributed these. + + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. 
+ * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. + + V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) + * Fix improper #endif nesting in C++ + * Add explicit casts needed for C++ + + V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) + * Use trees for large bins + * Support mspaces + * Use segments to unify sbrk-based and mmap-based system allocation, + removing need for emulation on most platforms without sbrk. + * Default safety checks + * Optional footer checks. Thanks to William Robertson for the idea. + * Internal code refactoring + * Incorporate suggestions and platform-specific changes. + Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, + Aaron Bachmann, Emery Berger, and others. + * Speed up non-fastbin processing enough to remove fastbins. + * Remove useless cfree() to avoid conflicts with other apps. + * Remove internal memcpy, memset. Compilers handle builtins better. + * Remove some options that no one ever used and rename others. + + V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + * Fix malloc_state bitmap array misdeclaration + + V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) + * Allow tuning of FIRST_SORTED_BIN_SIZE + * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. + * Better detection and support for non-contiguousness of MORECORE. + Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger + * Bypass most of malloc if no frees. Thanks To Emery Berger. + * Fix freeing of old top non-contiguous chunk im sysmalloc. + * Raised default trim and map thresholds to 256K. + * Fix mmap-related #defines. Thanks to Lubos Lunak. + * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. + * Branch-free bin calculation + * Default trim and mmap thresholds now 256K. + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. 
WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. + * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshhold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occuring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). 
+ + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) + +*/ diff --git a/cpp/src/plasma/thirdparty/xxhash.cc b/cpp/src/plasma/thirdparty/xxhash.cc new file mode 100644 index 0000000000000..f74880b0de71d --- /dev/null +++ b/cpp/src/plasma/thirdparty/xxhash.cc @@ -0,0 +1,889 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. 
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif defined(__INTEL_COMPILER) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. + * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. + * By default, this option is disabled. To enable it, uncomment below define : + */ +/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independence be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. + * The check costs one initial branch per hash; set to 0 when the input data + * is guaranteed to be aligned. + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/*! Modify the local functions below should you wish to use some other memory routines +* for malloc(), free() */ +#include <stdlib.h> +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/*!
and for memcpy() */ +#include <string.h> +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#define XXH_STATIC_LINKING_ONLY +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# define FORCE_INLINE static __forceinline +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; +# endif +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; } __attribute__((packed)) unalign; +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } + +#else + +/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947 + */ +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN + static const int g_one = 1; +# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bits hash functions +*********************************************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p<=limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32) len; + + while (p+4<=bEnd) { + h32 += XXH_get32bits(p) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + p+=4; + } + + while (p<bEnd) { + h32 += (*p) * PRIME32_5; + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, input, len); + return XXH32_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + + +/*====== Hash streaming ======*/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t*
statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem32; + const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p+4<=bEnd) { + h32 += XXH_readLE32(p, endian) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4; + p+=4; + } + + while (p<bEnd) { + h32 += (*p) * PRIME32_5; + h32 = XXH_rotl32(h32, 11) * PRIME32_1; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return
XXH32_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bits hash functions +*********************************************************************/ + +/*====== Memory access ======*/ + +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint64_t U64; +# else + typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; +static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. + * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ?
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/*====== xxh64 ======*/ + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U64 h64; +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p<bEnd) { + h64 ^= (*p) * PRIME64_5; + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, input, len); + return XXH64_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + +/*====== Hash Streaming ======*/ + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState));
+} + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + +FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem64; + const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (U64) state->total_len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p<bEnd) { + h64 ^= (*p) * PRIME64_5; + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32;
+ + return h64; +} + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#endif /* XXH_NO_LONG_LONG */ diff --git a/cpp/src/plasma/thirdparty/xxhash.h b/cpp/src/plasma/thirdparty/xxhash.h new file mode 100644 index 0000000000000..9d831e03b35f6 --- /dev/null +++ b/cpp/src/plasma/thirdparty/xxhash.h @@ -0,0 +1,293 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bits version, named XXH64, is available since r35. +It offers much better speed, but for 64-bits applications only. 
+Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* **************************** +* Definitions +******************************/ +#include <stddef.h> /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** +* API modifier +******************************/ +/** XXH_PRIVATE_API +* This is useful to include xxhash functions in `static` mode +* in order to inline them, and remove their symbol from the public list. +* Methodology : +* #define XXH_PRIVATE_API +* #include "xxhash.h" +* `xxhash.c` is automatically included. +* It's not useful to compile and link it as a separate module. +*/ +#ifdef XXH_PRIVATE_API +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else +# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_PRIVATE_API */ + +/*!XXH_NAMESPACE, aka Namespace Emulation : + +If you want to include _and expose_ xxHash functions from within your own library, +but also want to avoid symbol collisions with other libraries which may also include xxHash, + +you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library +with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + +Note that no change is required within the calling program as long as it includes `xxhash.h` : +regular symbol name will be automatically translated by this header.
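For illustration only (the `mylib_` prefix below is an arbitrary example, not part of the library), defining the macro with the same value when compiling xxhash.c and when including this header is enough:

    #define XXH_NAMESPACE mylib_
    #include "xxhash.h"
    /* the call below is renamed by this header to mylib_XXH32() */
    unsigned int h = XXH32(buffer, size, 0);

The library's public symbols then carry the prefix while the calling code stays textually unchanged.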
+*/ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/*-********************************************************************** +* 32-bits hash +************************************************************************/ +typedef unsigned int XXH32_hash_t; + +/*! XXH32() : + Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); + +/*====== Streaming ======*/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +/* +These functions generate the xxHash of an input provided in multiple segments. +Note that, for small input, they are slower than single-call functions, due to state management. +For small input, prefer `XXH32()` and `XXH64()` . + +XXH state must first be allocated, using XXH*_createState() . + +Start a new hash by initializing state with a seed, using XXH*_reset(). + +Then, feed the hash state by calling XXH*_update() as many times as necessary. +Obviously, input must be allocated and read accessible. 
+The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + +Finally, a hash value can be produced anytime, by using XXH*_digest(). +This function returns the nn-bits hash as an int or long long. + +It's still possible to continue inserting input into the hash state after a digest, +and generate some new hashes later on, by calling again XXH*_digest(). + +When done, free XXH state space if it was allocated dynamically. +*/ + +/*====== Canonical representation ======*/ + +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. +* The canonical representation uses human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. +*/ + + +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** +* 64-bits hash +************************************************************************/ +typedef unsigned long long XXH64_hash_t; + +/*! XXH64() : + Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark). +*/ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*====== Streaming ======*/ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/*====== Canonical representation ======*/ +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); +#endif /* XXH_NO_LONG_LONG */ + + +#ifdef XXH_STATIC_LINKING_ONLY + +/* ================================================================================================ + This section contains definitions which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + They shall only be used with static linking. + Never use these definitions in association with dynamic linking ! +=================================================================================================== */ + +/* These definitions are only meant to make possible + static allocation of XXH state, on stack or in a struct for example. + Never use members directly. 
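As a sketch of the streaming flow described earlier (buffer and length names here are placeholders, not part of the library), a state can live on the stack once XXH_STATIC_LINKING_ONLY is defined before including this header:

    #define XXH_STATIC_LINKING_ONLY
    #include "xxhash.h"

    XXH64_state_t state;                          /* statically allocated, no XXH64_createState() needed */
    XXH64_reset(&state, 0);                       /* start a new hash with seed 0 */
    XXH64_update(&state, part1, part1_len);       /* feed segments as they arrive */
    XXH64_update(&state, part2, part2_len);
    unsigned long long h = XXH64_digest(&state);  /* same value as one-shot XXH64() over the concatenated input */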
*/ + +struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; /* buffer defined as U32 for alignment */ + unsigned memsize; + unsigned reserved; /* never read nor write, will be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +#ifndef XXH_NO_LONG_LONG /* remove 64-bits support */ +struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ + unsigned memsize; + unsigned reserved[2]; /* never read nor write, will be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ +#endif + +#ifdef XXH_PRIVATE_API +# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ +#endif + +#endif /* XXH_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* XXHASH_H_5627135585666179 */ From fabf7fb6ae80f7d4dc324bf562e35ad6ee55fe31 Mon Sep 17 00:00:00 2001 From: Max Risuhin Date: Sun, 23 Jul 2017 22:32:47 -0400 Subject: [PATCH 05/16] =?UTF-8?q?ARROW-1241:=20[C++]=20Appveyor=20build=20?= =?UTF-8?q?matrix=20extended=20with=20Visual=20Studio=202=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …017 environment build job Author: Max Risuhin Closes #870 from MaxRis/ARROW-1241 and squashes the following commits: a3ba6b0 [Max Risuhin] ARROW-1241: [C++] Appveyor build matrix extended with Visual Studio 2017 environment build job --- appveyor.yml | 7 +++++++ cpp/cmake_modules/SnappyConfig.h | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index c58e1dab7d8fd..91e9ee2649073 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -45,6 +45,13 @@ environment: PYTHON: "3.5" ARCH: "64" CONFIGURATION: "Release" + - JOB: "Build" + GENERATOR: Visual Studio 15 2017 Win64 + PYTHON: "3.5" + ARCH: "64" + CONFIGURATION: "Release" + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + BOOST_ROOT: C:\Libraries\boost_1_64_0 MSVC_DEFAULT_OPTIONS: ON BOOST_ROOT: C:\Libraries\boost_1_63_0 diff --git a/cpp/cmake_modules/SnappyConfig.h b/cpp/cmake_modules/SnappyConfig.h index 74eb77621626b..c998d1813aa7e 100644 --- a/cpp/cmake_modules/SnappyConfig.h +++ b/cpp/cmake_modules/SnappyConfig.h @@ -29,7 +29,7 @@ #ifndef SNAPPY_CONFIG_H #define SNAPPY_CONFIG_H 1 -#if defined(_MSC_VER) && (_MSC_VER <= 1900) +#if defined(_MSC_VER) && (_MSC_VER <= 1910) typedef __int64 ssize_t; #endif From e1b098e9303b240a07a241bb43d88f787f3875cb Mon Sep 17 00:00:00 2001 From: Matt Darwin <(none)> Date: Mon, 24 Jul 2017 10:55:33 -0400 Subject: [PATCH 06/16] ARROW-1240: [JAVA] security: upgrade slf4j to 1.7.25 and logback to 1.2.3 This PR supercedes #871 (repo fork for that one was deleted) Author: Matt Darwin <(none)> Closes #879 from mattdarwin/ARROW-1240-upgrade-logback and squashes the following commits: caed163 [Matt Darwin] upgrading slf4j to 1.7.25 --- java/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/java/pom.xml b/java/pom.xml index 2613a44104576..0d4e6128b4738 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -30,7 +30,7 @@ ${project.basedir}/target/generated-sources 4.11 - 1.7.6 + 1.7.25 18.0 2 2.7.1 @@ -520,7 +520,7 @@ ch.qos.logback logback-classic - 1.0.13 + 1.2.3 test From 457bb071df95e71eef45159619cc6b531fdec08f Mon Sep 17 00:00:00 2001 From: siddharth Date: Mon, 24 Jul 2017 08:13:24 -0700 
Subject: [PATCH 07/16] ARROW-1237: [JAVA] expose the ability to set lastSet The changes here expose the ability to set "lastSet" on NullableVector. I believe this is needed only for NullableVarCharVector and NullableVarBinaryVector. Hence the API is exposed through NullableValueVectors.java Author: siddharth Closes #868 from siddharthteotia/ARROW-1237 and squashes the following commits: 786dfea [siddharth] ARROW-1237: addressed review comments and added more tests 73b2fc5 [siddharth] ARROW-1237: added some unit tests f8c7277 [siddharth] ARROW-1237: expose the ability to set lastSet --- .gitignore | 3 +- .../templates/NullableValueVectors.java | 16 ++ .../arrow/vector/complex/ListVector.java | 6 + .../apache/arrow/vector/TestListVector.java | 154 ++++++++++++++++++ .../apache/arrow/vector/TestValueVector.java | 132 ++++++++++++++- 5 files changed, 309 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index dd69b6cec9c5f..e6dfe19bb9807 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,5 @@ MANIFEST cpp/.idea/ python/.eggs/ -.vscode \ No newline at end of file +.vscode +.idea/ diff --git a/java/vector/src/main/codegen/templates/NullableValueVectors.java b/java/vector/src/main/codegen/templates/NullableValueVectors.java index 092097bb2bd6d..1decd0b313802 100644 --- a/java/vector/src/main/codegen/templates/NullableValueVectors.java +++ b/java/vector/src/main/codegen/templates/NullableValueVectors.java @@ -699,6 +699,22 @@ public void reset(){ setCount = 0; <#if type.major = "VarLen">lastSet = -1; } + + public void setLastSet(int value) { + <#if type.major = "VarLen"> + lastSet = value; + <#else> + throw new UnsupportedOperationException(); + + } + + public int getLastSet() { + <#if type.major != "VarLen"> + throw new UnsupportedOperationException(); + <#else> + return lastSet; + + } } } diff --git a/java/vector/src/main/java/org/apache/arrow/vector/complex/ListVector.java b/java/vector/src/main/java/org/apache/arrow/vector/complex/ListVector.java index 4ab624f3694cb..6357294566017 100644 --- a/java/vector/src/main/java/org/apache/arrow/vector/complex/ListVector.java +++ b/java/vector/src/main/java/org/apache/arrow/vector/complex/ListVector.java @@ -393,6 +393,12 @@ public void setValueCount(int valueCount) { vector.getMutator().setValueCount(childValueCount); bits.getMutator().setValueCount(valueCount); } + + public void setLastSet(int value) { + lastSet = value; + } + + public int getLastSet() { return lastSet; } } } diff --git a/java/vector/src/test/java/org/apache/arrow/vector/TestListVector.java b/java/vector/src/test/java/org/apache/arrow/vector/TestListVector.java index 11be3298f7533..29ea7628f452b 100644 --- a/java/vector/src/test/java/org/apache/arrow/vector/TestListVector.java +++ b/java/vector/src/test/java/org/apache/arrow/vector/TestListVector.java @@ -17,15 +17,26 @@ */ package org.apache.arrow.vector; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.vector.complex.ListVector; import org.apache.arrow.vector.complex.impl.UnionListWriter; +import org.apache.arrow.vector.complex.impl.UnionListReader; import org.apache.arrow.vector.complex.reader.FieldReader; +import org.apache.arrow.vector.complex.writer.FieldWriter; +import org.apache.arrow.vector.holders.NullableBigIntHolder; +import org.apache.arrow.vector.types.Types; +import org.apache.arrow.vector.types.Types.*; +import org.apache.arrow.vector.types.pojo.FieldType; import org.junit.After; import 
org.junit.Assert; import org.junit.Before; import org.junit.Test; +import java.util.List; + public class TestListVector { private BufferAllocator allocator; @@ -80,4 +91,147 @@ public void testCopyFrom() throws Exception { Assert.assertTrue("shouldn't be null", reader.isSet()); } } + + @Test + public void testSetLastSetUsage() throws Exception { + try (ListVector listVector = ListVector.empty("input", allocator)) { + + /* Explicitly add the dataVector */ + MinorType type = MinorType.BIGINT; + listVector.addOrGetVector(FieldType.nullable(type.getType())); + + /* allocate memory */ + listVector.allocateNew(); + + /* get inner vectors; bitVector and offsetVector */ + List innerVectors = listVector.getFieldInnerVectors(); + BitVector bitVector = (BitVector)innerVectors.get(0); + UInt4Vector offsetVector = (UInt4Vector)innerVectors.get(1); + + /* get the underlying data vector -- NullableBigIntVector */ + NullableBigIntVector dataVector = (NullableBigIntVector)listVector.getDataVector(); + + /* check current lastSet */ + assertEquals(Integer.toString(0), Integer.toString(listVector.getMutator().getLastSet())); + + int index = 0; + int offset = 0; + + /* write [10, 11, 12] to the list vector at index */ + bitVector.getMutator().setSafe(index, 1); + dataVector.getMutator().setSafe(0, 1, 10); + dataVector.getMutator().setSafe(1, 1, 11); + dataVector.getMutator().setSafe(2, 1, 12); + offsetVector.getMutator().setSafe(index + 1, 3); + + index += 1; + + /* write [13, 14] to the list vector at index 1 */ + bitVector.getMutator().setSafe(index, 1); + dataVector.getMutator().setSafe(3, 1, 13); + dataVector.getMutator().setSafe(4, 1, 14); + offsetVector.getMutator().setSafe(index + 1, 5); + + index += 1; + + /* write [15, 16, 17] to the list vector at index 2 */ + bitVector.getMutator().setSafe(index, 1); + dataVector.getMutator().setSafe(5, 1, 15); + dataVector.getMutator().setSafe(6, 1, 16); + dataVector.getMutator().setSafe(7, 1, 17); + offsetVector.getMutator().setSafe(index + 1, 8); + + /* check current lastSet */ + assertEquals(Integer.toString(0), Integer.toString(listVector.getMutator().getLastSet())); + + /* set lastset and arbitrary valuecount for list vector. + * + * NOTE: if we don't execute setLastSet() before setLastValueCount(), then + * the latter will corrupt the offsetVector and thus the accessor will not + * retrieve the correct values from underlying dataVector. Run the test + * by commenting out next line and we should see failures from 5th assert + * onwards. This is why doing setLastSet() is important before setValueCount() + * once the vector has been loaded. + * + * Another important thing to remember is the value of lastSet itself. + * Even though the listVector has elements till index 2 only, the lastSet should + * be set as 3. This is because the offsetVector has valid offsets filled till index 3. + * If we do setLastSet(2), the offsetVector at index 3 will contain incorrect value + * after execution of setValueCount(). + * + * correct state of the listVector + * bitvector {1, 1, 1, 0, 0.... } + * offsetvector {0, 3, 5, 8, 8, 8.....} + * datavector { [10, 11, 12], + * [13, 14], + * [15, 16, 17] + * } + * + * if we don't do setLastSet() before setValueCount --> incorrect state + * bitvector {1, 1, 1, 0, 0.... } + * offsetvector {0, 0, 0, 0, 0, 0.....} + * datavector { [10, 11, 12], + * [13, 14], + * [15, 16, 17] + * } + * + * if we do setLastSet(2) before setValueCount --> incorrect state + * bitvector {1, 1, 1, 0, 0.... 
} + * offsetvector {0, 3, 5, 5, 5, 5.....} + * datavector { [10, 11, 12], + * [13, 14], + * [15, 16, 17] + * } + */ + listVector.getMutator().setLastSet(3); + listVector.getMutator().setValueCount(10); + + /* check the vector output */ + final UInt4Vector.Accessor offsetAccessor = offsetVector.getAccessor(); + final ValueVector.Accessor valueAccessor = dataVector.getAccessor(); + + index = 0; + offset = offsetAccessor.get(index); + assertEquals(Integer.toString(0), Integer.toString(offset)); + + Object actual = valueAccessor.getObject(offset); + assertEquals(new Long(10), (Long)actual); + offset++; + actual = valueAccessor.getObject(offset); + assertEquals(new Long(11), (Long)actual); + offset++; + actual = valueAccessor.getObject(offset); + assertEquals(new Long(12), (Long)actual); + + index++; + offset = offsetAccessor.get(index); + assertEquals(Integer.toString(3), Integer.toString(offset)); + + actual = valueAccessor.getObject(offset); + assertEquals(new Long(13), (Long)actual); + offset++; + actual = valueAccessor.getObject(offset); + assertEquals(new Long(14), (Long)actual); + + index++; + offset = offsetAccessor.get(index); + assertEquals(Integer.toString(5), Integer.toString(offset)); + + actual = valueAccessor.getObject(offsetAccessor.get(index)); + assertEquals(new Long(15), (Long)actual); + offset++; + actual = valueAccessor.getObject(offset); + assertEquals(new Long(16), (Long)actual); + offset++; + actual = valueAccessor.getObject(offset); + assertEquals(new Long(17), (Long)actual); + + index++; + offset = offsetAccessor.get(index); + assertEquals(Integer.toString(8), Integer.toString(offset)); + + actual = valueAccessor.getObject(offsetAccessor.get(index)); + assertNull(actual); + } + } } diff --git a/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java b/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java index 63543b0932908..f5508aab2ce1d 100644 --- a/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java +++ b/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java @@ -26,11 +26,15 @@ import java.nio.charset.Charset; import java.util.List; +import java.util.ArrayList; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; + +import org.apache.arrow.vector.schema.ArrowRecordBatch; import org.apache.arrow.vector.schema.TypeLayout; import org.apache.arrow.vector.types.Types.MinorType; +import org.apache.arrow.vector.types.pojo.Schema; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import org.junit.After; @@ -56,6 +60,9 @@ public void init() { private final static byte[] STR1 = "AAAAA1".getBytes(utf8Charset); private final static byte[] STR2 = "BBBBBBBBB2".getBytes(utf8Charset); private final static byte[] STR3 = "CCCC3".getBytes(utf8Charset); + private final static byte[] STR4 = "DDDDDDDD4".getBytes(utf8Charset); + private final static byte[] STR5 = "EEE5".getBytes(utf8Charset); + private final static byte[] STR6 = "FFFFF6".getBytes(utf8Charset); @After public void terminate() throws Exception { @@ -509,11 +516,134 @@ public void testCopyFromWithNulls() { } else { assertEquals(Integer.toString(i), vector2.getAccessor().getObject(i).toString()); } - } + } + } + + @Test + public void testSetLastSetUsage() { + try (final NullableVarCharVector vector = new NullableVarCharVector("myvector", allocator)) { + final NullableVarCharVector.Mutator mutator = vector.getMutator(); + + vector.allocateNew(1024 * 10, 1024); + + 
setBytes(0, STR1, vector); + setBytes(1, STR2, vector); + setBytes(2, STR3, vector); + setBytes(3, STR4, vector); + setBytes(4, STR5, vector); + setBytes(5, STR6, vector); + + /* Check current lastSet */ + assertEquals(Integer.toString(-1), Integer.toString(mutator.getLastSet())); + + /* Check the vector output */ + final NullableVarCharVector.Accessor accessor = vector.getAccessor(); + assertArrayEquals(STR1, accessor.get(0)); + assertArrayEquals(STR2, accessor.get(1)); + assertArrayEquals(STR3, accessor.get(2)); + assertArrayEquals(STR4, accessor.get(3)); + assertArrayEquals(STR5, accessor.get(4)); + assertArrayEquals(STR6, accessor.get(5)); + + /* + * If we don't do setLastSe(5) before setValueCount(), then the latter will corrupt + * the value vector by filling in all positions [0,valuecount-1] will empty byte arrays. + * Run the test by commenting out next line and we should see incorrect vector output. + */ + mutator.setLastSet(5); + mutator.setValueCount(20); + + /* Check the vector output again */ + assertArrayEquals(STR1, accessor.get(0)); + assertArrayEquals(STR2, accessor.get(1)); + assertArrayEquals(STR3, accessor.get(2)); + assertArrayEquals(STR4, accessor.get(3)); + assertArrayEquals(STR5, accessor.get(4)); + assertArrayEquals(STR6, accessor.get(5)); + } + } + @Test + public void testVectorLoadUnload() { + + try (final NullableVarCharVector vector1 = new NullableVarCharVector("myvector", allocator)) { + + final NullableVarCharVector.Mutator mutator1 = vector1.getMutator(); + + vector1.allocateNew(1024 * 10, 1024); + + mutator1.set(0, STR1); + mutator1.set(1, STR2); + mutator1.set(2, STR3); + mutator1.set(3, STR4); + mutator1.set(4, STR5); + mutator1.set(5, STR6); + assertEquals(Integer.toString(5), Integer.toString(mutator1.getLastSet())); + mutator1.setValueCount(15); + assertEquals(Integer.toString(14), Integer.toString(mutator1.getLastSet())); + + /* Check the vector output */ + final NullableVarCharVector.Accessor accessor1 = vector1.getAccessor(); + assertArrayEquals(STR1, accessor1.get(0)); + assertArrayEquals(STR2, accessor1.get(1)); + assertArrayEquals(STR3, accessor1.get(2)); + assertArrayEquals(STR4, accessor1.get(3)); + assertArrayEquals(STR5, accessor1.get(4)); + assertArrayEquals(STR6, accessor1.get(5)); + + Field field = vector1.getField(); + String fieldName = field.getName(); + + List fields = new ArrayList(); + List fieldVectors = new ArrayList(); + + fields.add(field); + fieldVectors.add(vector1); + + Schema schema = new Schema(fields); + + VectorSchemaRoot schemaRoot1 = new VectorSchemaRoot(schema, fieldVectors, accessor1.getValueCount()); + VectorUnloader vectorUnloader = new VectorUnloader(schemaRoot1); + + try ( + ArrowRecordBatch recordBatch = vectorUnloader.getRecordBatch(); + BufferAllocator finalVectorsAllocator = allocator.newChildAllocator("new vector", 0, Long.MAX_VALUE); + VectorSchemaRoot schemaRoot2 = VectorSchemaRoot.create(schema, finalVectorsAllocator); + ) { + + VectorLoader vectorLoader = new VectorLoader(schemaRoot2); + vectorLoader.load(recordBatch); + + NullableVarCharVector vector2 = (NullableVarCharVector)schemaRoot2.getVector(fieldName); + NullableVarCharVector.Mutator mutator2 = vector2.getMutator(); + + /* + * lastSet would have internally been set by VectorLoader.load() when it invokes + * loadFieldBuffers. 
+ */ + assertEquals(Integer.toString(14), Integer.toString(mutator2.getLastSet())); + mutator2.setValueCount(25); + assertEquals(Integer.toString(24), Integer.toString(mutator2.getLastSet())); + + /* Check the vector output */ + final NullableVarCharVector.Accessor accessor2 = vector2.getAccessor(); + assertArrayEquals(STR1, accessor2.get(0)); + assertArrayEquals(STR2, accessor2.get(1)); + assertArrayEquals(STR3, accessor2.get(2)); + assertArrayEquals(STR4, accessor2.get(3)); + assertArrayEquals(STR5, accessor2.get(4)); + assertArrayEquals(STR6, accessor2.get(5)); + } } } + public static void setBytes(int index, byte[] bytes, NullableVarCharVector vector) { + final int currentOffset = vector.values.offsetVector.getAccessor().get(index); + + vector.bits.getMutator().setToOne(index); + vector.values.offsetVector.getMutator().set(index + 1, currentOffset + bytes.length); + vector.values.data.setBytes(currentOffset, bytes, 0, bytes.length); + } } From 05f7058ce500278d52118c0f0e8320e93a18cf53 Mon Sep 17 00:00:00 2001 From: Antony Mayi Date: Mon, 24 Jul 2017 08:22:38 -0700 Subject: [PATCH 08/16] ARROW-1239: [JAVA] upgrading git-commit-id-plugin Currently used version of the git-commit-id-plugin maven plugin (2.1.9) doesn't work with recent git structures. This is for majority of the users not manifested since Arrow has the java maven root in the project subdirectory (`/java`) instead of top level so this plugin normally doesn't kick in if maven is executed from the subdirectory (usual case - ie `cd java; mvn install` - works fine) as the plugin doesn't see the `.git` directory but it does kick in and fail if executed from the main arrow top level dir as `mvn -f java/pom.xml install` (where the `.git` sits): ``` $ mvn -f java/pom.xml package ... [ERROR] Failed to execute goal pl.project13.maven:git-commit-id-plugin:2.1.9:revision (for-jars) on project arrow-java-root: Execution for-jars of goal pl.project13.maven:git-commit-id-plugin:2.1.9:revision failed: Bare Repository has neither a working tree, nor an index -> [Help 1] ``` Simple fix is upgrading the plugin to recent version (the minimal working version appears to be 2.1.13). This is required for seamless integration with Jenkins (ARROW-1234). Author: Antony Mayi Closes #869 from antonymayi/master and squashes the following commits: 6976ee0 [Antony Mayi] ARROW-1239 - upgrading git-commit-id-plugin --- java/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/java/pom.xml b/java/pom.xml index 0d4e6128b4738..81f80b00b563c 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -231,7 +231,7 @@ pl.project13.maven git-commit-id-plugin - 2.1.9 + 2.2.2 for-jars From a94f4716be8c33e86222d5a0be5a4d2a9102b93d Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Mon, 24 Jul 2017 12:12:42 -0400 Subject: [PATCH 09/16] ARROW-1149: [Plasma] Create Cython client library for Plasma This PR introduces a Cython API to Plasma, a FindPlasma.cmake to make it easier to integrate Plasma with CMake projects and sets up packaging with pyarrow. 
Author: Philipp Moritz Author: Robert Nishihara Closes #797 from pcmoritz/plasma-cython and squashes the following commits: d8319fc [Philipp Moritz] get for of PlasmaClient.connect d14ab87 [Philipp Moritz] get rid of MutableBuffer 08f24a5 [Philipp Moritz] fix typos and move FixedSizeBufferOutputStream e33443d [Philipp Moritz] fix setup.py develop for plasma 5f7b779 [Philipp Moritz] changes needed to make Ray work with Plasma in Arrow b9e2dee [Philipp Moritz] fix windows build 3e4a84d [Philipp Moritz] fix segfault 0bea267 [Philipp Moritz] debug 23fe5f5 [Philipp Moritz] make plasma store binary part of the pyarrow package for tests b863d13 [Philipp Moritz] fix 997de1e [Philipp Moritz] fix 47dc739 [Philipp Moritz] fixes 47033e7 [Philipp Moritz] switch to pytest ed84c53 [Philipp Moritz] partial fixes 9bc5c15 [Philipp Moritz] implement wait and fetch for the client 45f338f [Philipp Moritz] test plasma on macOS 8b53618 [Philipp Moritz] fix 54f595e [Philipp Moritz] try fixing python 2 tests 2c6d652 [Philipp Moritz] convert docs to numpy format 3270628 [Philipp Moritz] try to get documentation up 44d1a55 [Philipp Moritz] cleanups and release GIL a9f6502 [Philipp Moritz] more fixes 1ff88e7 [Philipp Moritz] fix travix ci 348f9bf [Philipp Moritz] fixes 4ae1a27 [Philipp Moritz] fix fd80203 [Philipp Moritz] Plasma Python extension packaging: It compiles! 3b69973 [Robert Nishihara] Fixed minor python linting. c9f6bcf [Robert Nishihara] Fix indentation and line lengths in plasma.pyx. 67b0951 [Robert Nishihara] Fix long lines in plasma/test/test.py. e26527c [Robert Nishihara] Convert plasma test.py from 2 space indentation to 4 space indentation. acc71d2 [Philipp Moritz] add round trip test for dataframes 2b7f949 [Philipp Moritz] implement mutable arrow python buffers c06f1b5 [Philipp Moritz] fix test 1d7928f [Philipp Moritz] add arrow roundtrip test 6371e2e [Philipp Moritz] fix tests 3021d59 [Philipp Moritz] make ObjectID pickleable dd5a7d8 [Philipp Moritz] fix tests 777e9c7 [Philipp Moritz] introduce plasma namespace a4a9628 [Philipp Moritz] fix c++ tests 924888b [Philipp Moritz] update f970df3 [Philipp Moritz] reduce logging 2ff2480 [Philipp Moritz] workaround for python visibility d4934a9 [Philipp Moritz] update cba92c1 [Philipp Moritz] setup.py for plasma 066d0ea [Philipp Moritz] test 1aea320 [Philipp Moritz] run plasma tests 3c4de52 [Philipp Moritz] use cmake to build the cython extension bf39297 [Philipp Moritz] build and install pyarrow for plasma tests 5bf722a [Philipp Moritz] fix plasma path 1c5434c [Philipp Moritz] fix formatting 187cc24 [Philipp Moritz] add travis tests c3d462d [Philipp Moritz] remove Python C extension d9261b4 [Philipp Moritz] add documentation and license db2d09a [Philipp Moritz] get all python tests in place 78d08ac [Philipp Moritz] make eviction work in Cython 18e0ac4 [Philipp Moritz] get tests bc681ca [Philipp Moritz] port some python tests f8e05f2 [Philipp Moritz] implement plasma.get in the cython client d590c8a [Philipp Moritz] update 5178ee7 [Philipp Moritz] update 9044a01 [Philipp Moritz] initial plasma cython client commit --- .travis.yml | 21 + ci/travis_script_manylinux.sh | 2 +- ci/travis_script_plasma.sh | 97 +++ ci/travis_script_python.sh | 4 +- cpp/src/arrow/util/logging.h | 6 +- cpp/src/plasma/CMakeLists.txt | 51 +- cpp/src/plasma/client.cc | 86 ++- cpp/src/plasma/client.h | 62 +- cpp/src/plasma/common.cc | 9 +- cpp/src/plasma/common.h | 39 +- cpp/src/plasma/events.cc | 4 + cpp/src/plasma/events.h | 4 + cpp/src/plasma/eviction_policy.cc | 4 + 
cpp/src/plasma/eviction_policy.h | 4 + cpp/src/plasma/extension.cc | 456 -------------- cpp/src/plasma/extension.h | 50 -- cpp/src/plasma/plasma.cc | 4 + cpp/src/plasma/plasma.h | 53 +- cpp/src/plasma/plasma.pc.in | 30 + cpp/src/plasma/protocol.cc | 6 +- cpp/src/plasma/protocol.h | 6 +- cpp/src/plasma/store.cc | 12 +- cpp/src/plasma/store.h | 4 + cpp/src/plasma/test/client_tests.cc | 10 +- cpp/src/plasma/test/serialization_tests.cc | 4 + python/CMakeLists.txt | 18 + python/cmake_modules/FindPlasma.cmake | 99 +++ python/doc/source/api.rst | 15 + python/manylinux1/build_arrow.sh | 5 +- python/pyarrow/__init__.py | 2 +- python/pyarrow/error.pxi | 18 + python/pyarrow/includes/common.pxd | 3 + python/pyarrow/includes/libarrow.pxd | 9 + python/pyarrow/io.pxi | 23 +- python/pyarrow/plasma.pyx | 560 +++++++++++++++++ python/pyarrow/tests/conftest.py | 8 +- python/pyarrow/tests/test_plasma.py | 683 +++++++++++++++++++++ python/setup.py | 15 + 38 files changed, 1855 insertions(+), 631 deletions(-) create mode 100755 ci/travis_script_plasma.sh delete mode 100644 cpp/src/plasma/extension.cc delete mode 100644 cpp/src/plasma/extension.h create mode 100644 cpp/src/plasma/plasma.pc.in create mode 100644 python/cmake_modules/FindPlasma.cmake create mode 100644 python/pyarrow/plasma.pyx create mode 100644 python/pyarrow/tests/test_plasma.py diff --git a/.travis.yml b/.travis.yml index cdf787c831b0f..9cc2b86c05cde 100644 --- a/.travis.yml +++ b/.travis.yml @@ -120,6 +120,27 @@ matrix: - $TRAVIS_BUILD_DIR/ci/travis_before_script_c_glib.sh script: - $TRAVIS_BUILD_DIR/ci/travis_script_c_glib.sh + - compiler: gcc + language: cpp + os: linux + group: deprecated + before_script: + - export CC="gcc-4.9" + - export CXX="g++-4.9" + - $TRAVIS_BUILD_DIR/ci/travis_before_script_cpp.sh + script: + - $TRAVIS_BUILD_DIR/ci/travis_script_cpp.sh + - $TRAVIS_BUILD_DIR/ci/travis_script_plasma.sh + - compiler: clang + osx_image: xcode6.4 + os: osx + cache: + addons: + before_script: + - $TRAVIS_BUILD_DIR/ci/travis_before_script_cpp.sh + script: + - $TRAVIS_BUILD_DIR/ci/travis_script_cpp.sh + - $TRAVIS_BUILD_DIR/ci/travis_script_plasma.sh before_install: - ulimit -c unlimited -S diff --git a/ci/travis_script_manylinux.sh b/ci/travis_script_manylinux.sh index 4e6be62bd3e9d..844d5f719f15a 100755 --- a/ci/travis_script_manylinux.sh +++ b/ci/travis_script_manylinux.sh @@ -18,4 +18,4 @@ set -ex pushd python/manylinux1 git clone ../../ arrow docker build -t arrow-base-x86_64 -f Dockerfile-x86_64 . -docker run --rm -e PYARROW_PARALLEL=3 -v $PWD:/io arrow-base-x86_64 /io/build_arrow.sh +docker run --shm-size=2g --rm -e PYARROW_PARALLEL=3 -v $PWD:/io arrow-base-x86_64 /io/build_arrow.sh diff --git a/ci/travis_script_plasma.sh b/ci/travis_script_plasma.sh new file mode 100755 index 0000000000000..fa384ade89c2f --- /dev/null +++ b/ci/travis_script_plasma.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. 
+ +set -e + +source $TRAVIS_BUILD_DIR/ci/travis_env_common.sh + +export ARROW_HOME=$ARROW_CPP_INSTALL +export PYARROW_WITH_PLASMA=1 + +pushd $ARROW_PYTHON_DIR + +function build_arrow_libraries() { + CPP_BUILD_DIR=$1 + CPP_DIR=$TRAVIS_BUILD_DIR/cpp + + mkdir $CPP_BUILD_DIR + pushd $CPP_BUILD_DIR + + cmake -DARROW_BUILD_TESTS=off \ + -DARROW_PYTHON=on \ + -DARROW_PLASMA=on \ + -DCMAKE_INSTALL_PREFIX=$2 \ + $CPP_DIR + + make -j4 + make install + + popd +} + +python_version_tests() { + PYTHON_VERSION=$1 + CONDA_ENV_DIR=$TRAVIS_BUILD_DIR/pyarrow-test-$PYTHON_VERSION + + export ARROW_HOME=$TRAVIS_BUILD_DIR/arrow-install-$PYTHON_VERSION + export LD_LIBRARY_PATH=$ARROW_HOME/lib:$PARQUET_HOME/lib + + conda create -y -q -p $CONDA_ENV_DIR python=$PYTHON_VERSION cmake curl + source activate $CONDA_ENV_DIR + + python --version + which python + + # faster builds, please + conda install -y -q nomkl + + # Expensive dependencies install from Continuum package repo + conda install -y -q pip numpy pandas cython + + # Build C++ libraries + build_arrow_libraries arrow-build-$PYTHON_VERSION $ARROW_HOME + + # Other stuff pip install + pip install -r requirements.txt + + python setup.py build_ext --inplace + + python -m pytest -vv -r sxX pyarrow + + # Build documentation once + if [[ "$PYTHON_VERSION" == "3.6" ]] + then + conda install -y -q --file=doc/requirements.txt + python setup.py build_sphinx -s doc/source + fi + + # Build and install pyarrow + pushd $TRAVIS_BUILD_DIR/python + python setup.py install + popd + + # Run Plasma tests + pushd $TRAVIS_BUILD_DIR/python + python -m pytest pyarrow/tests/test_plasma.py + if [ $TRAVIS_OS_NAME == "linux" ]; then + PLASMA_VALGRIND=1 python -m pytest pyarrow/tests/test_plasma.py + fi + popd +} + +# run tests for python 2.7 and 3.6 +python_version_tests 2.7 +python_version_tests 3.6 + +popd diff --git a/ci/travis_script_python.sh b/ci/travis_script_python.sh index ac64c548d8225..fdb5ad6a62c93 100755 --- a/ci/travis_script_python.sh +++ b/ci/travis_script_python.sh @@ -17,6 +17,7 @@ set -e source $TRAVIS_BUILD_DIR/ci/travis_env_common.sh export ARROW_HOME=$ARROW_CPP_INSTALL +export PYARROW_WITH_PLASMA=1 pushd $ARROW_PYTHON_DIR export PARQUET_HOME=$TRAVIS_BUILD_DIR/parquet-env @@ -71,9 +72,8 @@ function build_arrow_libraries() { pushd $CPP_BUILD_DIR cmake -DARROW_BUILD_TESTS=off \ - -DARROW_PYTHON=on \ - -DPLASMA_PYTHON=on \ -DARROW_PLASMA=on \ + -DARROW_PYTHON=on \ -DCMAKE_INSTALL_PREFIX=$2 \ $CPP_DIR diff --git a/cpp/src/arrow/util/logging.h b/cpp/src/arrow/util/logging.h index b6181219dbae6..0edaa9dfc37a3 100644 --- a/cpp/src/arrow/util/logging.h +++ b/cpp/src/arrow/util/logging.h @@ -113,8 +113,10 @@ class CerrLog { template CerrLog& operator<<(const T& t) { - has_logged_ = true; - std::cerr << t; + if (severity_ != ARROW_DEBUG) { + has_logged_ = true; + std::cerr << t; + } return *this; } diff --git a/cpp/src/plasma/CMakeLists.txt b/cpp/src/plasma/CMakeLists.txt index 4ff3beba779c2..8bb7e71fdf11b 100644 --- a/cpp/src/plasma/CMakeLists.txt +++ b/cpp/src/plasma/CMakeLists.txt @@ -19,16 +19,13 @@ cmake_minimum_required(VERSION 2.8) project(plasma) +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/../python/cmake_modules") + find_package(PythonLibsNew REQUIRED) find_package(Threads) -option(PLASMA_PYTHON - "Build the Plasma Python extensions" - OFF) - -if(APPLE) - SET(CMAKE_SHARED_LIBRARY_SUFFIX ".so") -endif(APPLE) +set(PLASMA_SO_VERSION "0") +set(PLASMA_ABI_VERSION "${PLASMA_SO_VERSION}.0.0") include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS}) 
include_directories("${FLATBUFFERS_INCLUDE_DIR}" "${CMAKE_CURRENT_LIST_DIR}/" "${CMAKE_CURRENT_LIST_DIR}/thirdparty/" "${CMAKE_CURRENT_LIST_DIR}/../") @@ -40,7 +37,7 @@ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-conversion") # Compile flatbuffers set(PLASMA_FBS_SRC "${CMAKE_CURRENT_LIST_DIR}/format/plasma.fbs" "${CMAKE_CURRENT_LIST_DIR}/format/common.fbs") -set(OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/format/) +set(OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/) set(PLASMA_FBS_OUTPUT_FILES "${OUTPUT_DIR}/common_generated.h" @@ -69,8 +66,6 @@ endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") -set_source_files_properties(extension.cc PROPERTIES COMPILE_FLAGS -Wno-strict-aliasing) - set(PLASMA_SRCS client.cc common.cc @@ -97,17 +92,33 @@ set_source_files_properties(malloc.cc PROPERTIES COMPILE_FLAGS "-Wno-error -O3") add_executable(plasma_store store.cc) target_link_libraries(plasma_store plasma_static) +# Headers: top level +install(FILES + common.h + common_generated.h + client.h + events.h + plasma.h + plasma_generated.h + protocol.h + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/plasma") + +# Plasma store +install(TARGETS plasma_store DESTINATION ${CMAKE_INSTALL_BINDIR}) + +# pkg-config support +configure_file(plasma.pc.in + "${CMAKE_CURRENT_BINARY_DIR}/plasma.pc" + @ONLY) +install( + FILES "${CMAKE_CURRENT_BINARY_DIR}/plasma.pc" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig/") + +####################################### +# Unit tests +####################################### + ADD_ARROW_TEST(test/serialization_tests) ARROW_TEST_LINK_LIBRARIES(test/serialization_tests plasma_static) ADD_ARROW_TEST(test/client_tests) ARROW_TEST_LINK_LIBRARIES(test/client_tests plasma_static) - -if(PLASMA_PYTHON) - add_library(plasma_extension SHARED extension.cc) - - if(APPLE) - target_link_libraries(plasma_extension plasma_static "-undefined dynamic_lookup") - else(APPLE) - target_link_libraries(plasma_extension plasma_static -Wl,--whole-archive ${FLATBUFFERS_STATIC_LIB} -Wl,--no-whole-archive) - endif(APPLE) -endif() diff --git a/cpp/src/plasma/client.cc b/cpp/src/plasma/client.cc index dcb78e7ec52c6..62bfbec21c466 100644 --- a/cpp/src/plasma/client.cc +++ b/cpp/src/plasma/client.cc @@ -51,11 +51,31 @@ #define XXH64_DEFAULT_SEED 0 +namespace plasma { + // Number of threads used for memcopy and hash computations. constexpr int64_t kThreadPoolSize = 8; constexpr int64_t kBytesInMB = 1 << 20; static std::vector threadpool_(kThreadPoolSize); +struct ObjectInUseEntry { + /// A count of the number of times this client has called PlasmaClient::Create + /// or + /// PlasmaClient::Get on this object ID minus the number of calls to + /// PlasmaClient::Release. + /// When this count reaches zero, we remove the entry from the ObjectsInUse + /// and decrement a count in the relevant ClientMmapTableEntry. + int count; + /// Cached information to read the object. + PlasmaObject object; + /// A flag representing whether the object has been sealed. + bool is_sealed; +}; + +PlasmaClient::PlasmaClient() {} + +PlasmaClient::~PlasmaClient() {} + // If the file descriptor fd has been mmapped in this client process before, // return the pointer that was returned by mmap, otherwise mmap it and store the // pointer in a hash table. @@ -300,6 +320,10 @@ Status PlasmaClient::PerformRelease(const ObjectID& object_id) { } Status PlasmaClient::Release(const ObjectID& object_id) { + // If the client is already disconnected, ignore release requests. + if (store_conn_ < 0) { + return Status::OK(); + } // Add the new object to the release history. 
release_history_.push_front(object_id); // If there are too many bytes in use by the client or if there are too many @@ -386,22 +410,6 @@ static uint64_t compute_object_hash(const ObjectBuffer& obj_buffer) { return XXH64_digest(&hash_state); } -bool plasma_compute_object_hash( - PlasmaClient* conn, ObjectID object_id, unsigned char* digest) { - // Get the plasma object data. We pass in a timeout of 0 to indicate that - // the operation should timeout immediately. - ObjectBuffer object_buffer; - ARROW_CHECK_OK(conn->Get(&object_id, 1, 0, &object_buffer)); - // If the object was not retrieved, return false. - if (object_buffer.data_size == -1) { return false; } - // Compute the hash. - uint64_t hash = compute_object_hash(object_buffer); - memcpy(digest, &hash, sizeof(hash)); - // Release the plasma object. - ARROW_CHECK_OK(conn->Release(object_id)); - return true; -} - Status PlasmaClient::Seal(const ObjectID& object_id) { // Make sure this client has a reference to the object before sending the // request to Plasma. @@ -413,7 +421,7 @@ Status PlasmaClient::Seal(const ObjectID& object_id) { object_entry->second->is_sealed = true; /// Send the seal request to Plasma. static unsigned char digest[kDigestSize]; - ARROW_CHECK(plasma_compute_object_hash(this, object_id, &digest[0])); + RETURN_NOT_OK(Hash(object_id, &digest[0])); RETURN_NOT_OK(SendSealRequest(store_conn_, object_id, &digest[0])); // We call PlasmaClient::Release to decrement the number of instances of this // object @@ -439,6 +447,22 @@ Status PlasmaClient::Evict(int64_t num_bytes, int64_t& num_bytes_evicted) { return ReadEvictReply(buffer.data(), num_bytes_evicted); } +Status PlasmaClient::Hash(const ObjectID& object_id, uint8_t* digest) { + // Get the plasma object data. We pass in a timeout of 0 to indicate that + // the operation should timeout immediately. + ObjectBuffer object_buffer; + RETURN_NOT_OK(Get(&object_id, 1, 0, &object_buffer)); + // If the object was not retrieved, return false. + if (object_buffer.data_size == -1) { + return Status::PlasmaObjectNonexistent("Object not found"); + } + // Compute the hash. + uint64_t hash = compute_object_hash(object_buffer); + memcpy(digest, &hash, sizeof(hash)); + // Release the plasma object. + return Release(object_id); +} + Status PlasmaClient::Subscribe(int* fd) { int sock[2]; // Create a non-blocking socket pair. This will only be used to send @@ -459,6 +483,26 @@ Status PlasmaClient::Subscribe(int* fd) { return Status::OK(); } +Status PlasmaClient::GetNotification( + int fd, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size) { + uint8_t* notification = read_message_async(fd); + if (notification == NULL) { + return Status::IOError("Failed to read object notification from Plasma socket"); + } + auto object_info = flatbuffers::GetRoot(notification); + ARROW_CHECK(object_info->object_id()->size() == sizeof(ObjectID)); + memcpy(object_id, object_info->object_id()->data(), sizeof(ObjectID)); + if (object_info->is_deletion()) { + *data_size = -1; + *metadata_size = -1; + } else { + *data_size = object_info->data_size(); + *metadata_size = object_info->metadata_size(); + } + delete[] notification; + return Status::OK(); +} + Status PlasmaClient::Connect(const std::string& store_socket_name, const std::string& manager_socket_name, int release_delay) { store_conn_ = connect_ipc_sock_retry(store_socket_name, -1, -1); @@ -485,7 +529,11 @@ Status PlasmaClient::Disconnect() { // Close the connections to Plasma. 
The Plasma store will release the objects // that were in use by us when handling the SIGPIPE. close(store_conn_); - if (manager_conn_ >= 0) { close(manager_conn_); } + store_conn_ = -1; + if (manager_conn_ >= 0) { + close(manager_conn_); + manager_conn_ = -1; + } return Status::OK(); } @@ -555,3 +603,5 @@ Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_req } return Status::OK(); } + +} // namespace plasma diff --git a/cpp/src/plasma/client.h b/cpp/src/plasma/client.h index fb3a161795d47..d9ed9f7c26698 100644 --- a/cpp/src/plasma/client.h +++ b/cpp/src/plasma/client.h @@ -22,12 +22,18 @@ #include #include +#include #include +#include -#include "plasma/plasma.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" +#include "plasma/common.h" using arrow::Status; +namespace plasma { + #define PLASMA_DEFAULT_RELEASE_DELAY 64 // Use 100MB as an overestimate of the L3 cache size. @@ -63,22 +69,16 @@ struct ClientMmapTableEntry { int count; }; -struct ObjectInUseEntry { - /// A count of the number of times this client has called PlasmaClient::Create - /// or - /// PlasmaClient::Get on this object ID minus the number of calls to - /// PlasmaClient::Release. - /// When this count reaches zero, we remove the entry from the ObjectsInUse - /// and decrement a count in the relevant ClientMmapTableEntry. - int count; - /// Cached information to read the object. - PlasmaObject object; - /// A flag representing whether the object has been sealed. - bool is_sealed; -}; +struct ObjectInUseEntry; +struct ObjectRequest; +struct PlasmaObject; -class PlasmaClient { +class ARROW_EXPORT PlasmaClient { public: + PlasmaClient(); + + ~PlasmaClient(); + /// Connect to the local plasma store and plasma manager. Return /// the resulting connection. /// @@ -177,10 +177,18 @@ class PlasmaClient { /// @return The return status. Status Evict(int64_t num_bytes, int64_t& num_bytes_evicted); + /// Compute the hash of an object in the object store. + /// + /// @param conn The object containing the connection state. + /// @param object_id The ID of the object we want to hash. + /// @param digest A pointer at which to return the hash digest of the object. + /// The pointer must have at least kDigestSize bytes allocated. + /// @return The return status. + Status Hash(const ObjectID& object_id, uint8_t* digest); + /// Subscribe to notifications when objects are sealed in the object store. /// Whenever an object is sealed, a message will be written to the client - /// socket - /// that is returned by this method. + /// socket that is returned by this method. /// /// @param fd Out parameter for the file descriptor the client should use to /// read notifications @@ -188,6 +196,16 @@ class PlasmaClient { /// @return The return status. Status Subscribe(int* fd); + /// Receive next object notification for this client if Subscribe has been called. + /// + /// @param fd The file descriptor we are reading the notification from. + /// @param object_id Out parameter, the object_id of the object that was sealed. + /// @param data_size Out parameter, the data size of the object that was sealed. + /// @param metadata_size Out parameter, the metadata size of the object that was sealed. + /// @return The return status. + Status GetNotification( + int fd, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size); + /// Disconnect from the local plasma instance, including the local store and /// manager. 
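[Editor's aside, not part of the patch] To show how the methods documented above fit together, here is a minimal, hypothetical usage sketch based only on the declarations in this header: Subscribe() yields a notification file descriptor, GetNotification() reads the next seal/deletion event from it, and Hash() computes the object digest. The function name is invented for illustration, the client is assumed to be already connected, and error handling is reduced to early returns.

#include <cstdint>
#include "plasma/client.h"

// Illustrative only: wait for the next seal notification and hash that object.
// `digest` must point to at least plasma::kDigestSize bytes.
arrow::Status HashNextSealedObject(plasma::PlasmaClient* client, uint8_t* digest) {
  int fd = -1;
  arrow::Status s = client->Subscribe(&fd);
  if (!s.ok()) { return s; }

  plasma::ObjectID object_id;
  int64_t data_size = 0;
  int64_t metadata_size = 0;
  s = client->GetNotification(fd, &object_id, &data_size, &metadata_size);
  if (!s.ok()) { return s; }

  // A data_size of -1 marks a deletion notification rather than a seal.
  if (data_size == -1) { return arrow::Status::OK(); }

  return client->Hash(object_id, digest);
}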
/// @@ -330,14 +348,6 @@ class PlasmaClient { int64_t store_capacity_; }; -/// Compute the hash of an object in the object store. -/// -/// @param conn The object containing the connection state. -/// @param object_id The ID of the object we want to hash. -/// @param digest A pointer at which to return the hash digest of the object. -/// The pointer must have at least DIGEST_SIZE bytes allocated. -/// @return A boolean representing whether the hash operation succeeded. -bool plasma_compute_object_hash( - PlasmaClient* conn, ObjectID object_id, unsigned char* digest); +} // namespace plasma #endif // PLASMA_CLIENT_H diff --git a/cpp/src/plasma/common.cc b/cpp/src/plasma/common.cc index a09a963fa4769..a5f530e202ff4 100644 --- a/cpp/src/plasma/common.cc +++ b/cpp/src/plasma/common.cc @@ -19,7 +19,9 @@ #include -#include "format/plasma_generated.h" +#include "plasma/plasma_generated.h" + +namespace plasma { using arrow::Status; @@ -81,3 +83,8 @@ Status plasma_error_status(int plasma_error) { } return Status::OK(); } + +ARROW_EXPORT int ObjectStatusLocal = ObjectStatus_Local; +ARROW_EXPORT int ObjectStatusRemote = ObjectStatus_Remote; + +} // namespace plasma diff --git a/cpp/src/plasma/common.h b/cpp/src/plasma/common.h index 85dc74bf86e0d..6f2d4dd841b88 100644 --- a/cpp/src/plasma/common.h +++ b/cpp/src/plasma/common.h @@ -29,9 +29,11 @@ #include "arrow/status.h" #include "arrow/util/logging.h" +namespace plasma { + constexpr int64_t kUniqueIDSize = 20; -class UniqueID { +class ARROW_EXPORT UniqueID { public: static UniqueID from_random(); static UniqueID from_binary(const std::string& binary); @@ -60,4 +62,39 @@ typedef UniqueID ObjectID; arrow::Status plasma_error_status(int plasma_error); +/// Size of object hash digests. +constexpr int64_t kDigestSize = sizeof(uint64_t); + +/// Object request data structure. Used for Wait. +struct ObjectRequest { + /// The ID of the requested object. If ID_NIL request any object. + ObjectID object_id; + /// Request associated to the object. It can take one of the following values: + /// - PLASMA_QUERY_LOCAL: return if or when the object is available in the + /// local Plasma Store. + /// - PLASMA_QUERY_ANYWHERE: return if or when the object is available in + /// the system (i.e., either in the local or a remote Plasma Store). + int type; + /// Object status. Same as the status returned by plasma_status() function + /// call. This is filled in by plasma_wait_for_objects1(): + /// - ObjectStatus_Local: object is ready at the local Plasma Store. + /// - ObjectStatus_Remote: object is ready at a remote Plasma Store. + /// - ObjectStatus_Nonexistent: object does not exist in the system. + /// - PLASMA_CLIENT_IN_TRANSFER, if the object is currently being scheduled + /// for being transferred or it is transferring. + int status; +}; + +enum ObjectRequestType { + /// Query for object in the local plasma store. + PLASMA_QUERY_LOCAL = 1, + /// Query for object in the local plasma store or in a remote plasma store. 
+ PLASMA_QUERY_ANYWHERE +}; + +extern int ObjectStatusLocal; +extern int ObjectStatusRemote; + +} // namespace plasma + #endif // PLASMA_COMMON_H diff --git a/cpp/src/plasma/events.cc b/cpp/src/plasma/events.cc index a9f7356e1f67e..675424d5c2f1c 100644 --- a/cpp/src/plasma/events.cc +++ b/cpp/src/plasma/events.cc @@ -19,6 +19,8 @@ #include +namespace plasma { + void EventLoop::file_event_callback( aeEventLoop* loop, int fd, void* context, int events) { FileCallback* callback = reinterpret_cast(context); @@ -79,3 +81,5 @@ int EventLoop::remove_timer(int64_t timer_id) { timer_callbacks_.erase(timer_id); return err; } + +} // namespace plasma diff --git a/cpp/src/plasma/events.h b/cpp/src/plasma/events.h index bd93d6bb2a6fd..b989b7fac2476 100644 --- a/cpp/src/plasma/events.h +++ b/cpp/src/plasma/events.h @@ -26,6 +26,8 @@ extern "C" { #include "ae/ae.h" } +namespace plasma { + /// Constant specifying that the timer is done and it will be removed. constexpr int kEventLoopTimerDone = AE_NOMORE; @@ -96,4 +98,6 @@ class EventLoop { std::unordered_map> timer_callbacks_; }; +} // namespace plasma + #endif // PLASMA_EVENTS diff --git a/cpp/src/plasma/eviction_policy.cc b/cpp/src/plasma/eviction_policy.cc index 4ae6384d42543..ef18e33372998 100644 --- a/cpp/src/plasma/eviction_policy.cc +++ b/cpp/src/plasma/eviction_policy.cc @@ -19,6 +19,8 @@ #include +namespace plasma { + void LRUCache::add(const ObjectID& key, int64_t size) { auto it = item_map_.find(key); ARROW_CHECK(it == item_map_.end()); @@ -105,3 +107,5 @@ void EvictionPolicy::end_object_access( /* Add the object to the LRU cache.*/ cache_.add(object_id, entry->info.data_size + entry->info.metadata_size); } + +} // namespace plasma diff --git a/cpp/src/plasma/eviction_policy.h b/cpp/src/plasma/eviction_policy.h index 3815fc6652f0c..c4f218328312d 100644 --- a/cpp/src/plasma/eviction_policy.h +++ b/cpp/src/plasma/eviction_policy.h @@ -26,6 +26,8 @@ #include "plasma/common.h" #include "plasma/plasma.h" +namespace plasma { + // ==== The eviction policy ==== // // This file contains declaration for all functions and data structures that @@ -131,4 +133,6 @@ class EvictionPolicy { LRUCache cache_; }; +} // namespace plasma + #endif // PLASMA_EVICTION_POLICY_H diff --git a/cpp/src/plasma/extension.cc b/cpp/src/plasma/extension.cc deleted file mode 100644 index 5d61e337c108d..0000000000000 --- a/cpp/src/plasma/extension.cc +++ /dev/null @@ -1,456 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -#include "plasma/extension.h" - -#include -#include - -#include "plasma/client.h" -#include "plasma/common.h" -#include "plasma/io.h" -#include "plasma/protocol.h" - -PyObject* PlasmaOutOfMemoryError; -PyObject* PlasmaObjectExistsError; - -PyObject* PyPlasma_connect(PyObject* self, PyObject* args) { - const char* store_socket_name; - const char* manager_socket_name; - int release_delay; - if (!PyArg_ParseTuple( - args, "ssi", &store_socket_name, &manager_socket_name, &release_delay)) { - return NULL; - } - PlasmaClient* client = new PlasmaClient(); - ARROW_CHECK_OK(client->Connect(store_socket_name, manager_socket_name, release_delay)); - - return PyCapsule_New(client, "plasma", NULL); -} - -PyObject* PyPlasma_disconnect(PyObject* self, PyObject* args) { - PyObject* client_capsule; - if (!PyArg_ParseTuple(args, "O", &client_capsule)) { return NULL; } - PlasmaClient* client; - ARROW_CHECK(PyObjectToPlasmaClient(client_capsule, &client)); - ARROW_CHECK_OK(client->Disconnect()); - /* We use the context of the connection capsule to indicate if the connection - * is still active (if the context is NULL) or if it is closed (if the context - * is (void*) 0x1). This is neccessary because the primary pointer of the - * capsule cannot be NULL. */ - PyCapsule_SetContext(client_capsule, reinterpret_cast(0x1)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_create(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - Py_ssize_t size; - PyObject* metadata; - if (!PyArg_ParseTuple(args, "O&O&nO", PyObjectToPlasmaClient, &client, - PyStringToUniqueID, &object_id, &size, &metadata)) { - return NULL; - } - if (!PyByteArray_Check(metadata)) { - PyErr_SetString(PyExc_TypeError, "metadata must be a bytearray"); - return NULL; - } - uint8_t* data; - Status s = client->Create(object_id, size, - reinterpret_cast(PyByteArray_AsString(metadata)), - PyByteArray_Size(metadata), &data); - if (s.IsPlasmaObjectExists()) { - PyErr_SetString(PlasmaObjectExistsError, - "An object with this ID already exists in the plasma " - "store."); - return NULL; - } - if (s.IsPlasmaStoreFull()) { - PyErr_SetString(PlasmaOutOfMemoryError, - "The plasma store ran out of memory and could not create " - "this object."); - return NULL; - } - ARROW_CHECK(s.ok()); - -#if PY_MAJOR_VERSION >= 3 - return PyMemoryView_FromMemory(reinterpret_cast(data), size, PyBUF_WRITE); -#else - return PyBuffer_FromReadWriteMemory(reinterpret_cast(data), size); -#endif -} - -PyObject* PyPlasma_hash(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - unsigned char digest[kDigestSize]; - bool success = plasma_compute_object_hash(client, object_id, digest); - if (success) { - PyObject* digest_string = - PyBytes_FromStringAndSize(reinterpret_cast(digest), kDigestSize); - return digest_string; - } else { - Py_RETURN_NONE; - } -} - -PyObject* PyPlasma_seal(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - ARROW_CHECK_OK(client->Seal(object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_release(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - 
ARROW_CHECK_OK(client->Release(object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_get(PyObject* self, PyObject* args) { - PlasmaClient* client; - PyObject* object_id_list; - Py_ssize_t timeout_ms; - if (!PyArg_ParseTuple( - args, "O&On", PyObjectToPlasmaClient, &client, &object_id_list, &timeout_ms)) { - return NULL; - } - - Py_ssize_t num_object_ids = PyList_Size(object_id_list); - std::vector object_ids(num_object_ids); - std::vector object_buffers(num_object_ids); - - for (int i = 0; i < num_object_ids; ++i) { - PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]); - } - - Py_BEGIN_ALLOW_THREADS; - ARROW_CHECK_OK( - client->Get(object_ids.data(), num_object_ids, timeout_ms, object_buffers.data())); - Py_END_ALLOW_THREADS; - - PyObject* returns = PyList_New(num_object_ids); - for (int i = 0; i < num_object_ids; ++i) { - if (object_buffers[i].data_size != -1) { - /* The object was retrieved, so return the object. */ - PyObject* t = PyTuple_New(2); - Py_ssize_t data_size = static_cast(object_buffers[i].data_size); - Py_ssize_t metadata_size = static_cast(object_buffers[i].metadata_size); -#if PY_MAJOR_VERSION >= 3 - char* data = reinterpret_cast(object_buffers[i].data); - char* metadata = reinterpret_cast(object_buffers[i].metadata); - PyTuple_SET_ITEM(t, 0, PyMemoryView_FromMemory(data, data_size, PyBUF_READ)); - PyTuple_SET_ITEM( - t, 1, PyMemoryView_FromMemory(metadata, metadata_size, PyBUF_READ)); -#else - void* data = reinterpret_cast(object_buffers[i].data); - void* metadata = reinterpret_cast(object_buffers[i].metadata); - PyTuple_SET_ITEM(t, 0, PyBuffer_FromMemory(data, data_size)); - PyTuple_SET_ITEM(t, 1, PyBuffer_FromMemory(metadata, metadata_size)); -#endif - ARROW_CHECK(PyList_SetItem(returns, i, t) == 0); - } else { - /* The object was not retrieved, so just add None to the list of return - * values. 
*/ - Py_INCREF(Py_None); - ARROW_CHECK(PyList_SetItem(returns, i, Py_None) == 0); - } - } - return returns; -} - -PyObject* PyPlasma_contains(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - bool has_object; - ARROW_CHECK_OK(client->Contains(object_id, &has_object)); - - if (has_object) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } -} - -PyObject* PyPlasma_fetch(PyObject* self, PyObject* args) { - PlasmaClient* client; - PyObject* object_id_list; - if (!PyArg_ParseTuple(args, "O&O", PyObjectToPlasmaClient, &client, &object_id_list)) { - return NULL; - } - if (client->get_manager_fd() == -1) { - PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); - return NULL; - } - Py_ssize_t n = PyList_Size(object_id_list); - ObjectID* object_ids = new ObjectID[n]; - for (int i = 0; i < n; ++i) { - PyStringToUniqueID(PyList_GetItem(object_id_list, i), &object_ids[i]); - } - ARROW_CHECK_OK(client->Fetch(static_cast(n), object_ids)); - delete[] object_ids; - Py_RETURN_NONE; -} - -PyObject* PyPlasma_wait(PyObject* self, PyObject* args) { - PlasmaClient* client; - PyObject* object_id_list; - Py_ssize_t timeout; - int num_returns; - if (!PyArg_ParseTuple(args, "O&Oni", PyObjectToPlasmaClient, &client, &object_id_list, - &timeout, &num_returns)) { - return NULL; - } - Py_ssize_t n = PyList_Size(object_id_list); - - if (client->get_manager_fd() == -1) { - PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); - return NULL; - } - if (num_returns < 0) { - PyErr_SetString( - PyExc_RuntimeError, "The argument num_returns cannot be less than zero."); - return NULL; - } - if (num_returns > n) { - PyErr_SetString(PyExc_RuntimeError, - "The argument num_returns cannot be greater than len(object_ids)"); - return NULL; - } - int64_t threshold = 1 << 30; - if (timeout > threshold) { - PyErr_SetString( - PyExc_RuntimeError, "The argument timeout cannot be greater than 2 ** 30."); - return NULL; - } - - std::vector object_requests(n); - for (int i = 0; i < n; ++i) { - ARROW_CHECK(PyStringToUniqueID(PyList_GetItem(object_id_list, i), - &object_requests[i].object_id) == 1); - object_requests[i].type = PLASMA_QUERY_ANYWHERE; - } - /* Drop the global interpreter lock while we are waiting, so other threads can - * run. */ - int num_return_objects; - Py_BEGIN_ALLOW_THREADS; - ARROW_CHECK_OK( - client->Wait(n, object_requests.data(), num_returns, timeout, &num_return_objects)); - Py_END_ALLOW_THREADS; - - int num_to_return = std::min(num_return_objects, num_returns); - PyObject* ready_ids = PyList_New(num_to_return); - PyObject* waiting_ids = PySet_New(object_id_list); - int num_returned = 0; - for (int i = 0; i < n; ++i) { - if (num_returned == num_to_return) { break; } - if (object_requests[i].status == ObjectStatus_Local || - object_requests[i].status == ObjectStatus_Remote) { - PyObject* ready = PyBytes_FromStringAndSize( - reinterpret_cast(&object_requests[i].object_id), - sizeof(object_requests[i].object_id)); - PyList_SetItem(ready_ids, num_returned, ready); - PySet_Discard(waiting_ids, ready); - num_returned += 1; - } else { - ARROW_CHECK(object_requests[i].status == ObjectStatus_Nonexistent); - } - } - ARROW_CHECK(num_returned == num_to_return); - /* Return both the ready IDs and the remaining IDs. 
*/ - PyObject* t = PyTuple_New(2); - PyTuple_SetItem(t, 0, ready_ids); - PyTuple_SetItem(t, 1, waiting_ids); - return t; -} - -PyObject* PyPlasma_evict(PyObject* self, PyObject* args) { - PlasmaClient* client; - Py_ssize_t num_bytes; - if (!PyArg_ParseTuple(args, "O&n", PyObjectToPlasmaClient, &client, &num_bytes)) { - return NULL; - } - int64_t evicted_bytes; - ARROW_CHECK_OK(client->Evict(static_cast(num_bytes), evicted_bytes)); - return PyLong_FromSsize_t(static_cast(evicted_bytes)); -} - -PyObject* PyPlasma_delete(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - if (!PyArg_ParseTuple(args, "O&O&", PyObjectToPlasmaClient, &client, PyStringToUniqueID, - &object_id)) { - return NULL; - } - ARROW_CHECK_OK(client->Delete(object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_transfer(PyObject* self, PyObject* args) { - PlasmaClient* client; - ObjectID object_id; - const char* addr; - int port; - if (!PyArg_ParseTuple(args, "O&O&si", PyObjectToPlasmaClient, &client, - PyStringToUniqueID, &object_id, &addr, &port)) { - return NULL; - } - - if (client->get_manager_fd() == -1) { - PyErr_SetString(PyExc_RuntimeError, "Not connected to the plasma manager"); - return NULL; - } - - ARROW_CHECK_OK(client->Transfer(addr, port, object_id)); - Py_RETURN_NONE; -} - -PyObject* PyPlasma_subscribe(PyObject* self, PyObject* args) { - PlasmaClient* client; - if (!PyArg_ParseTuple(args, "O&", PyObjectToPlasmaClient, &client)) { return NULL; } - - int sock; - ARROW_CHECK_OK(client->Subscribe(&sock)); - return PyLong_FromLong(sock); -} - -PyObject* PyPlasma_receive_notification(PyObject* self, PyObject* args) { - int plasma_sock; - - if (!PyArg_ParseTuple(args, "i", &plasma_sock)) { return NULL; } - /* Receive object notification from the plasma connection socket. If the - * object was added, return a tuple of its fields: ObjectID, data_size, - * metadata_size. If the object was deleted, data_size and metadata_size will - * be set to -1. */ - uint8_t* notification = read_message_async(plasma_sock); - if (notification == NULL) { - PyErr_SetString( - PyExc_RuntimeError, "Failed to read object notification from Plasma socket"); - return NULL; - } - auto object_info = flatbuffers::GetRoot(notification); - /* Construct a tuple from object_info and return. 
*/ - PyObject* t = PyTuple_New(3); - PyTuple_SetItem(t, 0, PyBytes_FromStringAndSize(object_info->object_id()->data(), - object_info->object_id()->size())); - if (object_info->is_deletion()) { - PyTuple_SetItem(t, 1, PyLong_FromLong(-1)); - PyTuple_SetItem(t, 2, PyLong_FromLong(-1)); - } else { - PyTuple_SetItem(t, 1, PyLong_FromLong(object_info->data_size())); - PyTuple_SetItem(t, 2, PyLong_FromLong(object_info->metadata_size())); - } - - delete[] notification; - return t; -} - -static PyMethodDef plasma_methods[] = { - {"connect", PyPlasma_connect, METH_VARARGS, "Connect to plasma."}, - {"disconnect", PyPlasma_disconnect, METH_VARARGS, "Disconnect from plasma."}, - {"create", PyPlasma_create, METH_VARARGS, "Create a new plasma object."}, - {"hash", PyPlasma_hash, METH_VARARGS, "Compute the hash of a plasma object."}, - {"seal", PyPlasma_seal, METH_VARARGS, "Seal a plasma object."}, - {"get", PyPlasma_get, METH_VARARGS, "Get a plasma object."}, - {"contains", PyPlasma_contains, METH_VARARGS, - "Does the plasma store contain this plasma object?"}, - {"fetch", PyPlasma_fetch, METH_VARARGS, - "Fetch the object from another plasma manager instance."}, - {"wait", PyPlasma_wait, METH_VARARGS, - "Wait until num_returns objects in object_ids are ready."}, - {"evict", PyPlasma_evict, METH_VARARGS, - "Evict some objects until we recover some number of bytes."}, - {"release", PyPlasma_release, METH_VARARGS, "Release the plasma object."}, - {"delete", PyPlasma_delete, METH_VARARGS, "Delete a plasma object."}, - {"transfer", PyPlasma_transfer, METH_VARARGS, - "Transfer object to another plasma manager."}, - {"subscribe", PyPlasma_subscribe, METH_VARARGS, - "Subscribe to the plasma notification socket."}, - {"receive_notification", PyPlasma_receive_notification, METH_VARARGS, - "Receive next notification from plasma notification socket."}, - {NULL} /* Sentinel */ -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, "libplasma", /* m_name */ - "A Python client library for plasma.", /* m_doc */ - 0, /* m_size */ - plasma_methods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL, /* m_free */ -}; -#endif - -#if PY_MAJOR_VERSION >= 3 -#define INITERROR return NULL -#else -#define INITERROR return -#endif - -#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ -#define PyMODINIT_FUNC void -#endif - -#if PY_MAJOR_VERSION >= 3 -#define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void) -#else -#define MOD_INIT(name) PyMODINIT_FUNC init##name(void) -#endif - -MOD_INIT(libplasma) { -#if PY_MAJOR_VERSION >= 3 - PyObject* m = PyModule_Create(&moduledef); -#else - PyObject* m = - Py_InitModule3("libplasma", plasma_methods, "A Python client library for plasma."); -#endif - - /* Create a custom exception for when an object ID is reused. */ - char plasma_object_exists_error[] = "plasma_object_exists.error"; - PlasmaObjectExistsError = PyErr_NewException(plasma_object_exists_error, NULL, NULL); - Py_INCREF(PlasmaObjectExistsError); - PyModule_AddObject(m, "plasma_object_exists_error", PlasmaObjectExistsError); - /* Create a custom exception for when the plasma store is out of memory. 
*/ - char plasma_out_of_memory_error[] = "plasma_out_of_memory.error"; - PlasmaOutOfMemoryError = PyErr_NewException(plasma_out_of_memory_error, NULL, NULL); - Py_INCREF(PlasmaOutOfMemoryError); - PyModule_AddObject(m, "plasma_out_of_memory_error", PlasmaOutOfMemoryError); - -#if PY_MAJOR_VERSION >= 3 - return m; -#endif -} diff --git a/cpp/src/plasma/extension.h b/cpp/src/plasma/extension.h deleted file mode 100644 index cee30abb3592d..0000000000000 --- a/cpp/src/plasma/extension.h +++ /dev/null @@ -1,50 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef PLASMA_EXTENSION_H -#define PLASMA_EXTENSION_H - -#undef _XOPEN_SOURCE -#undef _POSIX_C_SOURCE -#include - -#include "bytesobject.h" // NOLINT - -#include "plasma/client.h" -#include "plasma/common.h" - -static int PyObjectToPlasmaClient(PyObject* object, PlasmaClient** client) { - if (PyCapsule_IsValid(object, "plasma")) { - *client = reinterpret_cast(PyCapsule_GetPointer(object, "plasma")); - return 1; - } else { - PyErr_SetString(PyExc_TypeError, "must be a 'plasma' capsule"); - return 0; - } -} - -int PyStringToUniqueID(PyObject* object, ObjectID* object_id) { - if (PyBytes_Check(object)) { - memcpy(object_id, PyBytes_AsString(object), sizeof(ObjectID)); - return 1; - } else { - PyErr_SetString(PyExc_TypeError, "must be a 20 character string"); - return 0; - } -} - -#endif // PLASMA_EXTENSION_H diff --git a/cpp/src/plasma/plasma.cc b/cpp/src/plasma/plasma.cc index 559d8e7f2a65e..bfed5009b6157 100644 --- a/cpp/src/plasma/plasma.cc +++ b/cpp/src/plasma/plasma.cc @@ -24,6 +24,8 @@ #include "plasma/common.h" #include "plasma/protocol.h" +namespace plasma { + int warn_if_sigpipe(int status, int client_sock) { if (status >= 0) { return 0; } if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) { @@ -62,3 +64,5 @@ ObjectTableEntry* get_object_table_entry( if (it == store_info->objects.end()) { return NULL; } return it->second.get(); } + +} // namespace plasma diff --git a/cpp/src/plasma/plasma.h b/cpp/src/plasma/plasma.h index 275d0c7a41687..db8669ff0ddc2 100644 --- a/cpp/src/plasma/plasma.h +++ b/cpp/src/plasma/plasma.h @@ -32,8 +32,10 @@ #include "arrow/status.h" #include "arrow/util/logging.h" -#include "format/common_generated.h" #include "plasma/common.h" +#include "plasma/common_generated.h" + +namespace plasma { #define HANDLE_SIGPIPE(s, fd_) \ do { \ @@ -54,47 +56,23 @@ /// Allocation granularity used in plasma for object allocation. #define BLOCK_SIZE 64 -/// Size of object hash digests. -constexpr int64_t kDigestSize = sizeof(uint64_t); - struct Client; -/// Object request data structure. Used in the plasma_wait_for_objects() -/// argument. -typedef struct { - /// The ID of the requested object. If ID_NIL request any object. 
- ObjectID object_id; - /// Request associated to the object. It can take one of the following values: - /// - PLASMA_QUERY_LOCAL: return if or when the object is available in the - /// local Plasma Store. - /// - PLASMA_QUERY_ANYWHERE: return if or when the object is available in - /// the system (i.e., either in the local or a remote Plasma Store). - int type; - /// Object status. Same as the status returned by plasma_status() function - /// call. This is filled in by plasma_wait_for_objects1(): - /// - ObjectStatus_Local: object is ready at the local Plasma Store. - /// - ObjectStatus_Remote: object is ready at a remote Plasma Store. - /// - ObjectStatus_Nonexistent: object does not exist in the system. - /// - PLASMA_CLIENT_IN_TRANSFER, if the object is currently being scheduled - /// for being transferred or it is transferring. - int status; -} ObjectRequest; - /// Mapping from object IDs to type and status of the request. typedef std::unordered_map ObjectRequestMap; /// Handle to access memory mapped file and map it into client address space. -typedef struct { +struct object_handle { /// The file descriptor of the memory mapped file in the store. It is used as /// a unique identifier of the file in the client to look up the corresponding /// file descriptor on the client's side. int store_fd; /// The size in bytes of the memory mapped file. int64_t mmap_size; -} object_handle; +}; // TODO(pcm): Replace this by the flatbuffers message PlasmaObjectSpec. -typedef struct { +struct PlasmaObject { /// Handle for memory mapped file the object is stored in. object_handle handle; /// The offset in bytes in the memory mapped file of the data. @@ -105,28 +83,21 @@ typedef struct { int64_t data_size; /// The size in bytes of the metadata. int64_t metadata_size; -} PlasmaObject; +}; -typedef enum { +enum object_state { /// Object was created but not sealed in the local Plasma Store. PLASMA_CREATED = 1, /// Object is sealed and stored in the local Plasma Store. PLASMA_SEALED -} object_state; +}; -typedef enum { +enum object_status { /// The object was not found. OBJECT_NOT_FOUND = 0, /// The object was found. OBJECT_FOUND = 1 -} object_status; - -typedef enum { - /// Query for object in the local plasma store. - PLASMA_QUERY_LOCAL = 1, - /// Query for object in the local plasma store or in a remote plasma store. - PLASMA_QUERY_ANYWHERE -} object_request_type; +}; /// This type is used by the Plasma store. It is here because it is exposed to /// the eviction policy. @@ -188,4 +159,6 @@ int warn_if_sigpipe(int status, int client_sock); uint8_t* create_object_info_buffer(ObjectInfoT* object_info); +} // namespace plasma + #endif // PLASMA_PLASMA_H diff --git a/cpp/src/plasma/plasma.pc.in b/cpp/src/plasma/plasma.pc.in new file mode 100644 index 0000000000000..d86868939f363 --- /dev/null +++ b/cpp/src/plasma/plasma.pc.in @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +prefix=@CMAKE_INSTALL_PREFIX@ +libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ +includedir=${prefix}/include + +so_version=@PLASMA_SO_VERSION@ +abi_version=@PLASMA_ABI_VERSION@ +executable=${prefix}/@CMAKE_INSTALL_BINDIR@/plasma_store + +Name: Plasma +Description: Plasma is an in-memory object store and cache for big data. +Version: @PLASMA_VERSION@ +Libs: -L${libdir} -lplasma +Cflags: -I${includedir} diff --git a/cpp/src/plasma/protocol.cc b/cpp/src/plasma/protocol.cc index 246aa29736056..2998c68b82785 100644 --- a/cpp/src/plasma/protocol.cc +++ b/cpp/src/plasma/protocol.cc @@ -18,11 +18,13 @@ #include "plasma/protocol.h" #include "flatbuffers/flatbuffers.h" -#include "format/plasma_generated.h" +#include "plasma/plasma_generated.h" #include "plasma/common.h" #include "plasma/io.h" +namespace plasma { + using flatbuffers::uoffset_t; flatbuffers::Offset>> @@ -500,3 +502,5 @@ Status ReadDataReply( *metadata_size = (int64_t)message->metadata_size(); return Status::OK(); } + +} // namespace plasma diff --git a/cpp/src/plasma/protocol.h b/cpp/src/plasma/protocol.h index 5d9d13675144f..835c5a0b58978 100644 --- a/cpp/src/plasma/protocol.h +++ b/cpp/src/plasma/protocol.h @@ -21,9 +21,11 @@ #include #include "arrow/status.h" -#include "format/plasma_generated.h" +#include "plasma/plasma_generated.h" #include "plasma/plasma.h" +namespace plasma { + using arrow::Status; /* Plasma receive message. */ @@ -167,4 +169,6 @@ Status SendDataReply( Status ReadDataReply( uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size); +} // namespace plasma + #endif /* PLASMA_PROTOCOL */ diff --git a/cpp/src/plasma/store.cc b/cpp/src/plasma/store.cc index 9394e3de310b2..8d4fb106f5367 100644 --- a/cpp/src/plasma/store.cc +++ b/cpp/src/plasma/store.cc @@ -49,12 +49,14 @@ #include #include -#include "format/common_generated.h" +#include "plasma/common_generated.h" #include "plasma/common.h" #include "plasma/fling.h" #include "plasma/io.h" #include "plasma/malloc.h" +namespace plasma { + extern "C" { void* dlmalloc(size_t bytes); void* dlmemalign(size_t alignment, size_t bytes); @@ -625,8 +627,10 @@ void start_server(char* socket_name, int64_t system_memory) { loop.run(); } +} // namespace plasma + int main(int argc, char* argv[]) { - signal(SIGTERM, signal_handler); + signal(SIGTERM, plasma::signal_handler); char* socket_name = NULL; int64_t system_memory = -1; int c; @@ -677,7 +681,7 @@ int main(int argc, char* argv[]) { #endif // Make it so dlmalloc fails if we try to request more memory than is // available. 
- dlmalloc_set_footprint_limit((size_t)system_memory); + plasma::dlmalloc_set_footprint_limit((size_t)system_memory); ARROW_LOG(DEBUG) << "starting server listening on " << socket_name; - start_server(socket_name, system_memory); + plasma::start_server(socket_name, system_memory); } diff --git a/cpp/src/plasma/store.h b/cpp/src/plasma/store.h index 8bd94265410f6..27c3813da8e7a 100644 --- a/cpp/src/plasma/store.h +++ b/cpp/src/plasma/store.h @@ -27,6 +27,8 @@ #include "plasma/plasma.h" #include "plasma/protocol.h" +namespace plasma { + struct GetRequest; struct NotificationQueue { @@ -166,4 +168,6 @@ class PlasmaStore { std::unordered_map pending_notifications_; }; +} // namespace plasma + #endif // PLASMA_STORE_H diff --git a/cpp/src/plasma/test/client_tests.cc b/cpp/src/plasma/test/client_tests.cc index 29b5b135144c3..6dc558e77078b 100644 --- a/cpp/src/plasma/test/client_tests.cc +++ b/cpp/src/plasma/test/client_tests.cc @@ -29,7 +29,9 @@ #include "plasma/plasma.h" #include "plasma/protocol.h" -std::string g_test_executable; // NOLINT +namespace plasma { + +std::string test_executable; // NOLINT class TestPlasmaStore : public ::testing::Test { public: @@ -37,7 +39,7 @@ class TestPlasmaStore : public ::testing::Test { // stdout of the object store. Consider changing that. void SetUp() { std::string plasma_directory = - g_test_executable.substr(0, g_test_executable.find_last_of("/")); + test_executable.substr(0, test_executable.find_last_of("/")); std::string plasma_command = plasma_directory + "/plasma_store -m 1000000000 -s /tmp/store 1> /dev/null 2> /dev/null &"; @@ -125,8 +127,10 @@ TEST_F(TestPlasmaStore, MultipleGetTest) { ASSERT_EQ(object_buffer[1].data[0], 2); } +} // namespace plasma + int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); - g_test_executable = std::string(argv[0]); + plasma::test_executable = std::string(argv[0]); return RUN_ALL_TESTS(); } diff --git a/cpp/src/plasma/test/serialization_tests.cc b/cpp/src/plasma/test/serialization_tests.cc index 325cead06e770..13938cd6fb042 100644 --- a/cpp/src/plasma/test/serialization_tests.cc +++ b/cpp/src/plasma/test/serialization_tests.cc @@ -25,6 +25,8 @@ #include "plasma/plasma.h" #include "plasma/protocol.h" +namespace plasma { + /** * Create a temporary file. Needs to be closed by the caller. 
* @@ -386,3 +388,5 @@ TEST(PlasmaSerialization, DataReply) { ASSERT_EQ(object_size1, object_size2); ASSERT_EQ(metadata_size1, metadata_size2); } + +} // namespace plasma diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 224147d8b5c3b..6ff66462958ef 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -220,6 +220,12 @@ include_directories(SYSTEM find_package(Arrow REQUIRED) include_directories(SYSTEM ${ARROW_INCLUDE_DIR}) +## Plasma +find_package(Plasma) +if (PLASMA_FOUND) + include_directories(SYSTEM ${PLASMA_INCLUDE_DIR}) +endif() + function(bundle_arrow_lib library_path) get_filename_component(LIBRARY_DIR ${${library_path}} DIRECTORY) get_filename_component(LIBRARY_NAME ${${library_path}} NAME_WE) @@ -252,6 +258,9 @@ if (PYARROW_BUNDLE_ARROW_CPP) file(COPY ${ARROW_INCLUDE_DIR}/arrow DESTINATION ${BUILD_OUTPUT_ROOT_DIRECTORY}/include) bundle_arrow_lib(ARROW_SHARED_LIB) bundle_arrow_lib(ARROW_PYTHON_SHARED_LIB) + if (PLASMA_FOUND) + bundle_arrow_lib(PLASMA_SHARED_LIB) + endif() endif() if (MSVC) @@ -278,9 +287,14 @@ set(CYTHON_EXTENSIONS lib ) +if (PLASMA_FOUND) + set(CYTHON_EXTENSIONS ${CYTHON_EXTENSIONS} plasma) +endif() + set(LINK_LIBS arrow_shared arrow_python_shared + ${PLASMA_SHARED_LIB} ) if (PYARROW_BUILD_PARQUET) @@ -379,3 +393,7 @@ foreach(module ${CYTHON_EXTENSIONS}) target_link_libraries(${module_name} ${LINK_LIBS}) endforeach(module) + +if (PLASMA_FOUND) + file(COPY ${PLASMA_EXECUTABLE} DESTINATION ${BUILD_OUTPUT_ROOT_DIRECTORY}) +endif() diff --git a/python/cmake_modules/FindPlasma.cmake b/python/cmake_modules/FindPlasma.cmake new file mode 100644 index 0000000000000..3acaa348bffa8 --- /dev/null +++ b/python/cmake_modules/FindPlasma.cmake @@ -0,0 +1,99 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# - Find PLASMA (plasma/client.h, libplasma.a, libplasma.so) +# This module defines +# PLASMA_INCLUDE_DIR, directory containing headers +# PLASMA_LIBS, directory containing plasma libraries +# PLASMA_STATIC_LIB, path to libplasma.a +# PLASMA_SHARED_LIB, path to libplasma's shared library +# PLASMA_SHARED_IMP_LIB, path to libplasma's import library (MSVC only) +# PLASMA_FOUND, whether plasma has been found + +include(FindPkgConfig) + +if ("$ENV{ARROW_HOME}" STREQUAL "") + pkg_check_modules(PLASMA plasma) + if (PLASMA_FOUND) + pkg_get_variable(PLASMA_EXECUTABLE plasma executable) + pkg_get_variable(PLASMA_ABI_VERSION plasma abi_version) + message(STATUS "Plasma ABI version: ${PLASMA_ABI_VERSION}") + pkg_get_variable(PLASMA_SO_VERSION plasma so_version) + message(STATUS "Plasma SO version: ${PLASMA_SO_VERSION}") + set(PLASMA_INCLUDE_DIR ${PLASMA_INCLUDE_DIRS}) + set(PLASMA_LIBS ${PLASMA_LIBRARY_DIRS}) + set(PLASMA_SEARCH_LIB_PATH ${PLASMA_LIBRARY_DIRS}) + endif() +else() + set(PLASMA_HOME "$ENV{ARROW_HOME}") + + set(PLASMA_EXECUTABLE ${PLASMA_HOME}/bin/plasma_store) + + set(PLASMA_SEARCH_HEADER_PATHS + ${PLASMA_HOME}/include + ) + + set(PLASMA_SEARCH_LIB_PATH + ${PLASMA_HOME}/lib + ) + + find_path(PLASMA_INCLUDE_DIR plasma/client.h PATHS + ${PLASMA_SEARCH_HEADER_PATHS} + # make sure we don't accidentally pick up a different version + NO_DEFAULT_PATH + ) +endif() + +find_library(PLASMA_LIB_PATH NAMES plasma + PATHS + ${PLASMA_SEARCH_LIB_PATH} + NO_DEFAULT_PATH) +get_filename_component(PLASMA_LIBS ${PLASMA_LIB_PATH} DIRECTORY) + +if (PLASMA_INCLUDE_DIR AND PLASMA_LIBS) + set(PLASMA_FOUND TRUE) + set(PLASMA_LIB_NAME plasma) + + set(PLASMA_STATIC_LIB ${PLASMA_LIBS}/lib${PLASMA_LIB_NAME}.a) + + set(PLASMA_SHARED_LIB ${PLASMA_LIBS}/lib${PLASMA_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}) +endif() + +if (PLASMA_FOUND) + if (NOT Plasma_FIND_QUIETLY) + message(STATUS "Found the Plasma core library: ${PLASMA_LIB_PATH}") + message(STATUS "Found Plasma executable: ${PLASMA_EXECUTABLE}") + endif () +else () + if (NOT Plasma_FIND_QUIETLY) + set(PLASMA_ERR_MSG "Could not find the Plasma library. Looked for headers") + set(PLASMA_ERR_MSG "${PLASMA_ERR_MSG} in ${PLASMA_SEARCH_HEADER_PATHS}, and for libs") + set(PLASMA_ERR_MSG "${PLASMA_ERR_MSG} in ${PLASMA_SEARCH_LIB_PATH}") + if (Plasma_FIND_REQUIRED) + message(FATAL_ERROR "${PLASMA_ERR_MSG}") + else (Plasma_FIND_REQUIRED) + message(STATUS "${PLASMA_ERR_MSG}") + endif (Plasma_FIND_REQUIRED) + endif () + set(PLASMA_FOUND FALSE) +endif () + +mark_as_advanced( + PLASMA_INCLUDE_DIR + PLASMA_STATIC_LIB + PLASMA_SHARED_LIB +) diff --git a/python/doc/source/api.rst b/python/doc/source/api.rst index c52d400cef1c7..780aa4839610f 100644 --- a/python/doc/source/api.rst +++ b/python/doc/source/api.rst @@ -212,6 +212,21 @@ Type Classes Field Schema +.. currentmodule:: pyarrow.plasma + +.. _api.plasma: + +In-Memory Object Store +---------------------- + +.. autosummary:: + :toctree: generated/ + + ObjectID + PlasmaClient + PlasmaBuffer + MutablePlasmaBuffer + .. currentmodule:: pyarrow.parquet .. 
_api.parquet: diff --git a/python/manylinux1/build_arrow.sh b/python/manylinux1/build_arrow.sh index 8c6bda9550e87..85c096a5c11d0 100755 --- a/python/manylinux1/build_arrow.sh +++ b/python/manylinux1/build_arrow.sh @@ -35,6 +35,7 @@ cd /arrow/python # PyArrow build configuration export PYARROW_BUILD_TYPE='release' export PYARROW_WITH_PARQUET=1 +export PYARROW_WITH_PLASMA=1 export PYARROW_BUNDLE_ARROW_CPP=1 # Need as otherwise arrow_io is sometimes not linked export LDFLAGS="-Wl,--no-as-needed" @@ -52,7 +53,7 @@ for PYTHON in ${PYTHON_VERSIONS}; do ARROW_BUILD_DIR=/arrow/cpp/build-PY${PYTHON} mkdir -p "${ARROW_BUILD_DIR}" pushd "${ARROW_BUILD_DIR}" - PATH="$(cpython_path $PYTHON)/bin:$PATH" cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/arrow-dist -DARROW_BUILD_TESTS=OFF -DARROW_BUILD_SHARED=ON -DARROW_BOOST_USE_SHARED=OFF -DARROW_JEMALLOC=ON -DARROW_RPATH_ORIGIN=ON -DARROW_JEMALLOC_USE_SHARED=OFF -DARROW_PYTHON=ON -DPythonInterp_FIND_VERSION=${PYTHON} .. + PATH="$(cpython_path $PYTHON)/bin:$PATH" cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/arrow-dist -DARROW_BUILD_TESTS=OFF -DARROW_BUILD_SHARED=ON -DARROW_BOOST_USE_SHARED=OFF -DARROW_JEMALLOC=ON -DARROW_RPATH_ORIGIN=ON -DARROW_JEMALLOC_USE_SHARED=OFF -DARROW_PYTHON=ON -DPythonInterp_FIND_VERSION=${PYTHON} -DARROW_PLASMA=ON .. make -j5 install popd @@ -65,6 +66,7 @@ for PYTHON in ${PYTHON_VERSIONS}; do echo "=== (${PYTHON}) Test the existence of optional modules ===" $PIPI_IO -r requirements.txt PATH="$PATH:$(cpython_path $PYTHON)/bin" $PYTHON_INTERPRETER -c "import pyarrow.parquet" + PATH="$PATH:$(cpython_path $PYTHON)/bin" $PYTHON_INTERPRETER -c "import pyarrow.plasma" echo "=== (${PYTHON}) Tag the wheel with manylinux1 ===" mkdir -p repaired_wheels/ @@ -78,4 +80,3 @@ for PYTHON in ${PYTHON_VERSIONS}; do mv repaired_wheels/*.whl /io/dist done - diff --git a/python/pyarrow/__init__.py b/python/pyarrow/__init__.py index e3d783aee58b4..6d0ce204382e3 100644 --- a/python/pyarrow/__init__.py +++ b/python/pyarrow/__init__.py @@ -68,6 +68,7 @@ Date32Value, Date64Value, TimestampValue) from pyarrow.lib import (HdfsFile, NativeFile, PythonFile, + FixedSizeBufferOutputStream, Buffer, BufferReader, BufferOutputStream, OSFile, MemoryMappedFile, memory_map, frombuffer, @@ -99,7 +100,6 @@ open_file, serialize_pandas, deserialize_pandas) - localfs = LocalFilesystem.get_instance() diff --git a/python/pyarrow/error.pxi b/python/pyarrow/error.pxi index 259aeb074e3c2..8a3f57d209ac0 100644 --- a/python/pyarrow/error.pxi +++ b/python/pyarrow/error.pxi @@ -48,6 +48,18 @@ class ArrowNotImplementedError(NotImplementedError, ArrowException): pass +class PlasmaObjectExists(ArrowException): + pass + + +class PlasmaObjectNonexistent(ArrowException): + pass + + +class PlasmaStoreFull(ArrowException): + pass + + cdef int check_status(const CStatus& status) nogil except -1: if status.ok(): return 0 @@ -66,5 +78,11 @@ cdef int check_status(const CStatus& status) nogil except -1: raise ArrowNotImplementedError(message) elif status.IsTypeError(): raise ArrowTypeError(message) + elif status.IsPlasmaObjectExists(): + raise PlasmaObjectExists(message) + elif status.IsPlasmaObjectNonexistent(): + raise PlasmaObjectNonexistent(message) + elif status.IsPlasmaStoreFull(): + raise PlasmaStoreFull(message) else: raise ArrowException(message) diff --git a/python/pyarrow/includes/common.pxd b/python/pyarrow/includes/common.pxd index 3487d48ce9b52..637a133afb02b 100644 --- a/python/pyarrow/includes/common.pxd +++ b/python/pyarrow/includes/common.pxd @@ -50,6 
+50,9 @@ cdef extern from "arrow/api.h" namespace "arrow" nogil: c_bool IsKeyError() c_bool IsNotImplemented() c_bool IsTypeError() + c_bool IsPlasmaObjectExists() + c_bool IsPlasmaObjectNonexistent() + c_bool IsPlasmaStoreFull() cdef inline object PyObject_to_object(PyObject* o): diff --git a/python/pyarrow/includes/libarrow.pxd b/python/pyarrow/includes/libarrow.pxd index edf50ad54e787..ffe867b0af0f5 100644 --- a/python/pyarrow/includes/libarrow.pxd +++ b/python/pyarrow/includes/libarrow.pxd @@ -148,9 +148,15 @@ cdef extern from "arrow/api.h" namespace "arrow" nogil: CLoggingMemoryPool(CMemoryPool*) cdef cppclass CBuffer" arrow::Buffer": + CBuffer(const uint8_t* data, int64_t size) uint8_t* data() int64_t size() shared_ptr[CBuffer] parent() + c_bool is_mutable() const + + cdef cppclass CMutableBuffer" arrow::MutableBuffer"(CBuffer): + CMutableBuffer(const uint8_t* data, int64_t size) + uint8_t* mutable_data() cdef cppclass ResizableBuffer(CBuffer): CStatus Resize(int64_t nbytes) @@ -558,6 +564,9 @@ cdef extern from "arrow/io/memory.h" namespace "arrow::io" nogil: CMockOutputStream() int64_t GetExtentBytesWritten() + cdef cppclass CFixedSizeBufferWriter" arrow::io::FixedSizeBufferWriter"(WriteableFile): + CFixedSizeBufferWriter(const shared_ptr[CBuffer]& buffer) + cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: enum MessageType" arrow::ipc::Message::Type": diff --git a/python/pyarrow/io.pxi b/python/pyarrow/io.pxi index 8b213a33053d4..181b0b18a712f 100644 --- a/python/pyarrow/io.pxi +++ b/python/pyarrow/io.pxi @@ -473,6 +473,15 @@ cdef class OSFile(NativeFile): self.wr_file = handle +cdef class FixedSizeBufferOutputStream(NativeFile): + + def __cinit__(self, Buffer buffer): + self.wr_file.reset(new CFixedSizeBufferWriter(buffer.buffer)) + self.is_readable = 0 + self.is_writeable = 1 + self.is_open = True + + # ---------------------------------------------------------------------- # Arrow buffers @@ -523,7 +532,10 @@ cdef class Buffer: buffer.len = self.size buffer.ndim = 1 buffer.obj = self - buffer.readonly = 1 + if self.buffer.get().is_mutable(): + buffer.readonly = 0 + else: + buffer.readonly = 1 buffer.shape = self.shape buffer.strides = self.strides buffer.suboffsets = NULL @@ -540,6 +552,15 @@ cdef class Buffer: p[0] = self.buffer.get().data() return self.size + def __getwritebuffer__(self, Py_ssize_t idx, void **p): + if not self.buffer.get().is_mutable(): + raise SystemError("trying to write an immutable buffer") + if idx != 0: + raise SystemError("accessing non-existent buffer segment") + if p != NULL: + p[0] = self.buffer.get().data() + return self.size + cdef shared_ptr[PoolBuffer] allocate_buffer(CMemoryPool* pool): cdef shared_ptr[PoolBuffer] result diff --git a/python/pyarrow/plasma.pyx b/python/pyarrow/plasma.pyx new file mode 100644 index 0000000000000..bb17685277af1 --- /dev/null +++ b/python/pyarrow/plasma.pyx @@ -0,0 +1,560 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: embedsignature = True + +from libcpp cimport bool as c_bool, nullptr +from libcpp.memory cimport shared_ptr, unique_ptr, make_shared +from libcpp.string cimport string as c_string +from libcpp.vector cimport vector as c_vector +from libc.stdint cimport int64_t, uint8_t, uintptr_t +from cpython.pycapsule cimport * + +from pyarrow.lib cimport Buffer, NativeFile, check_status +from pyarrow.includes.libarrow cimport (CMutableBuffer, CBuffer, + CFixedSizeBufferWriter, CStatus) + + +PLASMA_WAIT_TIMEOUT = 2 ** 30 + + +cdef extern from "plasma/common.h" nogil: + + cdef cppclass CUniqueID" plasma::UniqueID": + + @staticmethod + CUniqueID from_binary(const c_string& binary) + + c_bool operator==(const CUniqueID& rhs) const + + c_string hex() const + + c_string binary() const + + cdef struct CObjectRequest" plasma::ObjectRequest": + CUniqueID object_id + int type + int status + + +cdef extern from "plasma/common.h": + cdef int64_t kDigestSize" plasma::kDigestSize" + + cdef enum ObjectRequestType: + PLASMA_QUERY_LOCAL"plasma::PLASMA_QUERY_LOCAL", + PLASMA_QUERY_ANYWHERE"plasma::PLASMA_QUERY_ANYWHERE" + + cdef int ObjectStatusLocal"plasma::ObjectStatusLocal"; + cdef int ObjectStatusRemote"plasma::ObjectStatusRemote"; + +cdef extern from "plasma/client.h" nogil: + + cdef cppclass CPlasmaClient" plasma::PlasmaClient": + + CPlasmaClient() + + CStatus Connect(const c_string& store_socket_name, + const c_string& manager_socket_name, int release_delay) + + CStatus Create(const CUniqueID& object_id, int64_t data_size, + const uint8_t* metadata, int64_t metadata_size, + uint8_t** data) + + CStatus Get(const CUniqueID* object_ids, int64_t num_objects, + int64_t timeout_ms, CObjectBuffer* object_buffers) + + CStatus Seal(const CUniqueID& object_id) + + CStatus Evict(int64_t num_bytes, int64_t& num_bytes_evicted) + + CStatus Hash(const CUniqueID& object_id, uint8_t* digest) + + CStatus Release(const CUniqueID& object_id) + + CStatus Contains(const CUniqueID& object_id, c_bool* has_object) + + CStatus Subscribe(int* fd) + + CStatus GetNotification(int fd, CUniqueID* object_id, + int64_t* data_size, int64_t* metadata_size) + + CStatus Disconnect() + + CStatus Fetch(int num_object_ids, const CUniqueID* object_ids) + + CStatus Wait(int64_t num_object_requests, CObjectRequest* object_requests, + int num_ready_objects, int64_t timeout_ms, int* num_objects_ready); + + CStatus Transfer(const char* addr, int port, const CUniqueID& object_id) + + +cdef extern from "plasma/client.h" nogil: + + cdef struct CObjectBuffer" plasma::ObjectBuffer": + int64_t data_size + uint8_t* data + int64_t metadata_size + uint8_t* metadata + + +def make_object_id(object_id): + return ObjectID(object_id) + + +cdef class ObjectID: + """ + An ObjectID represents a string of bytes used to identify Plasma objects. 
+ """ + + cdef: + CUniqueID data + + def __cinit__(self, object_id): + self.data = CUniqueID.from_binary(object_id) + + def __richcmp__(ObjectID self, ObjectID object_id, operation): + if operation != 2: + raise ValueError("operation != 2 (only equality is supported)") + return self.data == object_id.data + + def __hash__(self): + return hash(self.data.binary()) + + def __repr__(self): + return "ObjectID(" + self.data.hex().decode() + ")" + + def __reduce__(self): + return (make_object_id, (self.data.binary(),)) + + def binary(self): + """ + Return the binary representation of this ObjectID. + + Returns + ------- + bytes + Binary representation of the ObjectID. + """ + return self.data.binary() + + +cdef class PlasmaBuffer(Buffer): + """ + This is the type returned by calls to get with a PlasmaClient. + + We define our own class instead of directly returning a buffer object so + that we can add a custom destructor which notifies Plasma that the object + is no longer being used, so the memory in the Plasma store backing the + object can potentially be freed. + + Attributes + ---------- + object_id : ObjectID + The ID of the object in the buffer. + client : PlasmaClient + The PlasmaClient that we use to communicate with the store and manager. + """ + + cdef: + ObjectID object_id + PlasmaClient client + + def __cinit__(self, ObjectID object_id, PlasmaClient client): + """ + Initialize a PlasmaBuffer. + """ + self.object_id = object_id + self.client = client + + def __dealloc__(self): + """ + Notify Plasma that the object is no longer needed. + + If the plasma client has been shut down, then don't do anything. + """ + self.client.release(self.object_id) + + +cdef class PlasmaClient: + """ + The PlasmaClient is used to interface with a plasma store and manager. + + The PlasmaClient can ask the PlasmaStore to allocate a new buffer, seal a + buffer, and get a buffer. Buffers are referred to by object IDs, which are + strings. + """ + + cdef: + shared_ptr[CPlasmaClient] client + int notification_fd + c_string store_socket_name + c_string manager_socket_name + + def __cinit__(self, store_socket_name, manager_socket_name, int release_delay): + """ + Create a new PlasmaClient that is connected to a plasma store + and optionally a plasma manager. + + Parameters + ---------- + store_socket_name : str + Name of the socket the plasma store is listening at. + manager_socket_name : str + Name of the socket the plasma manager is listening at. + release_delay : int + The maximum number of objects that the client will keep and + delay releasing (for caching reasons). 
+        """
+        self.client.reset(new CPlasmaClient())
+        self.notification_fd = -1
+        self.store_socket_name = store_socket_name.encode()
+        self.manager_socket_name = manager_socket_name.encode()
+        with nogil:
+            check_status(self.client.get().Connect(self.store_socket_name,
+                         self.manager_socket_name, release_delay))
+
+    cdef _get_object_buffers(self, object_ids, int64_t timeout_ms,
+                             c_vector[CObjectBuffer]* result):
+        cdef c_vector[CUniqueID] ids
+        cdef ObjectID object_id
+        for object_id in object_ids:
+            ids.push_back(object_id.data)
+        result[0].resize(ids.size())
+        with nogil:
+            check_status(self.client.get().Get(ids.data(), ids.size(),
+                                               timeout_ms, result[0].data()))
+
+    cdef _make_plasma_buffer(self, ObjectID object_id, uint8_t* data,
+                             int64_t size):
+        cdef shared_ptr[CBuffer] buffer
+        buffer.reset(new CBuffer(data, size))
+        result = PlasmaBuffer(object_id, self)
+        result.init(buffer)
+        return result
+
+    cdef _make_mutable_plasma_buffer(self, ObjectID object_id, uint8_t* data,
+                                     int64_t size):
+        cdef shared_ptr[CBuffer] buffer
+        buffer.reset(new CMutableBuffer(data, size))
+        result = PlasmaBuffer(object_id, self)
+        result.init(buffer)
+        return result
+
+    @property
+    def store_socket_name(self):
+        return self.store_socket_name.decode()
+
+    @property
+    def manager_socket_name(self):
+        return self.manager_socket_name.decode()
+
+    def create(self, ObjectID object_id, int64_t data_size, c_string metadata=b""):
+        """
+        Create a new buffer in the PlasmaStore for a particular object ID.
+
+        The returned buffer is mutable until seal is called.
+
+        Parameters
+        ----------
+        object_id : ObjectID
+            The object ID used to identify an object.
+        data_size : int
+            The size in bytes of the created buffer.
+        metadata : bytes
+            An optional string of bytes encoding whatever metadata the user
+            wishes to encode.
+
+        Raises
+        ------
+        PlasmaObjectExists
+            This exception is raised if the object could not be created because
+            there already is an object with the same ID in the plasma store.
+
+        PlasmaStoreFull
+            This exception is raised if the object could not be created
+            because the plasma store is unable to evict enough objects to
+            create room for it.
+        """
+        cdef uint8_t* data
+        with nogil:
+            check_status(self.client.get().Create(object_id.data, data_size,
+                                                  <uint8_t*>(metadata.data()),
+                                                  metadata.size(), &data))
+        return self._make_mutable_plasma_buffer(object_id, data, data_size)
+
+    def get(self, object_ids, timeout_ms=-1):
+        """
+        Returns data buffer from the PlasmaStore based on object ID.
+
+        If the object has not been sealed yet, this call will block. The
+        retrieved buffer is immutable.
+
+        Parameters
+        ----------
+        object_ids : list
+            A list of ObjectIDs used to identify some objects.
+        timeout_ms : int
+            The number of milliseconds that the get call should block before
+            timing out and returning. Pass -1 if the call should block and 0
+            if the call should return immediately.
+
+        Returns
+        -------
+        list
+            List of PlasmaBuffers for the data associated with the object_ids
+            and None if the object was not available.
+        """
+        cdef c_vector[CObjectBuffer] object_buffers
+        self._get_object_buffers(object_ids, timeout_ms, &object_buffers)
+        result = []
+        for i in range(object_buffers.size()):
+            if object_buffers[i].data_size != -1:
+                result.append(self._make_plasma_buffer(
+                    object_ids[i], object_buffers[i].data,
+                    object_buffers[i].data_size))
+            else:
+                result.append(None)
+        return result
+
+    def get_metadata(self, object_ids, timeout_ms=-1):
+        """
+        Returns metadata buffer from the PlasmaStore based on object ID.
+
+        If the object has not been sealed yet, this call will block. The
+        retrieved buffer is immutable.
+
+        Parameters
+        ----------
+        object_ids : list
+            A list of ObjectIDs used to identify some objects.
+        timeout_ms : int
+            The number of milliseconds that the get call should block before
+            timing out and returning. Pass -1 if the call should block and 0
+            if the call should return immediately.
+
+        Returns
+        -------
+        list
+            List of PlasmaBuffers for the metadata associated with the
+            object_ids and None if the object was not available.
+        """
+        cdef c_vector[CObjectBuffer] object_buffers
+        self._get_object_buffers(object_ids, timeout_ms, &object_buffers)
+        result = []
+        for i in range(object_buffers.size()):
+            result.append(self._make_plasma_buffer(
+                object_ids[i], object_buffers[i].metadata,
+                object_buffers[i].metadata_size))
+        return result
+
+    def seal(self, ObjectID object_id):
+        """
+        Seal the buffer in the PlasmaStore for a particular object ID.
+
+        Once a buffer has been sealed, the buffer is immutable and can only be
+        accessed through get.
+
+        Parameters
+        ----------
+        object_id : ObjectID
+            A string used to identify an object.
+        """
+        with nogil:
+            check_status(self.client.get().Seal(object_id.data))
+
+    def release(self, ObjectID object_id):
+        """
+        Notify Plasma that the object is no longer needed.
+
+        Parameters
+        ----------
+        object_id : ObjectID
+            A string used to identify an object.
+        """
+        with nogil:
+            check_status(self.client.get().Release(object_id.data))
+
+    def contains(self, ObjectID object_id):
+        """
+        Check if the object is present and sealed in the PlasmaStore.
+
+        Parameters
+        ----------
+        object_id : ObjectID
+            A string used to identify an object.
+        """
+        cdef c_bool is_contained
+        with nogil:
+            check_status(self.client.get().Contains(object_id.data,
+                                                    &is_contained))
+        return is_contained
+
+    def hash(self, ObjectID object_id):
+        """
+        Compute the checksum of an object in the object store.
+
+        Parameters
+        ----------
+        object_id : ObjectID
+            A string used to identify an object.
+
+        Returns
+        -------
+        bytes
+            A digest string of the object's hash. If the object isn't in the
+            object store, the string will have length zero.
+        """
+        cdef c_vector[uint8_t] digest = c_vector[uint8_t](kDigestSize)
+        with nogil:
+            check_status(self.client.get().Hash(object_id.data,
+                                                digest.data()))
+        return bytes(digest[:])
+
+    def evict(self, int64_t num_bytes):
+        """
+        Evict some objects to recover some bytes.
+
+        Recover at least num_bytes bytes if possible.
+
+        Parameters
+        ----------
+        num_bytes : int
+            The number of bytes to attempt to recover.
+        """
+        cdef int64_t num_bytes_evicted = -1
+        with nogil:
+            check_status(self.client.get().Evict(num_bytes, num_bytes_evicted))
+        return num_bytes_evicted
+
+    def transfer(self, address, int port, ObjectID object_id):
+        """
+        Transfer a local object with id object_id to another plasma instance.
+
+        Parameters
+        ----------
+        address : str
+            IPv4 address of the plasma instance the object is sent to.
+        port : int
+            Port number of the plasma instance the object is sent to.
+        object_id : ObjectID
+            A string used to identify an object.
+        """
+        cdef c_string addr = address.encode()
+        with nogil:
+            check_status(self.client.get().Transfer(addr.c_str(), port, object_id.data))
+
+    def fetch(self, object_ids):
+        """
+        Fetch the objects with the given IDs from other plasma managers.
+
+        Parameters
+        ----------
+        object_ids : list
+            A list of strings used to identify the objects.
+ """ + cdef c_vector[CUniqueID] ids + cdef ObjectID object_id + for object_id in object_ids: + ids.push_back(object_id.data) + with nogil: + check_status(self.client.get().Fetch(ids.size(), ids.data())) + + def wait(self, object_ids, int64_t timeout=PLASMA_WAIT_TIMEOUT, int num_returns=1): + """ + Wait until num_returns objects in object_ids are ready. + Currently, the object ID arguments to wait must be unique. + + Parameters + ---------- + object_ids : list + List of object IDs to wait for. + timeout :int + Return to the caller after timeout milliseconds. + num_returns : int + We are waiting for this number of objects to be ready. + + Returns + ------- + list + List of object IDs that are ready. + list + List of object IDs we might still wait on. + """ + # Check that the object ID arguments are unique. The plasma manager + # currently crashes if given duplicate object IDs. + if len(object_ids) != len(set(object_ids)): + raise Exception("Wait requires a list of unique object IDs.") + cdef int64_t num_object_requests = len(object_ids) + cdef c_vector[CObjectRequest] object_requests = c_vector[CObjectRequest](num_object_requests) + cdef int num_objects_ready = 0 + cdef ObjectID object_id + for i, object_id in enumerate(object_ids): + object_requests[i].object_id = object_id.data + object_requests[i].type = PLASMA_QUERY_ANYWHERE + with nogil: + check_status(self.client.get().Wait(num_object_requests, object_requests.data(), num_returns, timeout, &num_objects_ready)) + cdef int num_to_return = min(num_objects_ready, num_returns); + ready_ids = [] + waiting_ids = set(object_ids) + cdef int num_returned = 0 + for i in range(len(object_ids)): + if num_returned == num_to_return: + break + if object_requests[i].status == ObjectStatusLocal or object_requests[i].status == ObjectStatusRemote: + ready_ids.append(ObjectID(object_requests[i].object_id.binary())) + waiting_ids.discard(ObjectID(object_requests[i].object_id.binary())) + num_returned += 1 + return ready_ids, list(waiting_ids) + + def subscribe(self): + """Subscribe to notifications about sealed objects.""" + with nogil: + check_status(self.client.get().Subscribe(&self.notification_fd)) + + def get_next_notification(self): + """ + Get the next notification from the notification socket. + + Returns + ------- + ObjectID + The object ID of the object that was stored. + int + The data size of the object that was stored. + int + The metadata size of the object that was stored. + """ + cdef ObjectID object_id = ObjectID(20 * b"\0") + cdef int64_t data_size + cdef int64_t metadata_size + with nogil: + check_status(self.client.get().GetNotification(self.notification_fd, + &object_id.data, + &data_size, + &metadata_size)) + return object_id, data_size, metadata_size + + def to_capsule(self): + return PyCapsule_New(self.client.get(), "plasma", NULL) + + def disconnect(self): + """ + Disconnect this client from the Plasma store. 
+ """ + with nogil: + check_status(self.client.get().Disconnect()) diff --git a/python/pyarrow/tests/conftest.py b/python/pyarrow/tests/conftest.py index 2aeeab7294ccc..21288e4f35e74 100644 --- a/python/pyarrow/tests/conftest.py +++ b/python/pyarrow/tests/conftest.py @@ -18,11 +18,12 @@ from pytest import skip -groups = ['hdfs', 'parquet', 'large_memory'] +groups = ['hdfs', 'parquet', 'plasma', 'large_memory'] defaults = { 'hdfs': False, 'parquet': False, + 'plasma': False, 'large_memory': False } @@ -32,6 +33,11 @@ except ImportError: pass +try: + import pyarrow.plasma as plasma + defaults['plasma'] = True +except ImportError: + pass def pytest_configure(config): pass diff --git a/python/pyarrow/tests/test_plasma.py b/python/pyarrow/tests/test_plasma.py new file mode 100644 index 0000000000000..ce684e3e41f1b --- /dev/null +++ b/python/pyarrow/tests/test_plasma.py @@ -0,0 +1,683 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import glob +import numpy as np +import os +import pytest +import random +import signal +import subprocess +import sys +import time +import unittest + +import pyarrow as pa +import pandas as pd + +DEFAULT_PLASMA_STORE_MEMORY = 10 ** 9 + +def random_name(): + return str(random.randint(0, 99999999)) + + +def random_object_id(): + import pyarrow.plasma as plasma + return plasma.ObjectID(np.random.bytes(20)) + + +def generate_metadata(length): + metadata = bytearray(length) + if length > 0: + metadata[0] = random.randint(0, 255) + metadata[-1] = random.randint(0, 255) + for _ in range(100): + metadata[random.randint(0, length - 1)] = random.randint(0, 255) + return metadata + + +def write_to_data_buffer(buff, length): + array = np.frombuffer(buff, dtype="uint8") + if length > 0: + array[0] = random.randint(0, 255) + array[-1] = random.randint(0, 255) + for _ in range(100): + array[random.randint(0, length - 1)] = random.randint(0, 255) + + +def create_object_with_id(client, object_id, data_size, metadata_size, + seal=True): + metadata = generate_metadata(metadata_size) + memory_buffer = client.create(object_id, data_size, metadata) + write_to_data_buffer(memory_buffer, data_size) + if seal: + client.seal(object_id) + return memory_buffer, metadata + + +def create_object(client, data_size, metadata_size, seal=True): + object_id = random_object_id() + memory_buffer, metadata = create_object_with_id(client, object_id, + data_size, metadata_size, + seal=seal) + return object_id, memory_buffer, metadata + + +def assert_get_object_equal(unit_test, client1, client2, object_id, + memory_buffer=None, metadata=None): + import pyarrow.plasma as plasma + client1_buff = client1.get([object_id])[0] + client2_buff = 
client2.get([object_id])[0] + client1_metadata = client1.get_metadata([object_id])[0] + client2_metadata = client2.get_metadata([object_id])[0] + assert len(client1_buff) == len(client2_buff) + assert len(client1_metadata) == len(client2_metadata) + # Check that the buffers from the two clients are the same. + assert plasma.buffers_equal(client1_buff, client2_buff) + # Check that the metadata buffers from the two clients are the same. + assert plasma.buffers_equal(client1_metadata, client2_metadata) + # If a reference buffer was provided, check that it is the same as well. + if memory_buffer is not None: + assert plasma.buffers_equal(memory_buffer, client1_buff) + # If reference metadata was provided, check that it is the same as well. + if metadata is not None: + assert plasma.buffers_equal(metadata, client1_metadata) + + +def start_plasma_store(plasma_store_memory=DEFAULT_PLASMA_STORE_MEMORY, + use_valgrind=False, use_profiler=False, + stdout_file=None, stderr_file=None): + """Start a plasma store process. + Args: + use_valgrind (bool): True if the plasma store should be started inside + of valgrind. If this is True, use_profiler must be False. + use_profiler (bool): True if the plasma store should be started inside + a profiler. If this is True, use_valgrind must be False. + stdout_file: A file handle opened for writing to redirect stdout to. If + no redirection should happen, then this should be None. + stderr_file: A file handle opened for writing to redirect stderr to. If + no redirection should happen, then this should be None. + Return: + A tuple of the name of the plasma store socket and the process ID of + the plasma store process. + """ + if use_valgrind and use_profiler: + raise Exception("Cannot use valgrind and profiler at the same time.") + plasma_store_executable = os.path.join(pa.__path__[0], "plasma_store") + plasma_store_name = "/tmp/plasma_store{}".format(random_name()) + command = [plasma_store_executable, + "-s", plasma_store_name, + "-m", str(plasma_store_memory)] + if use_valgrind: + pid = subprocess.Popen(["valgrind", + "--track-origins=yes", + "--leak-check=full", + "--show-leak-kinds=all", + "--leak-check-heuristics=stdstring", + "--error-exitcode=1"] + command, + stdout=stdout_file, stderr=stderr_file) + time.sleep(1.0) + elif use_profiler: + pid = subprocess.Popen(["valgrind", "--tool=callgrind"] + command, + stdout=stdout_file, stderr=stderr_file) + time.sleep(1.0) + else: + pid = subprocess.Popen(command, stdout=stdout_file, stderr=stderr_file) + time.sleep(0.1) + return plasma_store_name, pid + + +@pytest.mark.plasma +class TestPlasmaClient(object): + + def setup_method(self, test_method): + import pyarrow.plasma as plasma + # Start Plasma store. + plasma_store_name, self.p = start_plasma_store( + use_valgrind=os.getenv("PLASMA_VALGRIND") == "1") + # Connect to Plasma. + self.plasma_client = plasma.PlasmaClient(plasma_store_name, "", 64) + # For the eviction test + self.plasma_client2 = plasma.PlasmaClient(plasma_store_name, "", 0) + + def teardown_method(self, test_method): + # Check that the Plasma store is still alive. + assert self.p.poll() == None + # Kill the plasma store process. + if os.getenv("PLASMA_VALGRIND") == "1": + self.p.send_signal(signal.SIGTERM) + self.p.wait() + if self.p.returncode != 0: + assert False + else: + self.p.kill() + + def test_create(self): + # Create an object id string. + object_id = random_object_id() + # Create a new buffer and write to it. 
+ length = 50 + memory_buffer = np.frombuffer(self.plasma_client.create(object_id, + length), + dtype="uint8") + for i in range(length): + memory_buffer[i] = i % 256 + # Seal the object. + self.plasma_client.seal(object_id) + # Get the object. + memory_buffer = np.frombuffer(self.plasma_client.get([object_id])[0], + dtype="uint8") + for i in range(length): + assert memory_buffer[i] == i % 256 + + def test_create_with_metadata(self): + for length in range(1000): + # Create an object id string. + object_id = random_object_id() + # Create a random metadata string. + metadata = generate_metadata(length) + # Create a new buffer and write to it. + memory_buffer = np.frombuffer(self.plasma_client.create(object_id, + length, + metadata), + dtype="uint8") + for i in range(length): + memory_buffer[i] = i % 256 + # Seal the object. + self.plasma_client.seal(object_id) + # Get the object. + memory_buffer = np.frombuffer( + self.plasma_client.get([object_id])[0], dtype="uint8") + for i in range(length): + assert memory_buffer[i] == i % 256 + # Get the metadata. + metadata_buffer = np.frombuffer( + self.plasma_client.get_metadata([object_id])[0], dtype="uint8") + assert len(metadata) == len(metadata_buffer) + for i in range(len(metadata)): + assert metadata[i] == metadata_buffer[i] + + def test_create_existing(self): + # This test is partially used to test the code path in which we create + # an object with an ID that already exists + length = 100 + for _ in range(1000): + object_id = random_object_id() + self.plasma_client.create(object_id, length, + generate_metadata(length)) + try: + self.plasma_client.create(object_id, length, + generate_metadata(length)) + # TODO(pcm): Introduce a more specific error type here. + except pa.lib.ArrowException as e: + pass + else: + assert False + + def test_get(self): + num_object_ids = 100 + # Test timing out of get with various timeouts. + for timeout in [0, 10, 100, 1000]: + object_ids = [random_object_id() for _ in range(num_object_ids)] + results = self.plasma_client.get(object_ids, timeout_ms=timeout) + assert results == num_object_ids * [None] + + data_buffers = [] + metadata_buffers = [] + for i in range(num_object_ids): + if i % 2 == 0: + data_buffer, metadata_buffer = create_object_with_id( + self.plasma_client, object_ids[i], 2000, 2000) + data_buffers.append(data_buffer) + metadata_buffers.append(metadata_buffer) + + # Test timing out from some but not all get calls with various + # timeouts. + for timeout in [0, 10, 100, 1000]: + data_results = self.plasma_client.get(object_ids, + timeout_ms=timeout) + # metadata_results = self.plasma_client.get_metadata( + # object_ids, timeout_ms=timeout) + for i in range(num_object_ids): + if i % 2 == 0: + array1 = np.frombuffer(data_buffers[i // 2], dtype="uint8") + array2 = np.frombuffer(data_results[i], dtype="uint8") + np.testing.assert_equal(array1, array2) + # TODO(rkn): We should compare the metadata as well. But + # currently the types are different (e.g., memoryview + # versus bytearray). + # assert plasma.buffers_equal( + # metadata_buffers[i // 2], metadata_results[i]) + else: + assert results[i] is None + + def test_store_arrow_objects(self): + import pyarrow.plasma as plasma + data = np.random.randn(10, 4) + # Write an arrow object. 
+ object_id = random_object_id() + tensor = pa.Tensor.from_numpy(data) + data_size = pa.get_tensor_size(tensor) + buf = self.plasma_client.create(object_id, data_size) + stream = pa.FixedSizeBufferOutputStream(buf) + pa.write_tensor(tensor, stream) + self.plasma_client.seal(object_id) + # Read the arrow object. + [tensor] = self.plasma_client.get([object_id]) + reader = pa.BufferReader(tensor) + array = pa.read_tensor(reader).to_numpy() + # Assert that they are equal. + np.testing.assert_equal(data, array) + + def test_store_pandas_dataframe(self): + import pyarrow.plasma as plasma + d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']), + 'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])} + df = pd.DataFrame(d) + + # Write the DataFrame. + record_batch = pa.RecordBatch.from_pandas(df) + # Determine the size. + s = pa.MockOutputStream() + stream_writer = pa.RecordBatchStreamWriter(s, record_batch.schema) + stream_writer.write_batch(record_batch) + data_size = s.size() + object_id = plasma.ObjectID(np.random.bytes(20)) + + buf = self.plasma_client.create(object_id, data_size) + stream = pa.FixedSizeBufferOutputStream(buf) + stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema) + stream_writer.write_batch(record_batch) + + self.plasma_client.seal(object_id) + + # Read the DataFrame. + [data] = self.plasma_client.get([object_id]) + reader = pa.RecordBatchStreamReader(pa.BufferReader(data)) + result = reader.get_next_batch().to_pandas() + + pd.util.testing.assert_frame_equal(df, result) + + def test_pickle_object_ids(self): + # This can be used for sharing object IDs between processes. + import pickle + object_id = random_object_id() + data = pickle.dumps(object_id) + object_id2 = pickle.loads(data) + assert object_id == object_id2 + + def test_store_full(self): + # The store is started with 1GB, so make sure that create throws an + # exception when it is full. + def assert_create_raises_plasma_full(unit_test, size): + partial_size = np.random.randint(size) + try: + _, memory_buffer, _ = create_object(unit_test.plasma_client, + partial_size, + size - partial_size) + # TODO(pcm): More specific error here. + except pa.lib.ArrowException as e: + pass + else: + # For some reason the above didn't throw an exception, so fail. + assert False + + # Create a list to keep some of the buffers in scope. + memory_buffers = [] + _, memory_buffer, _ = create_object(self.plasma_client, 5 * 10 ** 8, 0) + memory_buffers.append(memory_buffer) + # Remaining space is 5 * 10 ** 8. Make sure that we can't create an + # object of size 5 * 10 ** 8 + 1, but we can create one of size + # 2 * 10 ** 8. + assert_create_raises_plasma_full(self, 5 * 10 ** 8 + 1) + _, memory_buffer, _ = create_object(self.plasma_client, 2 * 10 ** 8, 0) + del memory_buffer + _, memory_buffer, _ = create_object(self.plasma_client, 2 * 10 ** 8, 0) + del memory_buffer + assert_create_raises_plasma_full(self, 5 * 10 ** 8 + 1) + + _, memory_buffer, _ = create_object(self.plasma_client, 2 * 10 ** 8, 0) + memory_buffers.append(memory_buffer) + # Remaining space is 3 * 10 ** 8. + assert_create_raises_plasma_full(self, 3 * 10 ** 8 + 1) + + _, memory_buffer, _ = create_object(self.plasma_client, 10 ** 8, 0) + memory_buffers.append(memory_buffer) + # Remaining space is 2 * 10 ** 8. 
+ assert_create_raises_plasma_full(self, 2 * 10 ** 8 + 1) + + def test_contains(self): + fake_object_ids = [random_object_id() for _ in range(100)] + real_object_ids = [random_object_id() for _ in range(100)] + for object_id in real_object_ids: + assert self.plasma_client.contains(object_id) == False + self.plasma_client.create(object_id, 100) + self.plasma_client.seal(object_id) + assert self.plasma_client.contains(object_id) + for object_id in fake_object_ids: + assert not self.plasma_client.contains(object_id) + for object_id in real_object_ids: + assert self.plasma_client.contains(object_id) + + def test_hash(self): + # Check the hash of an object that doesn't exist. + object_id1 = random_object_id() + try: + self.plasma_client.hash(object_id1) + # TODO(pcm): Introduce a more specific error type here + except pa.lib.ArrowException as e: + pass + else: + assert False + + length = 1000 + # Create a random object, and check that the hash function always + # returns the same value. + metadata = generate_metadata(length) + memory_buffer = np.frombuffer(self.plasma_client.create(object_id1, + length, + metadata), + dtype="uint8") + for i in range(length): + memory_buffer[i] = i % 256 + self.plasma_client.seal(object_id1) + assert (self.plasma_client.hash(object_id1) == + self.plasma_client.hash(object_id1)) + + # Create a second object with the same value as the first, and check + # that their hashes are equal. + object_id2 = random_object_id() + memory_buffer = np.frombuffer(self.plasma_client.create(object_id2, + length, + metadata), + dtype="uint8") + for i in range(length): + memory_buffer[i] = i % 256 + self.plasma_client.seal(object_id2) + assert (self.plasma_client.hash(object_id1) == + self.plasma_client.hash(object_id2)) + + # Create a third object with a different value from the first two, and + # check that its hash is different. + object_id3 = random_object_id() + metadata = generate_metadata(length) + memory_buffer = np.frombuffer(self.plasma_client.create(object_id3, + length, + metadata), + dtype="uint8") + for i in range(length): + memory_buffer[i] = (i + 1) % 256 + self.plasma_client.seal(object_id3) + assert (self.plasma_client.hash(object_id1) != + self.plasma_client.hash(object_id3)) + + # Create a fourth object with the same value as the third, but + # different metadata. Check that its hash is different from any of the + # previous three. + object_id4 = random_object_id() + metadata4 = generate_metadata(length) + memory_buffer = np.frombuffer(self.plasma_client.create(object_id4, + length, + metadata4), + dtype="uint8") + for i in range(length): + memory_buffer[i] = (i + 1) % 256 + self.plasma_client.seal(object_id4) + assert (self.plasma_client.hash(object_id1) != + self.plasma_client.hash(object_id4)) + assert (self.plasma_client.hash(object_id3) != + self.plasma_client.hash(object_id4)) + + def test_many_hashes(self): + hashes = [] + length = 2 ** 10 + + for i in range(256): + object_id = random_object_id() + memory_buffer = np.frombuffer(self.plasma_client.create(object_id, + length), + dtype="uint8") + for j in range(length): + memory_buffer[j] = i + self.plasma_client.seal(object_id) + hashes.append(self.plasma_client.hash(object_id)) + + # Create objects of varying length. Each pair has two bits different. 
+ for i in range(length): + object_id = random_object_id() + memory_buffer = np.frombuffer(self.plasma_client.create(object_id, + length), + dtype="uint8") + for j in range(length): + memory_buffer[j] = 0 + memory_buffer[i] = 1 + self.plasma_client.seal(object_id) + hashes.append(self.plasma_client.hash(object_id)) + + # Create objects of varying length, all with value 0. + for i in range(length): + object_id = random_object_id() + memory_buffer = np.frombuffer(self.plasma_client.create(object_id, + i), + dtype="uint8") + for j in range(i): + memory_buffer[j] = 0 + self.plasma_client.seal(object_id) + hashes.append(self.plasma_client.hash(object_id)) + + # Check that all hashes were unique. + assert len(set(hashes)) == 256 + length + length + + # def test_individual_delete(self): + # length = 100 + # # Create an object id string. + # object_id = random_object_id() + # # Create a random metadata string. + # metadata = generate_metadata(100) + # # Create a new buffer and write to it. + # memory_buffer = self.plasma_client.create(object_id, length, + # metadata) + # for i in range(length): + # memory_buffer[i] = chr(i % 256) + # # Seal the object. + # self.plasma_client.seal(object_id) + # # Check that the object is present. + # assert self.plasma_client.contains(object_id) + # # Delete the object. + # self.plasma_client.delete(object_id) + # # Make sure the object is no longer present. + # self.assertFalse(self.plasma_client.contains(object_id)) + # + # def test_delete(self): + # # Create some objects. + # object_ids = [random_object_id() for _ in range(100)] + # for object_id in object_ids: + # length = 100 + # # Create a random metadata string. + # metadata = generate_metadata(100) + # # Create a new buffer and write to it. + # memory_buffer = self.plasma_client.create(object_id, length, + # metadata) + # for i in range(length): + # memory_buffer[i] = chr(i % 256) + # # Seal the object. + # self.plasma_client.seal(object_id) + # # Check that the object is present. + # assert self.plasma_client.contains(object_id) + # + # # Delete the objects and make sure they are no longer present. + # for object_id in object_ids: + # # Delete the object. + # self.plasma_client.delete(object_id) + # # Make sure the object is no longer present. + # self.assertFalse(self.plasma_client.contains(object_id)) + + def test_illegal_functionality(self): + # Create an object id string. + object_id = random_object_id() + # Create a new buffer and write to it. + length = 1000 + memory_buffer = self.plasma_client.create(object_id, length) + # Make sure we cannot access memory out of bounds. + with pytest.raises(Exception): + memory_buffer[length] + # Seal the object. + self.plasma_client.seal(object_id) + # This test is commented out because it currently fails. + # # Make sure the object is ready only now. + # def illegal_assignment(): + # memory_buffer[0] = chr(0) + # with pytest.raises(Exception): + # illegal_assignment() + # Get the object. + memory_buffer = self.plasma_client.get([object_id])[0] + + # Make sure the object is read only. 
+ def illegal_assignment(): + memory_buffer[0] = chr(0) + with pytest.raises(Exception): + illegal_assignment() + + def test_evict(self): + client = self.plasma_client2 + object_id1 = random_object_id() + b1 = client.create(object_id1, 1000) + client.seal(object_id1) + del b1 + assert client.evict(1) == 1000 + + object_id2 = random_object_id() + object_id3 = random_object_id() + b2 = client.create(object_id2, 999) + b3 = client.create(object_id3, 998) + client.seal(object_id3) + del b3 + assert client.evict(1000) == 998 + + object_id4 = random_object_id() + b4 = client.create(object_id4, 997) + client.seal(object_id4) + del b4 + client.seal(object_id2) + del b2 + assert client.evict(1) == 997 + assert client.evict(1) == 999 + + object_id5 = random_object_id() + object_id6 = random_object_id() + object_id7 = random_object_id() + b5 = client.create(object_id5, 996) + b6 = client.create(object_id6, 995) + b7 = client.create(object_id7, 994) + client.seal(object_id5) + client.seal(object_id6) + client.seal(object_id7) + del b5 + del b6 + del b7 + assert client.evict(2000) == 996 + 995 + 994 + + def test_subscribe(self): + # Subscribe to notifications from the Plasma Store. + self.plasma_client.subscribe() + for i in [1, 10, 100, 1000, 10000, 100000]: + object_ids = [random_object_id() for _ in range(i)] + metadata_sizes = [np.random.randint(1000) for _ in range(i)] + data_sizes = [np.random.randint(1000) for _ in range(i)] + for j in range(i): + self.plasma_client.create( + object_ids[j], data_sizes[j], + metadata=bytearray(np.random.bytes(metadata_sizes[j]))) + self.plasma_client.seal(object_ids[j]) + # Check that we received notifications for all of the objects. + for j in range(i): + notification_info = self.plasma_client.get_next_notification() + recv_objid, recv_dsize, recv_msize = notification_info + assert object_ids[j] == recv_objid + assert data_sizes[j] == recv_dsize + assert metadata_sizes[j] == recv_msize + + def test_subscribe_deletions(self): + # Subscribe to notifications from the Plasma Store. We use + # plasma_client2 to make sure that all used objects will get evicted + # properly. + self.plasma_client2.subscribe() + for i in [1, 10, 100, 1000, 10000, 100000]: + object_ids = [random_object_id() for _ in range(i)] + # Add 1 to the sizes to make sure we have nonzero object sizes. + metadata_sizes = [np.random.randint(1000) + 1 for _ in range(i)] + data_sizes = [np.random.randint(1000) + 1 for _ in range(i)] + for j in range(i): + x = self.plasma_client2.create( + object_ids[j], data_sizes[j], + metadata=bytearray(np.random.bytes(metadata_sizes[j]))) + self.plasma_client2.seal(object_ids[j]) + del x + # Check that we received notifications for creating all of the + # objects. + for j in range(i): + notification_info = self.plasma_client2.get_next_notification() + recv_objid, recv_dsize, recv_msize = notification_info + assert object_ids[j] == recv_objid + assert data_sizes[j] == recv_dsize + assert metadata_sizes[j] == recv_msize + + # Check that we receive notifications for deleting all objects, as + # we evict them. + for j in range(i): + assert (self.plasma_client2.evict(1) == + data_sizes[j] + metadata_sizes[j]) + notification_info = self.plasma_client2.get_next_notification() + recv_objid, recv_dsize, recv_msize = notification_info + assert object_ids[j] == recv_objid + assert -1 == recv_dsize + assert -1 == recv_msize + + # Test multiple deletion notifications. The first 9 object IDs have + # size 0, and the last has a nonzero size. 
When Plasma evicts 1 byte, + # it will evict all objects, so we should receive deletion + # notifications for each. + num_object_ids = 10 + object_ids = [random_object_id() for _ in range(num_object_ids)] + metadata_sizes = [0] * (num_object_ids - 1) + data_sizes = [0] * (num_object_ids - 1) + metadata_sizes.append(np.random.randint(1000)) + data_sizes.append(np.random.randint(1000)) + for i in range(num_object_ids): + x = self.plasma_client2.create( + object_ids[i], data_sizes[i], + metadata=bytearray(np.random.bytes(metadata_sizes[i]))) + self.plasma_client2.seal(object_ids[i]) + del x + for i in range(num_object_ids): + notification_info = self.plasma_client2.get_next_notification() + recv_objid, recv_dsize, recv_msize = notification_info + assert object_ids[i] == recv_objid + assert data_sizes[i] == recv_dsize + assert metadata_sizes[i] == recv_msize + assert (self.plasma_client2.evict(1) == + data_sizes[-1] + metadata_sizes[-1]) + for i in range(num_object_ids): + notification_info = self.plasma_client2.get_next_notification() + recv_objid, recv_dsize, recv_msize = notification_info + assert object_ids[i] == recv_objid + assert -1 == recv_dsize + assert -1 == recv_msize diff --git a/python/setup.py b/python/setup.py index 1ea57ae2d858d..7425b71916001 100644 --- a/python/setup.py +++ b/python/setup.py @@ -99,6 +99,10 @@ def initialize_options(self): self.with_parquet = strtobool( os.environ.get('PYARROW_WITH_PARQUET', '0')) + self.with_plasma = strtobool( + os.environ.get('PYARROW_WITH_PLASMA', '0')) + if self.with_plasma and "plasma" not in self.CYTHON_MODULE_NAMES: + self.CYTHON_MODULE_NAMES.append("plasma") self.bundle_arrow_cpp = strtobool( os.environ.get('PYARROW_BUNDLE_ARROW_CPP', '0')) @@ -242,6 +246,8 @@ def move_lib(lib_name): shutil.move(pjoin(build_prefix, 'include'), pjoin(build_lib, 'pyarrow')) move_lib("arrow") move_lib("arrow_python") + if self.with_plasma: + move_lib("plasma") if self.with_parquet: move_lib("parquet") @@ -270,11 +276,20 @@ def move_lib(lib_name): shutil.move(self.get_ext_built_api_header(name), pjoin(os.path.dirname(ext_path), name + '_api.h')) + # Move the plasma store + if self.with_plasma: + build_py = self.get_finalized_command('build_py') + source = os.path.join(self.build_type, "plasma_store") + target = os.path.join(build_lib, build_py.get_package_dir('pyarrow'), "plasma_store") + shutil.move(source, target) + os.chdir(saved_cwd) def _failure_permitted(self, name): if name == '_parquet' and not self.with_parquet: return True + if name == 'plasma' and not self.with_plasma: + return True return False def _get_inplace_dir(self): From 6042c48952306bbc091055de812ad5b1f1b56818 Mon Sep 17 00:00:00 2001 From: Max Risuhin Date: Mon, 24 Jul 2017 17:27:09 -0400 Subject: [PATCH 10/16] =?UTF-8?q?ARROW-1195:=20[C++]=20CpuInfo=20init=20wi?= =?UTF-8?q?th=20cores=20number,=20frequency=20and=20cache=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … sizes on Windows Author: Max Risuhin Closes #877 from MaxRis/ARROW-1195 and squashes the following commits: 675d5846 [Max Risuhin] ARROW-1195: [C++] CpuInfo init with cores number, frequency and cache sizes on Windows --- cpp/src/arrow/util/cpu-info.cc | 87 +++++++++++++++++++++++++++++----- cpp/src/arrow/util/cpu-info.h | 3 ++ 2 files changed, 78 insertions(+), 12 deletions(-) diff --git a/cpp/src/arrow/util/cpu-info.cc b/cpp/src/arrow/util/cpu-info.cc index c0fc8bdddf4bf..dcd6b4027d966 100644 --- a/cpp/src/arrow/util/cpu-info.cc +++ b/cpp/src/arrow/util/cpu-info.cc 
@@ -30,6 +30,10 @@ #include #endif +#ifdef _WIN32 +#include +#endif + #include #include @@ -79,6 +83,45 @@ int64_t ParseCPUFlags(const string& values) { return flags; } +#ifdef _WIN32 +bool RetrieveCacheSize(int64_t* cache_sizes) { + if (!cache_sizes) { return false; } + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = nullptr; + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer_position = nullptr; + DWORD buffer_size = 0; + DWORD offset = 0; + typedef BOOL(WINAPI * GetLogicalProcessorInformationFuncPointer)(void*, void*); + GetLogicalProcessorInformationFuncPointer func_pointer = + (GetLogicalProcessorInformationFuncPointer)GetProcAddress( + GetModuleHandle("kernel32"), "GetLogicalProcessorInformation"); + + if (!func_pointer) { return false; } + + // Get buffer size + if (func_pointer(buffer, &buffer_size) && GetLastError() != ERROR_INSUFFICIENT_BUFFER) + return false; + + buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(buffer_size); + + if (!buffer || !func_pointer(buffer, &buffer_size)) { return false; } + + buffer_position = buffer; + while (offset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= buffer_size) { + if (RelationCache == buffer_position->Relationship) { + PCACHE_DESCRIPTOR cache = &buffer_position->Cache; + if (cache->Level >= 1 && cache->Level <= 3) { + cache_sizes[cache->Level - 1] += cache->Size; + } + } + offset += sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); + buffer_position++; + } + + if (buffer) { free(buffer); } + return true; +} +#endif + void CpuInfo::Init() { std::lock_guard cpuinfo_lock(cpuinfo_mutex); @@ -93,6 +136,16 @@ void CpuInfo::Init() { memset(&cache_sizes_, 0, sizeof(cache_sizes_)); +#ifdef _WIN32 + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + num_cores = system_info.dwNumberOfProcessors; + + LARGE_INTEGER performance_frequency; + if (QueryPerformanceFrequency(&performance_frequency)) { + max_mhz = static_cast(performance_frequency.QuadPart); + } +#else // Read from /proc/cpuinfo std::ifstream cpuinfo("/proc/cpuinfo", std::ios::in); while (cpuinfo) { @@ -120,6 +173,7 @@ void CpuInfo::Init() { } } if (cpuinfo.is_open()) cpuinfo.close(); +#endif #ifdef __APPLE__ // On Mac OS X use sysctl() to get the cache sizes @@ -131,22 +185,17 @@ void CpuInfo::Init() { for (size_t i = 0; i < 3; ++i) { cache_sizes_[i] = data[i]; } +#elif _WIN32 + if (!RetrieveCacheSize(cache_sizes_)) { SetDefaultCacheSize(); } #else -#ifndef _SC_LEVEL1_DCACHE_SIZE - // Provide reasonable default values if no info - cache_sizes_[0] = 32 * 1024; // Level 1: 32k - cache_sizes_[1] = 256 * 1024; // Level 2: 256k - cache_sizes_[2] = 3072 * 1024; // Level 3: 3M -#else - // Call sysconf to query for the cache sizes - cache_sizes_[0] = sysconf(_SC_LEVEL1_DCACHE_SIZE); - cache_sizes_[1] = sysconf(_SC_LEVEL2_CACHE_SIZE); - cache_sizes_[2] = sysconf(_SC_LEVEL3_CACHE_SIZE); -#endif + SetDefaultCacheSize(); #endif if (max_mhz != 0) { - cycles_per_ms_ = static_cast(max_mhz) * 1000; + cycles_per_ms_ = static_cast(max_mhz); +#ifndef _WIN32 + cycles_per_ms_ *= 1000; +#endif } else { cycles_per_ms_ = 1000000; } @@ -203,4 +252,18 @@ std::string CpuInfo::model_name() { return model_name_; } +void CpuInfo::SetDefaultCacheSize() { +#ifndef _SC_LEVEL1_DCACHE_SIZE + // Provide reasonable default values if no info + cache_sizes_[0] = 32 * 1024; // Level 1: 32k + cache_sizes_[1] = 256 * 1024; // Level 2: 256k + cache_sizes_[2] = 3072 * 1024; // Level 3: 3M +#else + // Call sysconf to query for the cache sizes + cache_sizes_[0] = sysconf(_SC_LEVEL1_DCACHE_SIZE); + cache_sizes_[1] = 
sysconf(_SC_LEVEL2_CACHE_SIZE); + cache_sizes_[2] = sysconf(_SC_LEVEL3_CACHE_SIZE); +#endif +} + } // namespace arrow diff --git a/cpp/src/arrow/util/cpu-info.h b/cpp/src/arrow/util/cpu-info.h index 06800fc275572..f4bc8c35e3447 100644 --- a/cpp/src/arrow/util/cpu-info.h +++ b/cpp/src/arrow/util/cpu-info.h @@ -78,6 +78,9 @@ class ARROW_EXPORT CpuInfo { static bool initialized() { return initialized_; } private: + /// Inits CPU cache size variables with default values + static void SetDefaultCacheSize(); + static bool initialized_; static int64_t hardware_flags_; static int64_t original_hardware_flags_; From ecdc86ba45c9dc1739587ec664080e9f2a9b479f Mon Sep 17 00:00:00 2001 From: siddharth Date: Mon, 24 Jul 2017 21:39:02 -0400 Subject: [PATCH 11/16] ARROW-1249: [JAVA] expose fillEmpties from Nullable variable length vectors This will allow us to do some cleanup in Dremio where we have written wrapper routines using Reflection to access the fillEmpties method of mutator. Unit tests have been added. Author: siddharth Closes #880 from siddharthteotia/ARROW-1249 and squashes the following commits: e0532c5c [siddharth] Merge branch 'ARROW-1249' of https://github.com/siddharthteotia/arrow into ARROW-1249 dc052061 [siddharth] ARROW-1249: Expose fillEmpties() from Nullable Variable Length Vectors f24d8f11 [siddharth] ARROW-1249: expose fillEmpties from Nullable variable length vectors --- .../templates/NullableValueVectors.java | 2 +- .../apache/arrow/vector/TestValueVector.java | 97 +++++++++++++++++++ 2 files changed, 98 insertions(+), 1 deletion(-) diff --git a/java/vector/src/main/codegen/templates/NullableValueVectors.java b/java/vector/src/main/codegen/templates/NullableValueVectors.java index 1decd0b313802..5b993678012b5 100644 --- a/java/vector/src/main/codegen/templates/NullableValueVectors.java +++ b/java/vector/src/main/codegen/templates/NullableValueVectors.java @@ -540,7 +540,7 @@ public void set(int index, <#if type.major == "VarLen">byte[]<#elseif (type.widt <#if type.major == "VarLen"> - private void fillEmpties(int index){ + public void fillEmpties(int index){ final ${valuesName}.Mutator valuesMutator = values.getMutator(); for (int i = lastSet + 1; i < index; i++) { valuesMutator.setSafe(i, emptyByteArray); diff --git a/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java b/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java index f5508aab2ce1d..0f41c2dd790e1 100644 --- a/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java +++ b/java/vector/src/test/java/org/apache/arrow/vector/TestValueVector.java @@ -639,6 +639,103 @@ public void testVectorLoadUnload() { } } + @Test + public void testFillEmptiesUsage() { + try (final NullableVarCharVector vector = new NullableVarCharVector("myvector", allocator)) { + + final NullableVarCharVector.Mutator mutator = vector.getMutator(); + + vector.allocateNew(1024 * 10, 1024); + + setBytes(0, STR1, vector); + setBytes(1, STR2, vector); + setBytes(2, STR3, vector); + setBytes(3, STR4, vector); + setBytes(4, STR5, vector); + setBytes(5, STR6, vector); + + /* Check current lastSet */ + assertEquals(Integer.toString(-1), Integer.toString(mutator.getLastSet())); + + /* Check the vector output */ + final NullableVarCharVector.Accessor accessor = vector.getAccessor(); + assertArrayEquals(STR1, accessor.get(0)); + assertArrayEquals(STR2, accessor.get(1)); + assertArrayEquals(STR3, accessor.get(2)); + assertArrayEquals(STR4, accessor.get(3)); + assertArrayEquals(STR5, accessor.get(4)); + 
assertArrayEquals(STR6, accessor.get(5)); + + mutator.setLastSet(5); + /* fill empty byte arrays from index [6, 9] */ + mutator.fillEmpties(10); + + /* Check current lastSet */ + assertEquals(Integer.toString(9), Integer.toString(mutator.getLastSet())); + + /* Check the vector output */ + assertArrayEquals(STR1, accessor.get(0)); + assertArrayEquals(STR2, accessor.get(1)); + assertArrayEquals(STR3, accessor.get(2)); + assertArrayEquals(STR4, accessor.get(3)); + assertArrayEquals(STR5, accessor.get(4)); + assertArrayEquals(STR6, accessor.get(5)); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(6))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(7))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(8))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(9))); + + setBytes(10, STR1, vector); + setBytes(11, STR2, vector); + + mutator.setLastSet(11); + /* fill empty byte arrays from index [12, 14] */ + mutator.setValueCount(15); + + /* Check current lastSet */ + assertEquals(Integer.toString(14), Integer.toString(mutator.getLastSet())); + + /* Check the vector output */ + assertArrayEquals(STR1, accessor.get(0)); + assertArrayEquals(STR2, accessor.get(1)); + assertArrayEquals(STR3, accessor.get(2)); + assertArrayEquals(STR4, accessor.get(3)); + assertArrayEquals(STR5, accessor.get(4)); + assertArrayEquals(STR6, accessor.get(5)); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(6))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(7))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(8))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(9))); + assertArrayEquals(STR1, accessor.get(10)); + assertArrayEquals(STR2, accessor.get(11)); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(12))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(13))); + assertEquals(Integer.toString(0), Integer.toString(accessor.getValueLength(14))); + + /* Check offsets */ + final UInt4Vector.Accessor offsetAccessor = vector.values.offsetVector.getAccessor(); + assertEquals(Integer.toString(0), Integer.toString(offsetAccessor.get(0))); + assertEquals(Integer.toString(6), Integer.toString(offsetAccessor.get(1))); + assertEquals(Integer.toString(16), Integer.toString(offsetAccessor.get(2))); + assertEquals(Integer.toString(21), Integer.toString(offsetAccessor.get(3))); + assertEquals(Integer.toString(30), Integer.toString(offsetAccessor.get(4))); + assertEquals(Integer.toString(34), Integer.toString(offsetAccessor.get(5))); + + assertEquals(Integer.toString(40), Integer.toString(offsetAccessor.get(6))); + assertEquals(Integer.toString(40), Integer.toString(offsetAccessor.get(7))); + assertEquals(Integer.toString(40), Integer.toString(offsetAccessor.get(8))); + assertEquals(Integer.toString(40), Integer.toString(offsetAccessor.get(9))); + assertEquals(Integer.toString(40), Integer.toString(offsetAccessor.get(10))); + + assertEquals(Integer.toString(46), Integer.toString(offsetAccessor.get(11))); + assertEquals(Integer.toString(56), Integer.toString(offsetAccessor.get(12))); + + assertEquals(Integer.toString(56), Integer.toString(offsetAccessor.get(13))); + assertEquals(Integer.toString(56), Integer.toString(offsetAccessor.get(14))); + assertEquals(Integer.toString(56), Integer.toString(offsetAccessor.get(15))); + } + } + public static void 
setBytes(int index, byte[] bytes, NullableVarCharVector vector) { final int currentOffset = vector.values.offsetVector.getAccessor().get(index); From 886e2af77d29166b5f45829e0d76e33529e00f15 Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Mon, 24 Jul 2017 21:41:08 -0400 Subject: [PATCH 12/16] ARROW-1259: [Plasma] Speed up plasma tests Author: Philipp Moritz Closes #882 from pcmoritz/plasma-test-speedup and squashes the following commits: 09ffbdfd [Philipp Moritz] speed up plasma tests --- python/pyarrow/tests/test_plasma.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/pyarrow/tests/test_plasma.py b/python/pyarrow/tests/test_plasma.py index ce684e3e41f1b..ab64463e93111 100644 --- a/python/pyarrow/tests/test_plasma.py +++ b/python/pyarrow/tests/test_plasma.py @@ -598,7 +598,7 @@ def test_evict(self): def test_subscribe(self): # Subscribe to notifications from the Plasma Store. self.plasma_client.subscribe() - for i in [1, 10, 100, 1000, 10000, 100000]: + for i in [1, 10, 100, 1000, 10000]: object_ids = [random_object_id() for _ in range(i)] metadata_sizes = [np.random.randint(1000) for _ in range(i)] data_sizes = [np.random.randint(1000) for _ in range(i)] @@ -620,7 +620,7 @@ def test_subscribe_deletions(self): # plasma_client2 to make sure that all used objects will get evicted # properly. self.plasma_client2.subscribe() - for i in [1, 10, 100, 1000, 10000, 100000]: + for i in [1, 10, 100, 1000, 10000]: object_ids = [random_object_id() for _ in range(i)] # Add 1 to the sizes to make sure we have nonzero object sizes. metadata_sizes = [np.random.randint(1000) + 1 for _ in range(i)] From 9e692af8b267c9c676e568baa5f45bdb435b7b62 Mon Sep 17 00:00:00 2001 From: Bryan Cutler Date: Mon, 24 Jul 2017 21:41:58 -0400 Subject: [PATCH 13/16] ARROW-1245: [Integration] Enable JavaTester in Integration tests JavaTester was commented out, probably accidentally in a previous commit; this change re-enables it.
Author: Bryan Cutler Closes #875 from BryanCutler/enable-java-integration-ARROW-1245 and squashes the following commits: c08c6e23 [Bryan Cutler] enabled JavaTester in integration tests --- integration/integration_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/integration_test.py b/integration/integration_test.py index 215ba58232a00..b7f1609935e79 100644 --- a/integration/integration_test.py +++ b/integration/integration_test.py @@ -945,7 +945,7 @@ def get_static_json_files(): def run_all_tests(debug=False): - testers = [CPPTester(debug=debug)] # , JavaTester(debug=debug)] + testers = [CPPTester(debug=debug), JavaTester(debug=debug)] static_json_files = get_static_json_files() generated_json_files = get_generated_json_files() json_files = static_json_files + generated_json_files From 11c92bf282de5dad5b40f0e483a241876b807ddd Mon Sep 17 00:00:00 2001 From: Wes McKinney Date: Mon, 24 Jul 2017 21:54:30 -0400 Subject: [PATCH 14/16] ARROW-1246: [Format] Draft Flatbuffer metadata description for Map Author: Wes McKinney Closes #876 from wesm/ARROW-1246 and squashes the following commits: 98790dfc [Wes McKinney] Review feedback to clarify nullability of map components 346b48dd [Wes McKinney] Typo 06ae8ebf [Wes McKinney] Draft Flatbuffer metadata for Map --- format/Schema.fbs | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/format/Schema.fbs b/format/Schema.fbs index a7e802b9dcba6..186f8e362bde2 100644 --- a/format/Schema.fbs +++ b/format/Schema.fbs @@ -44,6 +44,35 @@ table FixedSizeList { listSize: int; } +/// A Map is a logical nested type that is represented as +/// +/// List> +/// +/// In this layout, the keys and values are each respectively contiguous. We do +/// not constrain the key and value types, so the application is responsible +/// for ensuring that the keys are hashable and unique. Whether the keys are sorted +/// may be set in the metadata for this field +/// +/// In a Field with Map type, the Field has a child Struct field, which then +/// has two children: key type and the second the value type. The names of the +/// child fields may be respectively "entry", "key", and "value", but this is +/// not enforced +/// +/// Map +/// - child[0] entry: Struct +/// - child[0] key: K +/// - child[1] value: V +/// +/// Neither the "entry" field nor the "key" field may be nullable. +/// +/// The metadata is structured so that Arrow systems without special handling +/// for Map can make Map an alias for List. The "layout" attribute for the Map +/// field must have the same contents as a List. 
+table Map { + /// Set to true if the keys within each value are sorted + keysSorted: bool; +} + enum UnionMode:short { Sparse, Dense } /// A union is a complex type with children in Field @@ -170,7 +199,8 @@ union Type { Struct_, Union, FixedSizeBinary, - FixedSizeList + FixedSizeList, + Map } /// ---------------------------------------------------------------------- From 204f148bf560dfb1291f5d7e4d237e3b6c120430 Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Mon, 24 Jul 2017 22:53:11 -0400 Subject: [PATCH 15/16] ARROW-1260: [Plasma] Use factory method to create Python PlasmaClient Author: Philipp Moritz Closes #883 from pcmoritz/plasma-client-connect and squashes the following commits: 667629f3 [Philipp Moritz] cleanup 886beabb [Philipp Moritz] create factory method for connecting to the plasma client 09ffbdfd [Philipp Moritz] speed up plasma tests --- python/pyarrow/plasma.pyx | 46 ++++++++++++++++------------- python/pyarrow/tests/test_plasma.py | 4 +-- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/python/pyarrow/plasma.pyx b/python/pyarrow/plasma.pyx index bb17685277af1..8aaca9963c131 100644 --- a/python/pyarrow/plasma.pyx +++ b/python/pyarrow/plasma.pyx @@ -206,28 +206,11 @@ cdef class PlasmaClient: c_string store_socket_name c_string manager_socket_name - def __cinit__(self, store_socket_name, manager_socket_name, int release_delay): - """ - Create a new PlasmaClient that is connected to a plasma store - and optionally a plasma manager. - - Parameters - ---------- - store_socket_name : str - Name of the socket the plasma store is listening at. - manager_socket_name : str - Name of the socket the plasma manager is listening at. - release_delay : int - The maximum number of objects that the client will keep and - delay releasing (for caching reasons). - """ + def __cinit__(self): self.client.reset(new CPlasmaClient()) self.notification_fd = -1 - self.store_socket_name = store_socket_name.encode() - self.manager_socket_name = manager_socket_name.encode() - with nogil: - check_status(self.client.get().Connect(self.store_socket_name, - self.manager_socket_name, release_delay)) + self.store_socket_name = "" + self.manager_socket_name = "" cdef _get_object_buffers(self, object_ids, int64_t timeout_ms, c_vector[CObjectBuffer]* result): @@ -558,3 +541,26 @@ cdef class PlasmaClient: """ with nogil: check_status(self.client.get().Disconnect()) + +def connect(store_socket_name, manager_socket_name, int release_delay): + """ + Return a new PlasmaClient that is connected a plasma store and + optionally a manager. + + Parameters + ---------- + store_socket_name : str + Name of the socket the plasma store is listening at. + manager_socket_name : str + Name of the socket the plasma manager is listening at. + release_delay : int + The maximum number of objects that the client will keep and + delay releasing (for caching reasons). 
+ """ + cdef PlasmaClient result = PlasmaClient() + result.store_socket_name = store_socket_name.encode() + result.manager_socket_name = manager_socket_name.encode() + with nogil: + check_status(result.client.get().Connect(result.store_socket_name, + result.manager_socket_name, release_delay)) + return result diff --git a/python/pyarrow/tests/test_plasma.py b/python/pyarrow/tests/test_plasma.py index ab64463e93111..8f8d5b5ed607b 100644 --- a/python/pyarrow/tests/test_plasma.py +++ b/python/pyarrow/tests/test_plasma.py @@ -154,9 +154,9 @@ def setup_method(self, test_method): plasma_store_name, self.p = start_plasma_store( use_valgrind=os.getenv("PLASMA_VALGRIND") == "1") # Connect to Plasma. - self.plasma_client = plasma.PlasmaClient(plasma_store_name, "", 64) + self.plasma_client = plasma.connect(plasma_store_name, "", 64) # For the eviction test - self.plasma_client2 = plasma.PlasmaClient(plasma_store_name, "", 0) + self.plasma_client2 = plasma.connect(plasma_store_name, "", 0) def teardown_method(self, test_method): # Check that the Plasma store is still alive. From 07b89bf3aaa6639303014bc173e3e371aed363c1 Mon Sep 17 00:00:00 2001 From: Wes McKinney Date: Mon, 24 Jul 2017 22:55:51 -0400 Subject: [PATCH 16/16] ARROW-1219: [C++] Use Google C++ code formatting Our coding style guide has a ton of rules. I put up this patch to discuss since we don't have too many patches outstanding right now. This uses the exact Google style used in TensorFlow and other projects, but relaxes the column limit to 90 characters. The main change is horizontal alignment in function signatures: ```diff void ValidateBasicStructArray(const StructArray* result, - const vector& struct_is_valid, const vector& list_values, - const vector& list_is_valid, const vector& list_lengths, - const vector& list_offsets, const vector& int_values) { + const vector& struct_is_valid, + const vector& list_values, + const vector& list_is_valid, + const vector& list_lengths, + const vector& list_offsets, + const vector& int_values) { ``` I find the paren-aligned version a bit more readable, but it's a matter of taste Author: Wes McKinney Closes #848 from wesm/google-style and squashes the following commits: 9e8fe4fe [Wes McKinney] Move import to platform.h 8690257d [Wes McKinney] Use Google C++ code formatting --- cpp/.clang-format | 65 +----- cpp/src/arrow/allocator-test.cc | 2 +- cpp/src/arrow/array-decimal-test.cc | 73 ++++--- cpp/src/arrow/array-test.cc | 108 +++++----- cpp/src/arrow/array.cc | 141 ++++++++----- cpp/src/arrow/array.h | 110 +++++----- cpp/src/arrow/buffer.cc | 41 ++-- cpp/src/arrow/buffer.h | 62 +++--- cpp/src/arrow/builder-benchmark.cc | 16 +- cpp/src/arrow/builder.cc | 196 +++++++++-------- cpp/src/arrow/builder.h | 63 +++--- cpp/src/arrow/compare.cc | 168 +++++++++------ cpp/src/arrow/compare.h | 15 +- cpp/src/arrow/io/file.cc | 146 ++++++------- cpp/src/arrow/io/file.h | 12 +- cpp/src/arrow/io/hdfs-internal.cc | 66 +++--- cpp/src/arrow/io/hdfs-internal.h | 12 +- cpp/src/arrow/io/hdfs.cc | 155 +++++++------- cpp/src/arrow/io/hdfs.h | 18 +- cpp/src/arrow/io/interfaces.cc | 20 +- cpp/src/arrow/io/interfaces.h | 4 +- cpp/src/arrow/io/io-file-test.cc | 29 ++- cpp/src/arrow/io/io-hdfs-test.cc | 29 ++- cpp/src/arrow/io/memory.cc | 20 +- cpp/src/arrow/io/memory.h | 2 +- cpp/src/arrow/io/test-common.h | 4 +- cpp/src/arrow/ipc/feather-internal.h | 8 +- cpp/src/arrow/ipc/feather-test.cc | 7 +- cpp/src/arrow/ipc/feather.cc | 146 ++++++------- cpp/src/arrow/ipc/feather.h | 6 +- cpp/src/arrow/ipc/file-to-stream.cc | 2 +- 
cpp/src/arrow/ipc/ipc-json-test.cc | 42 ++-- cpp/src/arrow/ipc/ipc-read-write-benchmark.cc | 4 +- cpp/src/arrow/ipc/ipc-read-write-test.cc | 92 ++++---- cpp/src/arrow/ipc/json-integration-test.cc | 35 ++-- cpp/src/arrow/ipc/json-internal.cc | 92 ++++---- cpp/src/arrow/ipc/json-internal.h | 10 +- cpp/src/arrow/ipc/json.cc | 32 ++- cpp/src/arrow/ipc/json.h | 10 +- cpp/src/arrow/ipc/metadata.cc | 198 +++++++++--------- cpp/src/arrow/ipc/metadata.h | 45 ++-- cpp/src/arrow/ipc/reader.cc | 92 ++++---- cpp/src/arrow/ipc/reader.h | 29 +-- cpp/src/arrow/ipc/stream-to-file.cc | 2 +- cpp/src/arrow/ipc/test-common.h | 87 ++++---- cpp/src/arrow/ipc/writer.cc | 159 +++++++------- cpp/src/arrow/ipc/writer.h | 23 +- cpp/src/arrow/memory_pool-test.cc | 10 +- cpp/src/arrow/memory_pool.cc | 30 ++- cpp/src/arrow/pretty_print-test.cc | 2 +- cpp/src/arrow/pretty_print.cc | 62 +++--- cpp/src/arrow/python/arrow_to_pandas.cc | 86 ++++---- cpp/src/arrow/python/arrow_to_pandas.h | 12 +- cpp/src/arrow/python/builtin_convert.cc | 39 ++-- cpp/src/arrow/python/builtin_convert.h | 19 +- cpp/src/arrow/python/config.cc | 2 - cpp/src/arrow/python/helpers.cc | 16 +- cpp/src/arrow/python/helpers.h | 13 +- cpp/src/arrow/python/init.cc | 4 +- cpp/src/arrow/python/io.cc | 24 +-- cpp/src/arrow/python/numpy_convert.cc | 43 ++-- cpp/src/arrow/python/numpy_convert.h | 4 +- cpp/src/arrow/python/pandas_to_arrow.cc | 77 ++++--- cpp/src/arrow/python/pandas_to_arrow.h | 5 +- cpp/src/arrow/python/platform.h | 1 + cpp/src/arrow/python/pyarrow.cc | 40 +--- cpp/src/arrow/python/pyarrow.h | 4 +- cpp/src/arrow/python/python-test.cc | 11 +- cpp/src/arrow/python/util/datetime.h | 8 +- cpp/src/arrow/status.cc | 8 +- cpp/src/arrow/status.h | 24 ++- cpp/src/arrow/table-test.cc | 10 +- cpp/src/arrow/table.cc | 99 ++++++--- cpp/src/arrow/table.h | 23 +- cpp/src/arrow/tensor.cc | 26 +-- cpp/src/arrow/tensor.h | 8 +- cpp/src/arrow/test-util.h | 74 ++++--- cpp/src/arrow/type-test.cc | 8 +- cpp/src/arrow/type.cc | 118 +++++------ cpp/src/arrow/type.h | 36 ++-- cpp/src/arrow/type_traits.h | 7 +- cpp/src/arrow/util/bit-stream-utils.h | 25 ++- cpp/src/arrow/util/bit-util-test.cc | 24 ++- cpp/src/arrow/util/bit-util.cc | 30 ++- cpp/src/arrow/util/bit-util.h | 179 +++++----------- cpp/src/arrow/util/bpacking.h | 99 +++------ cpp/src/arrow/util/compression-test.cc | 38 ++-- cpp/src/arrow/util/compression.h | 5 +- cpp/src/arrow/util/compression_brotli.cc | 9 +- cpp/src/arrow/util/compression_brotli.h | 4 +- cpp/src/arrow/util/compression_lz4.cc | 27 ++- cpp/src/arrow/util/compression_lz4.h | 4 +- cpp/src/arrow/util/compression_snappy.cc | 14 +- cpp/src/arrow/util/compression_snappy.h | 4 +- cpp/src/arrow/util/compression_zlib.cc | 35 ++-- cpp/src/arrow/util/compression_zlib.h | 4 +- cpp/src/arrow/util/compression_zstd.cc | 14 +- cpp/src/arrow/util/compression_zstd.h | 4 +- cpp/src/arrow/util/cpu-info.cc | 32 ++- cpp/src/arrow/util/decimal.cc | 60 ++++-- cpp/src/arrow/util/decimal.h | 22 +- cpp/src/arrow/util/key_value_metadata.cc | 4 +- cpp/src/arrow/util/key_value_metadata.h | 4 +- cpp/src/arrow/util/logging.h | 33 ++- cpp/src/arrow/util/memory.h | 10 +- cpp/src/arrow/util/random.h | 14 +- cpp/src/arrow/util/rle-encoding-test.cc | 31 ++- cpp/src/arrow/util/rle-encoding.h | 26 +-- cpp/src/arrow/util/sse-util.h | 4 +- cpp/src/arrow/util/stl.h | 4 +- cpp/src/arrow/util/string.h | 4 +- cpp/src/plasma/client.cc | 70 ++++--- cpp/src/plasma/client.h | 18 +- cpp/src/plasma/common.cc | 10 +- cpp/src/plasma/common.h | 2 +- cpp/src/plasma/events.cc | 22 +- 
cpp/src/plasma/events.h | 2 +- cpp/src/plasma/eviction_policy.cc | 22 +- cpp/src/plasma/eviction_policy.h | 18 +- cpp/src/plasma/io.cc | 40 ++-- cpp/src/plasma/malloc.cc | 20 +- cpp/src/plasma/plasma.cc | 14 +- cpp/src/plasma/plasma.h | 6 +- cpp/src/plasma/protocol.cc | 116 +++++----- cpp/src/plasma/protocol.h | 53 ++--- cpp/src/plasma/store.cc | 68 +++--- cpp/src/plasma/store.h | 8 +- cpp/src/plasma/test/client_tests.cc | 2 +- cpp/src/plasma/test/serialization_tests.cc | 18 +- 129 files changed, 2565 insertions(+), 2362 deletions(-) diff --git a/cpp/.clang-format b/cpp/.clang-format index 33f282a20de20..06453dfbb25b7 100644 --- a/cpp/.clang-format +++ b/cpp/.clang-format @@ -15,67 +15,6 @@ # specific language governing permissions and limitations # under the License. --- -Language: Cpp -# BasedOnStyle: Google -AccessModifierOffset: -1 -AlignAfterOpenBracket: false -AlignConsecutiveAssignments: false -AlignEscapedNewlinesLeft: true -AlignOperands: true -AlignTrailingComments: true -AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: true -AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: Inline -AllowShortIfStatementsOnASingleLine: true -AllowShortLoopsOnASingleLine: false -AlwaysBreakAfterDefinitionReturnType: None -AlwaysBreakBeforeMultilineStrings: true -AlwaysBreakTemplateDeclarations: true -BinPackArguments: true -BinPackParameters: true -BreakBeforeBinaryOperators: None -BreakBeforeBraces: Attach -BreakBeforeTernaryOperators: true -BreakConstructorInitializersBeforeComma: false -ColumnLimit: 90 -CommentPragmas: '^ IWYU pragma:' -ConstructorInitializerAllOnOneLineOrOnePerLine: true -ConstructorInitializerIndentWidth: 4 -ContinuationIndentWidth: 4 -Cpp11BracedListStyle: true +BasedOnStyle: Google DerivePointerAlignment: false -DisableFormat: false -ExperimentalAutoDetectBinPacking: false -ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] -IndentCaseLabels: true -IndentWidth: 2 -IndentWrappedFunctionNames: false -KeepEmptyLinesAtTheStartOfBlocks: false -MacroBlockBegin: '' -MacroBlockEnd: '' -MaxEmptyLinesToKeep: 1 -NamespaceIndentation: None -ObjCBlockIndentWidth: 2 -ObjCSpaceAfterProperty: false -ObjCSpaceBeforeProtocolList: false -PenaltyBreakBeforeFirstCallParameter: 1000 -PenaltyBreakComment: 300 -PenaltyBreakFirstLessLess: 120 -PenaltyBreakString: 1000 -PenaltyExcessCharacter: 1000000 -PenaltyReturnTypeOnItsOwnLine: 200 -PointerAlignment: Left -SpaceAfterCStyleCast: false -SpaceBeforeAssignmentOperators: true -SpaceBeforeParens: ControlStatements -SpaceInEmptyParentheses: false -SpacesBeforeTrailingComments: 2 -SpacesInAngles: false -SpacesInContainerLiterals: true -SpacesInCStyleCastParentheses: false -SpacesInParentheses: false -SpacesInSquareBrackets: false -Standard: Cpp11 -TabWidth: 8 -UseTab: Never +ColumnLimit: 90 diff --git a/cpp/src/arrow/allocator-test.cc b/cpp/src/arrow/allocator-test.cc index 5a4e98d76600f..f3a80cdae818b 100644 --- a/cpp/src/arrow/allocator-test.cc +++ b/cpp/src/arrow/allocator-test.cc @@ -48,7 +48,7 @@ TEST(stl_allocator, FreeLargeMemory) { #ifndef NDEBUG EXPECT_EXIT(alloc.deallocate(data, 120), ::testing::ExitedWithCode(1), - ".*Check failed: \\(bytes_allocated_\\) >= \\(size\\)"); + ".*Check failed: \\(bytes_allocated_\\) >= \\(size\\)"); #endif alloc.deallocate(data, 100); diff --git a/cpp/src/arrow/array-decimal-test.cc b/cpp/src/arrow/array-decimal-test.cc index 0959d686498d5..436ce9cf7c312 100644 --- a/cpp/src/arrow/array-decimal-test.cc +++ b/cpp/src/arrow/array-decimal-test.cc @@ -28,12 
+28,12 @@ namespace decimal { template class DecimalTestBase { public: - virtual std::vector data( - const std::vector& input, size_t byte_width) const = 0; + virtual std::vector data(const std::vector& input, + size_t byte_width) const = 0; void test(int precision, const std::vector& draw, - const std::vector& valid_bytes, - const std::vector& sign_bitmap = {}, int64_t offset = 0) const { + const std::vector& valid_bytes, + const std::vector& sign_bitmap = {}, int64_t offset = 0) const { auto type = std::make_shared(precision, 4); int byte_width = type->byte_width(); auto pool = default_memory_pool(); @@ -63,8 +63,9 @@ class DecimalTestBase { ASSERT_OK(BitUtil::BytesToBits(valid_bytes, &expected_null_bitmap)); int64_t expected_null_count = test::null_count(valid_bytes); - auto expected = std::make_shared(type, size, expected_data, - expected_null_bitmap, expected_null_count, offset, expected_sign_bitmap); + auto expected = + std::make_shared(type, size, expected_data, expected_null_bitmap, + expected_null_count, offset, expected_sign_bitmap); std::shared_ptr out; ASSERT_OK(builder->Finish(&out)); @@ -75,8 +76,8 @@ class DecimalTestBase { template class DecimalTest : public DecimalTestBase { public: - std::vector data( - const std::vector& input, size_t byte_width) const override { + std::vector data(const std::vector& input, + size_t byte_width) const override { std::vector result(input.size() * byte_width); // TODO(phillipc): There's probably a better way to do this constexpr static const size_t bytes_per_element = sizeof(T); @@ -90,8 +91,8 @@ class DecimalTest : public DecimalTestBase { template <> class DecimalTest : public DecimalTestBase { public: - std::vector data( - const std::vector& input, size_t byte_width) const override { + std::vector data(const std::vector& input, + size_t byte_width) const override { std::vector result; result.reserve(input.size() * byte_width); constexpr static const size_t bytes_per_element = 16; @@ -120,24 +121,24 @@ class Decimal128BuilderTest : public ::testing::TestWithParam, TEST_P(Decimal32BuilderTest, NoNulls) { int precision = GetParam(); - std::vector draw = { - Decimal32(1), Decimal32(2), Decimal32(2389), Decimal32(4), Decimal32(-12348)}; + std::vector draw = {Decimal32(1), Decimal32(2), Decimal32(2389), + Decimal32(4), Decimal32(-12348)}; std::vector valid_bytes = {true, true, true, true, true}; this->test(precision, draw, valid_bytes); } TEST_P(Decimal64BuilderTest, NoNulls) { int precision = GetParam(); - std::vector draw = { - Decimal64(1), Decimal64(2), Decimal64(2389), Decimal64(4), Decimal64(-12348)}; + std::vector draw = {Decimal64(1), Decimal64(2), Decimal64(2389), + Decimal64(4), Decimal64(-12348)}; std::vector valid_bytes = {true, true, true, true, true}; this->test(precision, draw, valid_bytes); } TEST_P(Decimal128BuilderTest, NoNulls) { int precision = GetParam(); - std::vector draw = { - Decimal128(1), Decimal128(-2), Decimal128(2389), Decimal128(4), Decimal128(-12348)}; + std::vector draw = {Decimal128(1), Decimal128(-2), Decimal128(2389), + Decimal128(4), Decimal128(-12348)}; std::vector valid_bytes = {true, true, true, true, true}; std::vector sign_bitmap = {false, true, false, false, true}; this->test(precision, draw, valid_bytes, sign_bitmap); @@ -145,41 +146,47 @@ TEST_P(Decimal128BuilderTest, NoNulls) { TEST_P(Decimal32BuilderTest, WithNulls) { int precision = GetParam(); - std::vector draw = { - Decimal32(1), Decimal32(2), Decimal32(-1), Decimal32(4), Decimal32(-1)}; + std::vector draw = {Decimal32(1), Decimal32(2), 
Decimal32(-1), Decimal32(4), + Decimal32(-1)}; std::vector valid_bytes = {true, true, false, true, false}; this->test(precision, draw, valid_bytes); } TEST_P(Decimal64BuilderTest, WithNulls) { int precision = GetParam(); - std::vector draw = { - Decimal64(-1), Decimal64(2), Decimal64(-1), Decimal64(4), Decimal64(-1)}; + std::vector draw = {Decimal64(-1), Decimal64(2), Decimal64(-1), Decimal64(4), + Decimal64(-1)}; std::vector valid_bytes = {true, true, false, true, false}; this->test(precision, draw, valid_bytes); } TEST_P(Decimal128BuilderTest, WithNulls) { int precision = GetParam(); - std::vector draw = {Decimal128(1), Decimal128(2), Decimal128(-1), - Decimal128(4), Decimal128(-1), Decimal128(1), Decimal128(2), - Decimal128("230342903942.234234"), Decimal128("-23049302932.235234")}; - std::vector valid_bytes = { - true, true, false, true, false, true, true, true, true}; - std::vector sign_bitmap = { - false, false, false, false, false, false, false, false, true}; + std::vector draw = {Decimal128(1), + Decimal128(2), + Decimal128(-1), + Decimal128(4), + Decimal128(-1), + Decimal128(1), + Decimal128(2), + Decimal128("230342903942.234234"), + Decimal128("-23049302932.235234")}; + std::vector valid_bytes = {true, true, false, true, false, + true, true, true, true}; + std::vector sign_bitmap = {false, false, false, false, false, + false, false, false, true}; this->test(precision, draw, valid_bytes, sign_bitmap); } INSTANTIATE_TEST_CASE_P(Decimal32BuilderTest, Decimal32BuilderTest, - ::testing::Range( - DecimalPrecision::minimum, DecimalPrecision::maximum)); + ::testing::Range(DecimalPrecision::minimum, + DecimalPrecision::maximum)); INSTANTIATE_TEST_CASE_P(Decimal64BuilderTest, Decimal64BuilderTest, - ::testing::Range( - DecimalPrecision::minimum, DecimalPrecision::maximum)); + ::testing::Range(DecimalPrecision::minimum, + DecimalPrecision::maximum)); INSTANTIATE_TEST_CASE_P(Decimal128BuilderTest, Decimal128BuilderTest, - ::testing::Range( - DecimalPrecision::minimum, DecimalPrecision::maximum)); + ::testing::Range(DecimalPrecision::minimum, + DecimalPrecision::maximum)); } // namespace decimal } // namespace arrow diff --git a/cpp/src/arrow/array-test.cc b/cpp/src/arrow/array-test.cc index acb4819dd0949..5d63d921cdd52 100644 --- a/cpp/src/arrow/array-test.cc +++ b/cpp/src/arrow/array-test.cc @@ -64,8 +64,8 @@ TEST_F(TestArray, TestLength) { ASSERT_EQ(arr->length(), 100); } -Status MakeArrayFromValidBytes( - const vector& v, MemoryPool* pool, std::shared_ptr* out) { +Status MakeArrayFromValidBytes(const vector& v, MemoryPool* pool, + std::shared_ptr* out) { int64_t null_count = v.size() - std::accumulate(v.begin(), v.end(), 0); std::shared_ptr null_buf; @@ -147,7 +147,9 @@ TEST_F(TestArray, TestIsNull) { // clang-format on int64_t null_count = 0; for (uint8_t x : null_bitmap) { - if (x == 0) { ++null_count; } + if (x == 0) { + ++null_count; + } } std::shared_ptr null_buf; @@ -223,8 +225,8 @@ class TestPrimitiveBuilder : public TestBuilder { void Check(const std::unique_ptr& builder, bool nullable) { int64_t size = builder->length(); - auto ex_data = std::make_shared( - reinterpret_cast(draws_.data()), size * sizeof(T)); + auto ex_data = std::make_shared(reinterpret_cast(draws_.data()), + size * sizeof(T)); std::shared_ptr ex_null_bitmap; int64_t ex_null_count = 0; @@ -316,8 +318,8 @@ void TestPrimitiveBuilder::RandomData(int64_t N, double pct_null) { } template <> -void TestPrimitiveBuilder::Check( - const std::unique_ptr& builder, bool nullable) { +void TestPrimitiveBuilder::Check(const 
std::unique_ptr& builder, + bool nullable) { int64_t size = builder->length(); std::shared_ptr ex_data; @@ -351,7 +353,9 @@ void TestPrimitiveBuilder::Check( ASSERT_EQ(expected->length(), result->length()); for (int64_t i = 0; i < result->length(); ++i) { - if (nullable) { ASSERT_EQ(valid_bytes_[i] == 0, result->IsNull(i)) << i; } + if (nullable) { + ASSERT_EQ(valid_bytes_[i] == 0, result->IsNull(i)) << i; + } bool actual = BitUtil::GetBit(result->values()->data(), i); ASSERT_EQ(draws_[i] != 0, actual) << i; } @@ -359,7 +363,7 @@ void TestPrimitiveBuilder::Check( } typedef ::testing::Types + PInt32, PInt64, PFloat, PDouble> Primitives; TYPED_TEST_CASE(TestPrimitiveBuilder, Primitives); @@ -377,7 +381,7 @@ TYPED_TEST(TestPrimitiveBuilder, TestInit) { ASSERT_OK(this->builder_->Reserve(n)); ASSERT_EQ(BitUtil::NextPower2(n), this->builder_->capacity()); ASSERT_EQ(BitUtil::NextPower2(TypeTraits::bytes_required(n)), - this->builder_->data()->size()); + this->builder_->data()->size()); // unsure if this should go in all builder classes ASSERT_EQ(0, this->builder_->num_children()); @@ -440,8 +444,8 @@ TYPED_TEST(TestPrimitiveBuilder, Equality) { ASSERT_OK(MakeArray(valid_bytes, draws, size, builder, &equal_array)); // Make the not equal array by negating the first valid element with itself. - const auto first_valid = std::find_if( - valid_bytes.begin(), valid_bytes.end(), [](uint8_t valid) { return valid > 0; }); + const auto first_valid = std::find_if(valid_bytes.begin(), valid_bytes.end(), + [](uint8_t valid) { return valid > 0; }); const int64_t first_valid_idx = std::distance(valid_bytes.begin(), first_valid); // This should be true with a very high probability, but might introduce flakiness ASSERT_LT(first_valid_idx, size - 1); @@ -679,8 +683,8 @@ class TestStringArray : public ::testing::Test { ASSERT_OK(BitUtil::BytesToBits(valid_bytes_, &null_bitmap_)); null_count_ = test::null_count(valid_bytes_); - strings_ = std::make_shared( - length_, offsets_buf_, value_buf_, null_bitmap_, null_count_); + strings_ = std::make_shared(length_, offsets_buf_, value_buf_, + null_bitmap_, null_count_); } protected: @@ -723,8 +727,8 @@ TEST_F(TestStringArray, TestListFunctions) { } TEST_F(TestStringArray, TestDestructor) { - auto arr = std::make_shared( - length_, offsets_buf_, value_buf_, null_bitmap_, null_count_); + auto arr = std::make_shared(length_, offsets_buf_, value_buf_, + null_bitmap_, null_count_); } TEST_F(TestStringArray, TestGetString) { @@ -742,10 +746,10 @@ TEST_F(TestStringArray, TestEmptyStringComparison) { offsets_buf_ = test::GetBufferFromVector(offsets_); length_ = static_cast(offsets_.size() - 1); - auto strings_a = std::make_shared( - length_, offsets_buf_, nullptr, null_bitmap_, null_count_); - auto strings_b = std::make_shared( - length_, offsets_buf_, nullptr, null_bitmap_, null_count_); + auto strings_a = std::make_shared(length_, offsets_buf_, nullptr, + null_bitmap_, null_count_); + auto strings_b = std::make_shared(length_, offsets_buf_, nullptr, + null_bitmap_, null_count_); ASSERT_TRUE(strings_a->Equals(strings_b)); } @@ -893,8 +897,8 @@ class TestBinaryArray : public ::testing::Test { ASSERT_OK(BitUtil::BytesToBits(valid_bytes_, &null_bitmap_)); null_count_ = test::null_count(valid_bytes_); - strings_ = std::make_shared( - length_, offsets_buf_, value_buf_, null_bitmap_, null_count_); + strings_ = std::make_shared(length_, offsets_buf_, value_buf_, + null_bitmap_, null_count_); } protected: @@ -937,8 +941,8 @@ TEST_F(TestBinaryArray, TestListFunctions) { } 
TEST_F(TestBinaryArray, TestDestructor) { - auto arr = std::make_shared( - length_, offsets_buf_, value_buf_, null_bitmap_, null_count_); + auto arr = std::make_shared(length_, offsets_buf_, value_buf_, + null_bitmap_, null_count_); } TEST_F(TestBinaryArray, TestGetValue) { @@ -965,8 +969,9 @@ TEST_F(TestBinaryArray, TestEqualsEmptyStrings) { ASSERT_OK(builder.Finish(&left_arr)); const BinaryArray& left = static_cast(*left_arr); - std::shared_ptr right = std::make_shared(left.length(), - left.value_offsets(), nullptr, left.null_bitmap(), left.null_count()); + std::shared_ptr right = + std::make_shared(left.length(), left.value_offsets(), nullptr, + left.null_bitmap(), left.null_count()); ASSERT_TRUE(left.Equals(right)); ASSERT_TRUE(left.RangeEquals(0, left.length(), 0, right)); @@ -1082,17 +1087,11 @@ void CheckSliceEquality() { ASSERT_TRUE(array->RangeEquals(5, 25, 0, slice)); } -TEST_F(TestBinaryArray, TestSliceEquality) { - CheckSliceEquality(); -} +TEST_F(TestBinaryArray, TestSliceEquality) { CheckSliceEquality(); } -TEST_F(TestStringArray, TestSliceEquality) { - CheckSliceEquality(); -} +TEST_F(TestStringArray, TestSliceEquality) { CheckSliceEquality(); } -TEST_F(TestBinaryArray, LengthZeroCtor) { - BinaryArray array(0, nullptr, nullptr); -} +TEST_F(TestBinaryArray, LengthZeroCtor) { BinaryArray array(0, nullptr, nullptr); } // ---------------------------------------------------------------------- // FixedSizeBinary tests @@ -1126,8 +1125,8 @@ TEST_F(TestFWBinaryArray, Builder) { std::shared_ptr result; - auto CheckResult = [this, &length, &is_valid, &raw_data, &byte_width]( - const Array& result) { + auto CheckResult = [this, &length, &is_valid, &raw_data, + &byte_width](const Array& result) { // Verify output const auto& fw_result = static_cast(result); @@ -1135,8 +1134,8 @@ TEST_F(TestFWBinaryArray, Builder) { for (int64_t i = 0; i < result.length(); ++i) { if (is_valid[i]) { - ASSERT_EQ( - 0, memcmp(raw_data + byte_width * i, fw_result.GetValue(i), byte_width)); + ASSERT_EQ(0, + memcmp(raw_data + byte_width * i, fw_result.GetValue(i), byte_width)); } else { ASSERT_TRUE(fw_result.IsNull(i)); } @@ -1323,8 +1322,8 @@ TEST_F(TestAdaptiveIntBuilder, TestInt16) { SetUp(); ASSERT_OK(builder_->Append(std::numeric_limits::max())); ASSERT_OK(builder_->Append(std::numeric_limits::min())); - expected_values = { - std::numeric_limits::max(), std::numeric_limits::min()}; + expected_values = {std::numeric_limits::max(), + std::numeric_limits::min()}; Done(); ArrayFromVector(expected_values, &expected_); @@ -1354,8 +1353,8 @@ TEST_F(TestAdaptiveIntBuilder, TestInt32) { SetUp(); ASSERT_OK(builder_->Append(std::numeric_limits::max())); ASSERT_OK(builder_->Append(std::numeric_limits::min())); - expected_values = { - std::numeric_limits::max(), std::numeric_limits::min()}; + expected_values = {std::numeric_limits::max(), + std::numeric_limits::min()}; Done(); ArrayFromVector(expected_values, &expected_); @@ -1385,8 +1384,8 @@ TEST_F(TestAdaptiveIntBuilder, TestInt64) { SetUp(); ASSERT_OK(builder_->Append(std::numeric_limits::max())); ASSERT_OK(builder_->Append(std::numeric_limits::min())); - expected_values = { - std::numeric_limits::max(), std::numeric_limits::min()}; + expected_values = {std::numeric_limits::max(), + std::numeric_limits::min()}; Done(); ArrayFromVector(expected_values, &expected_); @@ -1505,7 +1504,7 @@ template class TestDictionaryBuilder : public TestBuilder {}; typedef ::testing::Types + UInt32Type, Int64Type, UInt64Type, FloatType, DoubleType> PrimitiveDictionaries; 
TYPED_TEST_CASE(TestDictionaryBuilder, PrimitiveDictionaries); @@ -1784,7 +1783,7 @@ TEST_F(TestListBuilder, TestAppendNull) { } void ValidateBasicListArray(const ListArray* result, const vector& values, - const vector& is_valid) { + const vector& is_valid) { ASSERT_OK(ValidateArray(*result)); ASSERT_EQ(1, result->null_count()); ASSERT_EQ(0, result->values()->null_count()); @@ -1997,9 +1996,12 @@ TEST(TestDictionary, Validate) { // Struct tests void ValidateBasicStructArray(const StructArray* result, - const vector& struct_is_valid, const vector& list_values, - const vector& list_is_valid, const vector& list_lengths, - const vector& list_offsets, const vector& int_values) { + const vector& struct_is_valid, + const vector& list_values, + const vector& list_is_valid, + const vector& list_lengths, + const vector& list_offsets, + const vector& int_values) { ASSERT_EQ(4, result->length()); ASSERT_OK(ValidateArray(*result)); @@ -2134,7 +2136,7 @@ TEST_F(TestStructBuilder, TestBasics) { Done(); ValidateBasicStructArray(result_.get(), struct_is_valid, list_values, list_is_valid, - list_lengths, list_offsets, int_values); + list_lengths, list_offsets, int_values); } TEST_F(TestStructBuilder, BulkAppend) { @@ -2166,7 +2168,7 @@ TEST_F(TestStructBuilder, BulkAppend) { Done(); ValidateBasicStructArray(result_.get(), struct_is_valid, list_values, list_is_valid, - list_lengths, list_offsets, int_values); + list_lengths, list_offsets, int_values); } TEST_F(TestStructBuilder, BulkAppendInvalid) { @@ -2280,7 +2282,7 @@ TEST_F(TestStructBuilder, TestEquality) { // setup an unequal one with unequal offsets ASSERT_OK(builder_->Append(struct_is_valid.size(), struct_is_valid.data())); ASSERT_OK(list_vb->Append(unequal_list_offsets.data(), unequal_list_offsets.size(), - unequal_list_is_valid.data())); + unequal_list_is_valid.data())); for (int8_t value : list_values) { char_vb->UnsafeAppend(value); } diff --git a/cpp/src/arrow/array.cc b/cpp/src/arrow/array.cc index 4a405f24342fb..61791c9457756 100644 --- a/cpp/src/arrow/array.cc +++ b/cpp/src/arrow/array.cc @@ -57,45 +57,57 @@ int64_t Array::null_count() const { bool Array::Equals(const Array& arr) const { bool are_equal = false; Status error = ArrayEquals(*this, arr, &are_equal); - if (!error.ok()) { DCHECK(false) << "Arrays not comparable: " << error.ToString(); } + if (!error.ok()) { + DCHECK(false) << "Arrays not comparable: " << error.ToString(); + } return are_equal; } bool Array::Equals(const std::shared_ptr& arr) const { - if (!arr) { return false; } + if (!arr) { + return false; + } return Equals(*arr); } bool Array::ApproxEquals(const Array& arr) const { bool are_equal = false; Status error = ArrayApproxEquals(*this, arr, &are_equal); - if (!error.ok()) { DCHECK(false) << "Arrays not comparable: " << error.ToString(); } + if (!error.ok()) { + DCHECK(false) << "Arrays not comparable: " << error.ToString(); + } return are_equal; } bool Array::ApproxEquals(const std::shared_ptr& arr) const { - if (!arr) { return false; } + if (!arr) { + return false; + } return ApproxEquals(*arr); } bool Array::RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx, - const std::shared_ptr& other) const { - if (!other) { return false; } + const std::shared_ptr& other) const { + if (!other) { + return false; + } return RangeEquals(*other, start_idx, end_idx, other_start_idx); } bool Array::RangeEquals(const Array& other, int64_t start_idx, int64_t end_idx, - int64_t other_start_idx) const { + int64_t other_start_idx) const { bool are_equal = false; Status 
error = ArrayRangeEquals(*this, other, start_idx, end_idx, other_start_idx, &are_equal); - if (!error.ok()) { DCHECK(false) << "Arrays not comparable: " << error.ToString(); } + if (!error.ok()) { + DCHECK(false) << "Arrays not comparable: " << error.ToString(); + } return are_equal; } // Last two parameters are in-out parameters -static inline void ConformSliceParams( - int64_t array_offset, int64_t array_length, int64_t* offset, int64_t* length) { +static inline void ConformSliceParams(int64_t array_offset, int64_t array_length, + int64_t* offset, int64_t* length) { DCHECK_LE(*offset, array_length); DCHECK_NE(offset, nullptr); *length = std::min(array_length - *offset, *length); @@ -113,8 +125,8 @@ std::string Array::ToString() const { return ss.str(); } -static inline std::shared_ptr SliceData( - const ArrayData& data, int64_t offset, int64_t length) { +static inline std::shared_ptr SliceData(const ArrayData& data, int64_t offset, + int64_t length) { ConformSliceParams(data.offset, data.length, &offset, &length); auto new_data = data.ShallowCopy(); @@ -139,8 +151,9 @@ std::shared_ptr NullArray::Slice(int64_t offset, int64_t length) const { // Primitive array base PrimitiveArray::PrimitiveArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& data, const std::shared_ptr& null_bitmap, - int64_t null_count, int64_t offset) { + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, + int64_t null_count, int64_t offset) { BufferVector buffers = {null_bitmap, data}; SetData( std::make_shared(type, length, std::move(buffers), null_count, offset)); @@ -166,7 +179,8 @@ BooleanArray::BooleanArray(const std::shared_ptr& data) } BooleanArray::BooleanArray(int64_t length, const std::shared_ptr& data, - const std::shared_ptr& null_bitmap, int64_t null_count, int64_t offset) + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset) : PrimitiveArray(boolean(), length, data, null_bitmap, null_count, offset) {} std::shared_ptr BooleanArray::Slice(int64_t offset, int64_t length) const { @@ -182,8 +196,10 @@ ListArray::ListArray(const std::shared_ptr& data) { } ListArray::ListArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& value_offsets, const std::shared_ptr& values, - const std::shared_ptr& null_bitmap, int64_t null_count, int64_t offset) { + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset) { BufferVector buffers = {null_bitmap, value_offsets}; auto internal_data = std::make_shared(type, length, std::move(buffers), null_count, offset); @@ -192,7 +208,7 @@ ListArray::ListArray(const std::shared_ptr& type, int64_t length, } Status ListArray::FromArrays(const Array& offsets, const Array& values, MemoryPool* pool, - std::shared_ptr* out) { + std::shared_ptr* out) { if (ARROW_PREDICT_FALSE(offsets.length() == 0)) { return Status::Invalid("List offsets must have non-zero length"); } @@ -205,12 +221,13 @@ Status ListArray::FromArrays(const Array& offsets, const Array& values, MemoryPo return Status::Invalid("List offsets must be signed int32"); } - BufferVector buffers = { - offsets.null_bitmap(), static_cast(offsets).values()}; + BufferVector buffers = {offsets.null_bitmap(), + static_cast(offsets).values()}; auto list_type = list(values.type()); - auto internal_data = std::make_shared(list_type, - offsets.length() - 1, std::move(buffers), offsets.null_count(), offsets.offset()); + auto internal_data = std::make_shared( + list_type, 
offsets.length() - 1, std::move(buffers), offsets.null_count(), + offsets.offset()); internal_data->child_data.push_back(values.data()); *out = std::make_shared(internal_data); @@ -230,14 +247,12 @@ std::shared_ptr ListArray::value_type() const { return static_cast(*type()).value_type(); } -std::shared_ptr ListArray::values() const { - return values_; -} +std::shared_ptr ListArray::values() const { return values_; } std::shared_ptr ListArray::Slice(int64_t offset, int64_t length) const { ConformSliceParams(data_->offset, data_->length, &offset, &length); return std::make_shared(type(), length, value_offsets(), values(), - null_bitmap(), kUnknownNullCount, offset); + null_bitmap(), kUnknownNullCount, offset); } // ---------------------------------------------------------------------- @@ -262,14 +277,17 @@ void BinaryArray::SetData(const std::shared_ptr& data) { } BinaryArray::BinaryArray(int64_t length, const std::shared_ptr& value_offsets, - const std::shared_ptr& data, const std::shared_ptr& null_bitmap, - int64_t null_count, int64_t offset) + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset) : BinaryArray(kBinary, length, value_offsets, data, null_bitmap, null_count, offset) { } BinaryArray::BinaryArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& value_offsets, const std::shared_ptr& data, - const std::shared_ptr& null_bitmap, int64_t null_count, int64_t offset) { + const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset) { BufferVector buffers = {null_bitmap, value_offsets, data}; SetData( std::make_shared(type, length, std::move(buffers), null_count, offset)); @@ -285,8 +303,9 @@ StringArray::StringArray(const std::shared_ptr& data) { } StringArray::StringArray(int64_t length, const std::shared_ptr& value_offsets, - const std::shared_ptr& data, const std::shared_ptr& null_bitmap, - int64_t null_count, int64_t offset) + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset) : BinaryArray(kString, length, value_offsets, data, null_bitmap, null_count, offset) { } @@ -304,8 +323,10 @@ FixedSizeBinaryArray::FixedSizeBinaryArray( } FixedSizeBinaryArray::FixedSizeBinaryArray(const std::shared_ptr& type, - int64_t length, const std::shared_ptr& data, - const std::shared_ptr& null_bitmap, int64_t null_count, int64_t offset) + int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, + int64_t null_count, int64_t offset) : PrimitiveArray(type, length, data, null_bitmap, null_count, offset), byte_width_(static_cast(*type).byte_width()) {} @@ -335,8 +356,9 @@ void DecimalArray::SetData(const std::shared_ptr& data) { } DecimalArray::DecimalArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& data, const std::shared_ptr& null_bitmap, - int64_t null_count, int64_t offset, const std::shared_ptr& sign_bitmap) { + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset, const std::shared_ptr& sign_bitmap) { BufferVector buffers = {null_bitmap, data, sign_bitmap}; SetData( std::make_shared(type, length, std::move(buffers), null_count, offset)); @@ -392,8 +414,9 @@ StructArray::StructArray(const std::shared_ptr& data) { } StructArray::StructArray(const std::shared_ptr& type, int64_t length, - const std::vector>& children, - std::shared_ptr null_bitmap, int64_t null_count, int64_t offset) { + 
const std::vector>& children, + std::shared_ptr null_bitmap, int64_t null_count, + int64_t offset) { BufferVector buffers = {null_bitmap}; SetData( std::make_shared(type, length, std::move(buffers), null_count, offset)); @@ -433,9 +456,11 @@ UnionArray::UnionArray(const std::shared_ptr& data) { } UnionArray::UnionArray(const std::shared_ptr& type, int64_t length, - const std::vector>& children, - const std::shared_ptr& type_ids, const std::shared_ptr& value_offsets, - const std::shared_ptr& null_bitmap, int64_t null_count, int64_t offset) { + const std::vector>& children, + const std::shared_ptr& type_ids, + const std::shared_ptr& value_offsets, + const std::shared_ptr& null_bitmap, int64_t null_count, + int64_t offset) { BufferVector buffers = {null_bitmap, type_ids, value_offsets}; auto internal_data = std::make_shared(type, length, std::move(buffers), null_count, offset); @@ -464,8 +489,8 @@ DictionaryArray::DictionaryArray(const std::shared_ptr& data) SetData(data); } -DictionaryArray::DictionaryArray( - const std::shared_ptr& type, const std::shared_ptr& indices) +DictionaryArray::DictionaryArray(const std::shared_ptr& type, + const std::shared_ptr& indices) : dict_type_(static_cast(type.get())) { DCHECK_EQ(type->id(), Type::DICTIONARY); DCHECK_EQ(indices->type_id(), dict_type_->index_type()->id()); @@ -482,9 +507,7 @@ void DictionaryArray::SetData(const std::shared_ptr& data) { DCHECK(internal::MakeArray(indices_data, &indices_).ok()); } -std::shared_ptr DictionaryArray::indices() const { - return indices_; -} +std::shared_ptr DictionaryArray::indices() const { return indices_; } std::shared_ptr DictionaryArray::dictionary() const { return dict_type_->dictionary(); @@ -517,7 +540,9 @@ struct ValidateVisitor { } Status Visit(const ListArray& array) { - if (array.length() < 0) { return Status::Invalid("Length was negative"); } + if (array.length() < 0) { + return Status::Invalid("Length was negative"); + } auto value_offsets = array.value_offsets(); if (array.length() && !value_offsets) { @@ -550,7 +575,9 @@ struct ValidateVisitor { } int32_t prev_offset = array.value_offset(0); - if (prev_offset != 0) { return Status::Invalid("The first offset wasn't zero"); } + if (prev_offset != 0) { + return Status::Invalid("The first offset wasn't zero"); + } for (int64_t i = 1; i <= array.length(); ++i) { int32_t current_offset = array.value_offset(i); if (array.IsNull(i - 1) && current_offset != prev_offset) { @@ -573,7 +600,9 @@ struct ValidateVisitor { } Status Visit(const StructArray& array) { - if (array.length() < 0) { return Status::Invalid("Length was negative"); } + if (array.length() < 0) { + return Status::Invalid("Length was negative"); + } if (array.null_count() > array.length()) { return Status::Invalid("Null count exceeds the length of this struct"); @@ -610,7 +639,9 @@ struct ValidateVisitor { } Status Visit(const UnionArray& array) { - if (array.length() < 0) { return Status::Invalid("Length was negative"); } + if (array.length() < 0) { + return Status::Invalid("Length was negative"); + } if (array.null_count() > array.length()) { return Status::Invalid("Null count exceeds the length of this struct"); @@ -661,8 +692,9 @@ Status MakeArray(const std::shared_ptr& data, std::shared_ptr* } // namespace internal Status MakePrimitiveArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& data, const std::shared_ptr& null_bitmap, - int64_t null_count, int64_t offset, std::shared_ptr* out) { + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, int64_t 
null_count, + int64_t offset, std::shared_ptr* out) { BufferVector buffers = {null_bitmap, data}; auto internal_data = std::make_shared( type, length, std::move(buffers), null_count, offset); @@ -670,8 +702,9 @@ Status MakePrimitiveArray(const std::shared_ptr& type, int64_t length, } Status MakePrimitiveArray(const std::shared_ptr& type, - const std::vector>& buffers, int64_t length, - int64_t null_count, int64_t offset, std::shared_ptr* out) { + const std::vector>& buffers, + int64_t length, int64_t null_count, int64_t offset, + std::shared_ptr* out) { auto internal_data = std::make_shared(type, length, buffers, null_count, offset); return internal::MakeArray(internal_data, out); diff --git a/cpp/src/arrow/array.h b/cpp/src/arrow/array.h index c32d5e1c93ffd..a853f2bb5f93d 100644 --- a/cpp/src/arrow/array.h +++ b/cpp/src/arrow/array.h @@ -88,8 +88,8 @@ struct ARROW_EXPORT ArrayData { ArrayData() {} ArrayData(const std::shared_ptr& type, int64_t length, - const std::vector>& buffers, - int64_t null_count = kUnknownNullCount, int64_t offset = 0) + const std::vector>& buffers, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) : type(type), length(length), buffers(buffers), @@ -97,8 +97,8 @@ struct ARROW_EXPORT ArrayData { offset(offset) {} ArrayData(const std::shared_ptr& type, int64_t length, - std::vector>&& buffers, - int64_t null_count = kUnknownNullCount, int64_t offset = 0) + std::vector>&& buffers, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) : type(type), length(length), buffers(std::move(buffers)), @@ -145,8 +145,8 @@ struct ARROW_EXPORT ArrayData { std::vector> child_data; }; -Status ARROW_EXPORT MakeArray( - const std::shared_ptr& data, std::shared_ptr* out); +Status ARROW_EXPORT MakeArray(const std::shared_ptr& data, + std::shared_ptr* out); } // namespace internal @@ -211,10 +211,10 @@ class ARROW_EXPORT Array { /// Compare if the range of slots specified are equal for the given array and /// this array. end_idx exclusive. This methods does not bounds check. 
bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx, - const std::shared_ptr& other) const; + const std::shared_ptr& other) const; bool RangeEquals(const Array& other, int64_t start_idx, int64_t end_idx, - int64_t other_start_idx) const; + int64_t other_start_idx) const; Status Accept(ArrayVisitor* visitor) const; @@ -285,9 +285,9 @@ class ARROW_EXPORT NullArray : public FlatArray { class ARROW_EXPORT PrimitiveArray : public FlatArray { public: PrimitiveArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0); /// Does not account for any slice offset std::shared_ptr values() const { return data_->buffers[1]; } @@ -328,7 +328,7 @@ class ARROW_EXPORT NumericArray : public PrimitiveArray { const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, int64_t offset = 0) : PrimitiveArray(TypeTraits::type_singleton(), length, data, null_bitmap, - null_count, offset) {} + null_count, offset) {} const value_type* raw_values() const { return reinterpret_cast(raw_values_) + data_->offset; @@ -349,14 +349,14 @@ class ARROW_EXPORT BooleanArray : public PrimitiveArray { explicit BooleanArray(const std::shared_ptr& data); BooleanArray(int64_t length, const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0); std::shared_ptr Slice(int64_t offset, int64_t length) const override; bool Value(int64_t i) const { - return BitUtil::GetBit( - reinterpret_cast(raw_values_), i + data_->offset); + return BitUtil::GetBit(reinterpret_cast(raw_values_), + i + data_->offset); } protected: @@ -373,9 +373,10 @@ class ARROW_EXPORT ListArray : public Array { explicit ListArray(const std::shared_ptr& data); ListArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& value_offsets, const std::shared_ptr& values, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, + int64_t offset = 0); /// \brief Construct ListArray from array of offsets and child value array /// @@ -388,7 +389,7 @@ class ARROW_EXPORT ListArray : public Array { /// allocated because of null values /// \param[out] out Will have length equal to offsets.length() - 1 static Status FromArrays(const Array& offsets, const Array& values, MemoryPool* pool, - std::shared_ptr* out); + std::shared_ptr* out); /// \brief Return array object containing the list's values std::shared_ptr values() const; @@ -428,9 +429,9 @@ class ARROW_EXPORT BinaryArray : public FlatArray { explicit BinaryArray(const std::shared_ptr& data); BinaryArray(int64_t length, const std::shared_ptr& value_offsets, - const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0); // Return the pointer to the given elements bytes // TODO(emkornfield) introduce a StringPiece or something similar to capture zero-copy @@ -471,9 +472,10 @@ class ARROW_EXPORT BinaryArray : public FlatArray 
{ // Constructor that allows sub-classes/builders to propagate there logical type up the // class hierarchy. BinaryArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& value_offsets, const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0); const int32_t* raw_value_offsets_; const uint8_t* raw_data_; @@ -486,9 +488,9 @@ class ARROW_EXPORT StringArray : public BinaryArray { explicit StringArray(const std::shared_ptr& data); StringArray(int64_t length, const std::shared_ptr& value_offsets, - const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0); // Construct a std::string // TODO: std::bad_alloc possibility @@ -511,9 +513,9 @@ class ARROW_EXPORT FixedSizeBinaryArray : public PrimitiveArray { explicit FixedSizeBinaryArray(const std::shared_ptr& data); FixedSizeBinaryArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0); const uint8_t* GetValue(int64_t i) const; @@ -542,9 +544,10 @@ class ARROW_EXPORT DecimalArray : public FlatArray { explicit DecimalArray(const std::shared_ptr& data); DecimalArray(const std::shared_ptr& type, int64_t length, - const std::shared_ptr& data, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0, const std::shared_ptr& sign_bitmap = nullptr); + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = nullptr, + int64_t null_count = 0, int64_t offset = 0, + const std::shared_ptr& sign_bitmap = nullptr); bool IsNegative(int64_t i) const; @@ -582,9 +585,9 @@ class ARROW_EXPORT StructArray : public Array { explicit StructArray(const std::shared_ptr& data); StructArray(const std::shared_ptr& type, int64_t length, - const std::vector>& children, - std::shared_ptr null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::vector>& children, + std::shared_ptr null_bitmap = nullptr, int64_t null_count = 0, + int64_t offset = 0); // Return a shared pointer in case the requestor desires to share ownership // with this array. 
@@ -604,11 +607,11 @@ class ARROW_EXPORT UnionArray : public Array { explicit UnionArray(const std::shared_ptr& data); UnionArray(const std::shared_ptr& type, int64_t length, - const std::vector>& children, - const std::shared_ptr& type_ids, - const std::shared_ptr& value_offsets = nullptr, - const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, - int64_t offset = 0); + const std::vector>& children, + const std::shared_ptr& type_ids, + const std::shared_ptr& value_offsets = nullptr, + const std::shared_ptr& null_bitmap = nullptr, int64_t null_count = 0, + int64_t offset = 0); /// Note that this buffer does not account for any slice offset std::shared_ptr type_ids() const { return data_->buffers[1]; } @@ -656,8 +659,8 @@ class ARROW_EXPORT DictionaryArray : public Array { explicit DictionaryArray(const std::shared_ptr& data); - DictionaryArray( - const std::shared_ptr& type, const std::shared_ptr& indices); + DictionaryArray(const std::shared_ptr& type, + const std::shared_ptr& indices); std::shared_ptr indices() const; std::shared_ptr dictionary() const; @@ -705,13 +708,16 @@ Status ARROW_EXPORT ValidateArray(const Array& array); /// Create new arrays for logical types that are backed by primitive arrays. Status ARROW_EXPORT MakePrimitiveArray(const std::shared_ptr& type, - int64_t length, const std::shared_ptr& data, - const std::shared_ptr& null_bitmap, int64_t null_count, int64_t offset, - std::shared_ptr* out); - -Status ARROW_EXPORT MakePrimitiveArray(const std::shared_ptr& type, - const std::vector>& buffers, int64_t length, - int64_t null_count, int64_t offset, std::shared_ptr* out); + int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap, + int64_t null_count, int64_t offset, + std::shared_ptr* out); + +Status ARROW_EXPORT +MakePrimitiveArray(const std::shared_ptr& type, + const std::vector>& buffers, int64_t length, + int64_t null_count, int64_t offset, std::shared_ptr* out); } // namespace arrow diff --git a/cpp/src/arrow/buffer.cc b/cpp/src/arrow/buffer.cc index a1d119ecdcae5..b9c5897f8a228 100644 --- a/cpp/src/arrow/buffer.cc +++ b/cpp/src/arrow/buffer.cc @@ -27,8 +27,8 @@ namespace arrow { -Status Buffer::Copy( - int64_t start, int64_t nbytes, MemoryPool* pool, std::shared_ptr* out) const { +Status Buffer::Copy(int64_t start, int64_t nbytes, MemoryPool* pool, + std::shared_ptr* out) const { // Sanity checks DCHECK_LT(start, size_); DCHECK_LE(nbytes, size_ - start); @@ -47,25 +47,28 @@ Status Buffer::Copy(int64_t start, int64_t nbytes, std::shared_ptr* out) } bool Buffer::Equals(const Buffer& other, int64_t nbytes) const { - return this == &other || - (size_ >= nbytes && other.size_ >= nbytes && - (data_ == other.data_ || - !memcmp(data_, other.data_, static_cast(nbytes)))); + return this == &other || (size_ >= nbytes && other.size_ >= nbytes && + (data_ == other.data_ || + !memcmp(data_, other.data_, static_cast(nbytes)))); } bool Buffer::Equals(const Buffer& other) const { - return this == &other || (size_ == other.size_ && (data_ == other.data_ || - !memcmp(data_, other.data_, - static_cast(size_)))); + return this == &other || (size_ == other.size_ && + (data_ == other.data_ || + !memcmp(data_, other.data_, static_cast(size_)))); } PoolBuffer::PoolBuffer(MemoryPool* pool) : ResizableBuffer(nullptr, 0) { - if (pool == nullptr) { pool = default_memory_pool(); } + if (pool == nullptr) { + pool = default_memory_pool(); + } pool_ = pool; } PoolBuffer::~PoolBuffer() { - if (mutable_data_ != nullptr) { pool_->Free(mutable_data_, 
capacity_); } + if (mutable_data_ != nullptr) { + pool_->Free(mutable_data_, capacity_); + } } Status PoolBuffer::Reserve(int64_t new_capacity) { @@ -109,28 +112,28 @@ Status PoolBuffer::Resize(int64_t new_size, bool shrink_to_fit) { return Status::OK(); } -std::shared_ptr SliceMutableBuffer( - const std::shared_ptr& buffer, int64_t offset, int64_t length) { +std::shared_ptr SliceMutableBuffer(const std::shared_ptr& buffer, + int64_t offset, int64_t length) { return std::make_shared(buffer, offset, length); } -MutableBuffer::MutableBuffer( - const std::shared_ptr& parent, int64_t offset, int64_t size) +MutableBuffer::MutableBuffer(const std::shared_ptr& parent, int64_t offset, + int64_t size) : MutableBuffer(parent->mutable_data() + offset, size) { DCHECK(parent->is_mutable()) << "Must pass mutable buffer"; parent_ = parent; } -Status AllocateBuffer( - MemoryPool* pool, int64_t size, std::shared_ptr* out) { +Status AllocateBuffer(MemoryPool* pool, int64_t size, + std::shared_ptr* out) { auto buffer = std::make_shared(pool); RETURN_NOT_OK(buffer->Resize(size)); *out = buffer; return Status::OK(); } -Status AllocateResizableBuffer( - MemoryPool* pool, int64_t size, std::shared_ptr* out) { +Status AllocateResizableBuffer(MemoryPool* pool, int64_t size, + std::shared_ptr* out) { auto buffer = std::make_shared(pool); RETURN_NOT_OK(buffer->Resize(size)); *out = buffer; diff --git a/cpp/src/arrow/buffer.h b/cpp/src/arrow/buffer.h index 488a4c05334d5..09e539d162fb2 100644 --- a/cpp/src/arrow/buffer.h +++ b/cpp/src/arrow/buffer.h @@ -72,7 +72,7 @@ class ARROW_EXPORT Buffer { /// Copy a section of the buffer into a new Buffer. Status Copy(int64_t start, int64_t nbytes, MemoryPool* pool, - std::shared_ptr* out) const; + std::shared_ptr* out) const; /// Copy a section of the buffer using the default memory pool into a new Buffer. Status Copy(int64_t start, int64_t nbytes, std::shared_ptr* out) const; @@ -106,21 +106,21 @@ class ARROW_EXPORT Buffer { /// \param str std::string instance /// \return std::shared_ptr static inline std::shared_ptr GetBufferFromString(const std::string& str) { - return std::make_shared( - reinterpret_cast(str.c_str()), static_cast(str.size())); + return std::make_shared(reinterpret_cast(str.c_str()), + static_cast(str.size())); } /// Construct a view on passed buffer at the indicated offset and length. This /// function cannot fail and does not error checking (except in debug builds) -static inline std::shared_ptr SliceBuffer( - const std::shared_ptr& buffer, int64_t offset, int64_t length) { +static inline std::shared_ptr SliceBuffer(const std::shared_ptr& buffer, + int64_t offset, int64_t length) { return std::make_shared(buffer, offset, length); } /// Construct a mutable buffer slice. If the parent buffer is not mutable, this /// will abort in debug builds -std::shared_ptr ARROW_EXPORT SliceMutableBuffer( - const std::shared_ptr& buffer, int64_t offset, int64_t length); +std::shared_ptr ARROW_EXPORT +SliceMutableBuffer(const std::shared_ptr& buffer, int64_t offset, int64_t length); /// A Buffer whose contents can be mutated. May or may not own its data. 
class ARROW_EXPORT MutableBuffer : public Buffer { @@ -186,8 +186,12 @@ class ARROW_EXPORT BufferBuilder { /// Resizes the buffer to the nearest multiple of 64 bytes per Layout.md Status Resize(int64_t elements) { // Resize(0) is a no-op - if (elements == 0) { return Status::OK(); } - if (capacity_ == 0) { buffer_ = std::make_shared(pool_); } + if (elements == 0) { + return Status::OK(); + } + if (capacity_ == 0) { + buffer_ = std::make_shared(pool_); + } int64_t old_capacity = capacity_; RETURN_NOT_OK(buffer_->Resize(elements)); capacity_ = buffer_->capacity(); @@ -199,14 +203,18 @@ class ARROW_EXPORT BufferBuilder { } Status Append(const uint8_t* data, int64_t length) { - if (capacity_ < length + size_) { RETURN_NOT_OK(Resize(length + size_)); } + if (capacity_ < length + size_) { + RETURN_NOT_OK(Resize(length + size_)); + } UnsafeAppend(data, length); return Status::OK(); } // Advance pointer and zero out memory Status Advance(int64_t length) { - if (capacity_ < length + size_) { RETURN_NOT_OK(Resize(length + size_)); } + if (capacity_ < length + size_) { + RETURN_NOT_OK(Resize(length + size_)); + } memset(data_ + size_, 0, static_cast(length)); size_ += length; return Status::OK(); @@ -220,7 +228,9 @@ class ARROW_EXPORT BufferBuilder { Status Finish(std::shared_ptr* out) { // Do not shrink to fit to avoid unneeded realloc - if (size_ > 0) { RETURN_NOT_OK(buffer_->Resize(size_, false)); } + if (size_ > 0) { + RETURN_NOT_OK(buffer_->Resize(size_, false)); + } *out = buffer_; Reset(); return Status::OK(); @@ -250,29 +260,29 @@ class ARROW_EXPORT TypedBufferBuilder : public BufferBuilder { Status Append(T arithmetic_value) { static_assert(std::is_arithmetic::value, - "Convenience buffer append only supports arithmetic types"); - return BufferBuilder::Append( - reinterpret_cast(&arithmetic_value), sizeof(T)); + "Convenience buffer append only supports arithmetic types"); + return BufferBuilder::Append(reinterpret_cast(&arithmetic_value), + sizeof(T)); } Status Append(const T* arithmetic_values, int64_t num_elements) { static_assert(std::is_arithmetic::value, - "Convenience buffer append only supports arithmetic types"); - return BufferBuilder::Append( - reinterpret_cast(arithmetic_values), num_elements * sizeof(T)); + "Convenience buffer append only supports arithmetic types"); + return BufferBuilder::Append(reinterpret_cast(arithmetic_values), + num_elements * sizeof(T)); } void UnsafeAppend(T arithmetic_value) { static_assert(std::is_arithmetic::value, - "Convenience buffer append only supports arithmetic types"); + "Convenience buffer append only supports arithmetic types"); BufferBuilder::UnsafeAppend(reinterpret_cast(&arithmetic_value), sizeof(T)); } void UnsafeAppend(const T* arithmetic_values, int64_t num_elements) { static_assert(std::is_arithmetic::value, - "Convenience buffer append only supports arithmetic types"); - BufferBuilder::UnsafeAppend( - reinterpret_cast(arithmetic_values), num_elements * sizeof(T)); + "Convenience buffer append only supports arithmetic types"); + BufferBuilder::UnsafeAppend(reinterpret_cast(arithmetic_values), + num_elements * sizeof(T)); } const T* data() const { return reinterpret_cast(data_); } @@ -286,11 +296,11 @@ class ARROW_EXPORT TypedBufferBuilder : public BufferBuilder { /// \param[out] out the allocated buffer with padding /// /// \return Status message -Status ARROW_EXPORT AllocateBuffer( - MemoryPool* pool, int64_t size, std::shared_ptr* out); +Status ARROW_EXPORT AllocateBuffer(MemoryPool* pool, int64_t size, + std::shared_ptr* out); 
-Status ARROW_EXPORT AllocateResizableBuffer( - MemoryPool* pool, int64_t size, std::shared_ptr* out); +Status ARROW_EXPORT AllocateResizableBuffer(MemoryPool* pool, int64_t size, + std::shared_ptr* out); } // namespace arrow diff --git a/cpp/src/arrow/builder-benchmark.cc b/cpp/src/arrow/builder-benchmark.cc index 7ca7bb4999801..8ba9360e917fc 100644 --- a/cpp/src/arrow/builder-benchmark.cc +++ b/cpp/src/arrow/builder-benchmark.cc @@ -38,8 +38,8 @@ static void BM_BuildPrimitiveArrayNoNulls( std::shared_ptr out; ABORT_NOT_OK(builder.Finish(&out)); } - state.SetBytesProcessed( - state.iterations() * data.size() * sizeof(int64_t) * kFinalSize); + state.SetBytesProcessed(state.iterations() * data.size() * sizeof(int64_t) * + kFinalSize); } static void BM_BuildVectorNoNulls( @@ -53,8 +53,8 @@ static void BM_BuildVectorNoNulls( builder.insert(builder.end(), data.cbegin(), data.cend()); } } - state.SetBytesProcessed( - state.iterations() * data.size() * sizeof(int64_t) * kFinalSize); + state.SetBytesProcessed(state.iterations() * data.size() * sizeof(int64_t) * + kFinalSize); } static void BM_BuildAdaptiveIntNoNulls( @@ -127,8 +127,8 @@ static void BM_BuildDictionary(benchmark::State& state) { // NOLINT non-const r std::shared_ptr out; ABORT_NOT_OK(builder.Finish(&out)); } - state.SetBytesProcessed( - state.iterations() * iterations * (iterations + 1) / 2 * sizeof(int64_t)); + state.SetBytesProcessed(state.iterations() * iterations * (iterations + 1) / 2 * + sizeof(int64_t)); } static void BM_BuildStringDictionary( @@ -152,8 +152,8 @@ static void BM_BuildStringDictionary( ABORT_NOT_OK(builder.Finish(&out)); } // Assuming a string here needs on average 2 bytes - state.SetBytesProcessed( - state.iterations() * iterations * (iterations + 1) / 2 * sizeof(int32_t)); + state.SetBytesProcessed(state.iterations() * iterations * (iterations + 1) / 2 * + sizeof(int32_t)); } BENCHMARK(BM_BuildPrimitiveArrayNoNulls)->Repetitions(3)->Unit(benchmark::kMicrosecond); diff --git a/cpp/src/arrow/builder.cc b/cpp/src/arrow/builder.cc index ee363b91d8fcc..d3a299e5412fc 100644 --- a/cpp/src/arrow/builder.cc +++ b/cpp/src/arrow/builder.cc @@ -69,7 +69,9 @@ Status ArrayBuilder::Init(int64_t capacity) { } Status ArrayBuilder::Resize(int64_t new_bits) { - if (!null_bitmap_) { return Init(new_bits); } + if (!null_bitmap_) { + return Init(new_bits); + } int64_t new_bytes = BitUtil::CeilByte(new_bits) / 8; int64_t old_bytes = null_bitmap_->size(); RETURN_NOT_OK(null_bitmap_->Resize(new_bytes)); @@ -78,8 +80,8 @@ Status ArrayBuilder::Resize(int64_t new_bits) { const int64_t byte_capacity = null_bitmap_->capacity(); capacity_ = new_bits; if (old_bytes < new_bytes) { - memset( - null_bitmap_data_ + old_bytes, 0, static_cast(byte_capacity - old_bytes)); + memset(null_bitmap_data_ + old_bytes, 0, + static_cast(byte_capacity - old_bytes)); } return Status::OK(); } @@ -140,7 +142,9 @@ void ArrayBuilder::UnsafeAppendToBitmap(const uint8_t* valid_bytes, int64_t leng bit_offset++; } - if (bit_offset != 0) { null_bitmap_data_[byte_offset] = bitset; } + if (bit_offset != 0) { + null_bitmap_data_[byte_offset] = bitset; + } length_ += length; } @@ -149,7 +153,9 @@ void ArrayBuilder::UnsafeSetNotNull(int64_t length) { // Fill up the bytes until we have a byte alignment int64_t pad_to_byte = std::min(8 - (length_ % 8), length); - if (pad_to_byte == 8) { pad_to_byte = 0; } + if (pad_to_byte == 8) { + pad_to_byte = 0; + } for (int64_t i = length_; i < length_ + pad_to_byte; ++i) { BitUtil::SetBit(null_bitmap_data_, i); } @@ -157,7 +163,7 @@ 
void ArrayBuilder::UnsafeSetNotNull(int64_t length) { // Fast bitsetting int64_t fast_length = (length - pad_to_byte) / 8; memset(null_bitmap_data_ + ((length_ + pad_to_byte) / 8), 0xFF, - static_cast(fast_length)); + static_cast(fast_length)); // Trailing bytes for (int64_t i = length_ + pad_to_byte + (fast_length * 8); i < new_length; ++i) { @@ -184,7 +190,9 @@ Status PrimitiveBuilder::Init(int64_t capacity) { template Status PrimitiveBuilder::Resize(int64_t capacity) { // XXX: Set floor size for now - if (capacity < kMinBuilderCapacity) { capacity = kMinBuilderCapacity; } + if (capacity < kMinBuilderCapacity) { + capacity = kMinBuilderCapacity; + } if (capacity_ == 0) { RETURN_NOT_OK(Init(capacity)); @@ -195,20 +203,20 @@ Status PrimitiveBuilder::Resize(int64_t capacity) { RETURN_NOT_OK(data_->Resize(new_bytes)); raw_data_ = reinterpret_cast(data_->mutable_data()); // TODO(emkornfield) valgrind complains without this - memset( - data_->mutable_data() + old_bytes, 0, static_cast(new_bytes - old_bytes)); + memset(data_->mutable_data() + old_bytes, 0, + static_cast(new_bytes - old_bytes)); } return Status::OK(); } template -Status PrimitiveBuilder::Append( - const value_type* values, int64_t length, const uint8_t* valid_bytes) { +Status PrimitiveBuilder::Append(const value_type* values, int64_t length, + const uint8_t* valid_bytes) { RETURN_NOT_OK(Reserve(length)); if (length > 0) { std::memcpy(raw_data_ + length_, values, - static_cast(TypeTraits::bytes_required(length))); + static_cast(TypeTraits::bytes_required(length))); } // length_ is update by these @@ -224,8 +232,8 @@ Status PrimitiveBuilder::Finish(std::shared_ptr* out) { // Trim buffers RETURN_NOT_OK(data_->Resize(bytes_required)); } - *out = std::make_shared::ArrayType>( - type_, length_, data_, null_bitmap_, null_count_); + *out = std::make_shared::ArrayType>(type_, length_, data_, + null_bitmap_, null_count_); data_ = null_bitmap_ = nullptr; capacity_ = length_ = null_count_ = 0; @@ -267,7 +275,9 @@ Status AdaptiveIntBuilderBase::Init(int64_t capacity) { Status AdaptiveIntBuilderBase::Resize(int64_t capacity) { // XXX: Set floor size for now - if (capacity < kMinBuilderCapacity) { capacity = kMinBuilderCapacity; } + if (capacity < kMinBuilderCapacity) { + capacity = kMinBuilderCapacity; + } if (capacity_ == 0) { RETURN_NOT_OK(Init(capacity)); @@ -278,8 +288,8 @@ Status AdaptiveIntBuilderBase::Resize(int64_t capacity) { RETURN_NOT_OK(data_->Resize(new_bytes)); raw_data_ = data_->mutable_data(); // TODO(emkornfield) valgrind complains without this - memset( - data_->mutable_data() + old_bytes, 0, static_cast(new_bytes - old_bytes)); + memset(data_->mutable_data() + old_bytes, 0, + static_cast(new_bytes - old_bytes)); } return Status::OK(); } @@ -298,16 +308,16 @@ Status AdaptiveIntBuilder::Finish(std::shared_ptr* out) { std::make_shared(int8(), length_, data_, null_bitmap_, null_count_); break; case 2: - *out = std::make_shared( - int16(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(int16(), length_, data_, null_bitmap_, + null_count_); break; case 4: - *out = std::make_shared( - int32(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(int32(), length_, data_, null_bitmap_, + null_count_); break; case 8: - *out = std::make_shared( - int64(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(int64(), length_, data_, null_bitmap_, + null_count_); break; default: DCHECK(false); @@ -319,8 +329,8 @@ Status AdaptiveIntBuilder::Finish(std::shared_ptr* out) { return 
Status::OK(); } -Status AdaptiveIntBuilder::Append( - const int64_t* values, int64_t length, const uint8_t* valid_bytes) { +Status AdaptiveIntBuilder::Append(const int64_t* values, int64_t length, + const uint8_t* valid_bytes) { RETURN_NOT_OK(Reserve(length)); if (length > 0) { @@ -331,13 +341,15 @@ Status AdaptiveIntBuilder::Append( new_int_size = expanded_int_size(values[i], new_int_size); } } - if (new_int_size != int_size_) { RETURN_NOT_OK(ExpandIntSize(new_int_size)); } + if (new_int_size != int_size_) { + RETURN_NOT_OK(ExpandIntSize(new_int_size)); + } } } if (int_size_ == 8) { std::memcpy(reinterpret_cast(raw_data_) + length_, values, - sizeof(int64_t) * length); + sizeof(int64_t) * length); } else { #ifdef _MSC_VER #pragma warning(push) @@ -348,17 +360,17 @@ Status AdaptiveIntBuilder::Append( case 1: { int8_t* data_ptr = reinterpret_cast(raw_data_) + length_; std::transform(values, values + length, data_ptr, - [](int64_t x) { return static_cast(x); }); + [](int64_t x) { return static_cast(x); }); } break; case 2: { int16_t* data_ptr = reinterpret_cast(raw_data_) + length_; std::transform(values, values + length, data_ptr, - [](int64_t x) { return static_cast(x); }); + [](int64_t x) { return static_cast(x); }); } break; case 4: { int32_t* data_ptr = reinterpret_cast(raw_data_) + length_; std::transform(values, values + length, data_ptr, - [](int64_t x) { return static_cast(x); }); + [](int64_t x) { return static_cast(x); }); } break; default: DCHECK(false); @@ -449,20 +461,20 @@ Status AdaptiveUIntBuilder::Finish(std::shared_ptr* out) { } switch (int_size_) { case 1: - *out = std::make_shared( - uint8(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(uint8(), length_, data_, null_bitmap_, + null_count_); break; case 2: - *out = std::make_shared( - uint16(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(uint16(), length_, data_, null_bitmap_, + null_count_); break; case 4: - *out = std::make_shared( - uint32(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(uint32(), length_, data_, null_bitmap_, + null_count_); break; case 8: - *out = std::make_shared( - uint64(), length_, data_, null_bitmap_, null_count_); + *out = std::make_shared(uint64(), length_, data_, null_bitmap_, + null_count_); break; default: DCHECK(false); @@ -474,8 +486,8 @@ Status AdaptiveUIntBuilder::Finish(std::shared_ptr* out) { return Status::OK(); } -Status AdaptiveUIntBuilder::Append( - const uint64_t* values, int64_t length, const uint8_t* valid_bytes) { +Status AdaptiveUIntBuilder::Append(const uint64_t* values, int64_t length, + const uint8_t* valid_bytes) { RETURN_NOT_OK(Reserve(length)); if (length > 0) { @@ -486,13 +498,15 @@ Status AdaptiveUIntBuilder::Append( new_int_size = expanded_uint_size(values[i], new_int_size); } } - if (new_int_size != int_size_) { RETURN_NOT_OK(ExpandIntSize(new_int_size)); } + if (new_int_size != int_size_) { + RETURN_NOT_OK(ExpandIntSize(new_int_size)); + } } } if (int_size_ == 8) { std::memcpy(reinterpret_cast(raw_data_) + length_, values, - sizeof(uint64_t) * length); + sizeof(uint64_t) * length); } else { #ifdef _MSC_VER #pragma warning(push) @@ -503,17 +517,17 @@ Status AdaptiveUIntBuilder::Append( case 1: { uint8_t* data_ptr = reinterpret_cast(raw_data_) + length_; std::transform(values, values + length, data_ptr, - [](uint64_t x) { return static_cast(x); }); + [](uint64_t x) { return static_cast(x); }); } break; case 2: { uint16_t* data_ptr = reinterpret_cast(raw_data_) + length_; std::transform(values, 
values + length, data_ptr, - [](uint64_t x) { return static_cast(x); }); + [](uint64_t x) { return static_cast(x); }); } break; case 4: { uint32_t* data_ptr = reinterpret_cast(raw_data_) + length_; std::transform(values, values + length, data_ptr, - [](uint64_t x) { return static_cast(x); }); + [](uint64_t x) { return static_cast(x); }); } break; default: DCHECK(false); @@ -616,7 +630,9 @@ Status BooleanBuilder::Init(int64_t capacity) { Status BooleanBuilder::Resize(int64_t capacity) { // XXX: Set floor size for now - if (capacity < kMinBuilderCapacity) { capacity = kMinBuilderCapacity; } + if (capacity < kMinBuilderCapacity) { + capacity = kMinBuilderCapacity; + } if (capacity_ == 0) { RETURN_NOT_OK(Init(capacity)); @@ -627,8 +643,8 @@ Status BooleanBuilder::Resize(int64_t capacity) { RETURN_NOT_OK(data_->Resize(new_bytes)); raw_data_ = reinterpret_cast(data_->mutable_data()); - memset( - data_->mutable_data() + old_bytes, 0, static_cast(new_bytes - old_bytes)); + memset(data_->mutable_data() + old_bytes, 0, + static_cast(new_bytes - old_bytes)); } return Status::OK(); } @@ -647,8 +663,8 @@ Status BooleanBuilder::Finish(std::shared_ptr* out) { return Status::OK(); } -Status BooleanBuilder::Append( - const uint8_t* values, int64_t length, const uint8_t* valid_bytes) { +Status BooleanBuilder::Append(const uint8_t* values, int64_t length, + const uint8_t* valid_bytes) { RETURN_NOT_OK(Reserve(length)); for (int64_t i = 0; i < length; ++i) { @@ -673,14 +689,16 @@ Status BooleanBuilder::Append( // DictionaryBuilder template -DictionaryBuilder::DictionaryBuilder( - MemoryPool* pool, const std::shared_ptr& type) +DictionaryBuilder::DictionaryBuilder(MemoryPool* pool, + const std::shared_ptr& type) : ArrayBuilder(pool, type), hash_table_(new PoolBuffer(pool)), hash_slots_(nullptr), dict_builder_(pool, type), values_builder_(pool) { - if (!::arrow::CpuInfo::initialized()) { ::arrow::CpuInfo::Init(); } + if (!::arrow::CpuInfo::initialized()) { + ::arrow::CpuInfo::Init(); + } } template @@ -699,7 +717,9 @@ Status DictionaryBuilder::Init(int64_t elements) { template Status DictionaryBuilder::Resize(int64_t capacity) { - if (capacity < kMinBuilderCapacity) { capacity = kMinBuilderCapacity; } + if (capacity < kMinBuilderCapacity) { + capacity = kMinBuilderCapacity; + } if (capacity_ == 0) { return Init(capacity); @@ -732,7 +752,9 @@ Status DictionaryBuilder::Append(const Scalar& value) { while (kHashSlotEmpty != index && SlotDifferent(index, value)) { // Linear probing ++j; - if (j == hash_table_size_) { j = 0; } + if (j == hash_table_size_) { + j = 0; + } index = hash_slots_[j]; } @@ -784,7 +806,9 @@ Status DictionaryBuilder::DoubleTableSize() { for (int i = 0; i < hash_table_size_; ++i) { hash_slot_t index = hash_slots_[i]; - if (index == kHashSlotEmpty) { continue; } + if (index == kHashSlotEmpty) { + continue; + } // Compute the hash value mod the new table size to start looking for an // empty slot @@ -796,7 +820,9 @@ Status DictionaryBuilder::DoubleTableSize() { while (kHashSlotEmpty != slot && SlotDifferent(slot, value)) { ++j; - if (j == new_size) { j = 0; } + if (j == new_size) { + j = 0; + } slot = new_hash_slots[j]; } @@ -870,8 +896,8 @@ Status DictionaryBuilder::AppendDictionary(const Scalar& value) { } \ \ template <> \ - bool DictionaryBuilder::SlotDifferent( \ - hash_slot_t index, const internal::WrappedBinary& value) { \ + bool DictionaryBuilder::SlotDifferent(hash_slot_t index, \ + const internal::WrappedBinary& value) { \ int32_t other_length; \ const uint8_t* other_value = \ 
dict_builder_.GetValue(static_cast(index), &other_length); \ @@ -951,7 +977,9 @@ Status DecimalBuilder::Init(int64_t capacity) { Status DecimalBuilder::Resize(int64_t capacity) { int64_t old_bytes = null_bitmap_ != nullptr ? null_bitmap_->size() : 0; - if (sign_bitmap_ == nullptr) { return Init(capacity); } + if (sign_bitmap_ == nullptr) { + return Init(capacity); + } RETURN_NOT_OK(FixedSizeBinaryBuilder::Resize(capacity)); if (byte_width_ == 16) { @@ -962,7 +990,7 @@ Status DecimalBuilder::Resize(int64_t capacity) { // The buffer might be overpadded to deal with padding according to the spec if (old_bytes < new_bytes) { memset(sign_bitmap_data_ + old_bytes, 0, - static_cast(sign_bitmap_->capacity() - old_bytes)); + static_cast(sign_bitmap_->capacity() - old_bytes)); } } return Status::OK(); @@ -973,8 +1001,8 @@ Status DecimalBuilder::Finish(std::shared_ptr* out) { RETURN_NOT_OK(byte_builder_.Finish(&data)); /// TODO(phillipc): not sure where to get the offset argument here - *out = std::make_shared( - type_, length_, data, null_bitmap_, null_count_, 0, sign_bitmap_); + *out = std::make_shared(type_, length_, data, null_bitmap_, null_count_, + 0, sign_bitmap_); return Status::OK(); } @@ -982,15 +1010,15 @@ Status DecimalBuilder::Finish(std::shared_ptr* out) { // ListBuilder ListBuilder::ListBuilder(MemoryPool* pool, std::unique_ptr value_builder, - const std::shared_ptr& type) - : ArrayBuilder( - pool, type ? type : std::static_pointer_cast( - std::make_shared(value_builder->type()))), + const std::shared_ptr& type) + : ArrayBuilder(pool, + type ? type : std::static_pointer_cast( + std::make_shared(value_builder->type()))), offsets_builder_(pool), value_builder_(std::move(value_builder)) {} -Status ListBuilder::Append( - const int32_t* offsets, int64_t length, const uint8_t* valid_bytes) { +Status ListBuilder::Append(const int32_t* offsets, int64_t length, + const uint8_t* valid_bytes) { RETURN_NOT_OK(Reserve(length)); UnsafeAppendToBitmap(valid_bytes, length); offsets_builder_.UnsafeAppend(offsets, length); @@ -1035,10 +1063,12 @@ Status ListBuilder::Finish(std::shared_ptr* out) { RETURN_NOT_OK(offsets_builder_.Finish(&offsets)); std::shared_ptr items = values_; - if (!items) { RETURN_NOT_OK(value_builder_->Finish(&items)); } + if (!items) { + RETURN_NOT_OK(value_builder_->Finish(&items)); + } - *out = std::make_shared( - type_, length_, offsets, items, null_bitmap_, null_count_); + *out = std::make_shared(type_, length_, offsets, items, null_bitmap_, + null_count_); Reset(); return Status::OK(); @@ -1111,8 +1141,8 @@ Status BinaryBuilder::FinishInternal(std::shared_ptr* out) RETURN_NOT_OK(value_data_builder_.Finish(&value_data)); BufferVector buffers = {null_bitmap_, offsets, value_data}; - *out = std::make_shared( - type_, length_, std::move(buffers), null_count_, 0); + *out = std::make_shared(type_, length_, std::move(buffers), + null_count_, 0); return Status::OK(); } @@ -1154,8 +1184,8 @@ Status StringBuilder::Finish(std::shared_ptr* out) { // ---------------------------------------------------------------------- // Fixed width binary -FixedSizeBinaryBuilder::FixedSizeBinaryBuilder( - MemoryPool* pool, const std::shared_ptr& type) +FixedSizeBinaryBuilder::FixedSizeBinaryBuilder(MemoryPool* pool, + const std::shared_ptr& type) : ArrayBuilder(pool, type), byte_width_(static_cast(*type).byte_width()), byte_builder_(pool) {} @@ -1166,8 +1196,8 @@ Status FixedSizeBinaryBuilder::Append(const uint8_t* value) { return byte_builder_.Append(value, byte_width_); } -Status 
FixedSizeBinaryBuilder::Append( - const uint8_t* data, int64_t length, const uint8_t* valid_bytes) { +Status FixedSizeBinaryBuilder::Append(const uint8_t* data, int64_t length, + const uint8_t* valid_bytes) { RETURN_NOT_OK(Reserve(length)); UnsafeAppendToBitmap(valid_bytes, length); return byte_builder_.Append(data, length * byte_width_); @@ -1196,8 +1226,8 @@ Status FixedSizeBinaryBuilder::Resize(int64_t capacity) { Status FixedSizeBinaryBuilder::Finish(std::shared_ptr* out) { std::shared_ptr data; RETURN_NOT_OK(byte_builder_.Finish(&data)); - *out = std::make_shared( - type_, length_, data, null_bitmap_, null_count_); + *out = std::make_shared(type_, length_, data, null_bitmap_, + null_count_); return Status::OK(); } @@ -1205,7 +1235,7 @@ Status FixedSizeBinaryBuilder::Finish(std::shared_ptr* out) { // Struct StructBuilder::StructBuilder(MemoryPool* pool, const std::shared_ptr& type, - std::vector>&& field_builders) + std::vector>&& field_builders) : ArrayBuilder(pool, type) { field_builders_ = std::move(field_builders); } @@ -1237,7 +1267,7 @@ Status StructBuilder::Finish(std::shared_ptr* out) { // // TODO(wesm): come up with a less monolithic strategy Status MakeBuilder(MemoryPool* pool, const std::shared_ptr& type, - std::unique_ptr* out) { + std::unique_ptr* out) { switch (type->id()) { BUILDER_CASE(UINT8, UInt8Builder); BUILDER_CASE(INT8, Int8Builder); @@ -1292,7 +1322,7 @@ Status MakeBuilder(MemoryPool* pool, const std::shared_ptr& type, return Status::OK(); Status MakeDictionaryBuilder(MemoryPool* pool, const std::shared_ptr& type, - std::shared_ptr* out) { + std::shared_ptr* out) { switch (type->id()) { DICTIONARY_BUILDER_CASE(UINT8, DictionaryBuilder); DICTIONARY_BUILDER_CASE(INT8, DictionaryBuilder); diff --git a/cpp/src/arrow/builder.h b/cpp/src/arrow/builder.h index 065e115ac5872..080a32900555c 100644 --- a/cpp/src/arrow/builder.h +++ b/cpp/src/arrow/builder.h @@ -186,8 +186,8 @@ class ARROW_EXPORT PrimitiveBuilder : public ArrayBuilder { /// /// If passed, valid_bytes is of equal length to values, and any zero byte /// will be considered as a null for that slot - Status Append( - const value_type* values, int64_t length, const uint8_t* valid_bytes = nullptr); + Status Append(const value_type* values, int64_t length, + const uint8_t* valid_bytes = nullptr); Status Finish(std::shared_ptr* out) override; Status Init(int64_t capacity) override; @@ -298,15 +298,15 @@ class ARROW_EXPORT AdaptiveIntBuilderBase : public ArrayBuilder { inline uint8_t expanded_uint_size(uint64_t val, uint8_t current_int_size) { if (current_int_size == 8 || (current_int_size < 8 && - (val > static_cast(std::numeric_limits::max())))) { + (val > static_cast(std::numeric_limits::max())))) { return 8; } else if (current_int_size == 4 || (current_int_size < 4 && - (val > static_cast(std::numeric_limits::max())))) { + (val > static_cast(std::numeric_limits::max())))) { return 4; } else if (current_int_size == 2 || (current_int_size == 1 && - (val > static_cast(std::numeric_limits::max())))) { + (val > static_cast(std::numeric_limits::max())))) { return 2; } else { return 1; @@ -325,7 +325,9 @@ class ARROW_EXPORT AdaptiveUIntBuilder : public AdaptiveIntBuilderBase { BitUtil::SetBit(null_bitmap_data_, length_); uint8_t new_int_size = expanded_uint_size(val, int_size_); - if (new_int_size != int_size_) { RETURN_NOT_OK(ExpandIntSize(new_int_size)); } + if (new_int_size != int_size_) { + RETURN_NOT_OK(ExpandIntSize(new_int_size)); + } switch (int_size_) { case 1: @@ -350,8 +352,8 @@ class ARROW_EXPORT 
AdaptiveUIntBuilder : public AdaptiveIntBuilderBase { /// /// If passed, valid_bytes is of equal length to values, and any zero byte /// will be considered as a null for that slot - Status Append( - const uint64_t* values, int64_t length, const uint8_t* valid_bytes = nullptr); + Status Append(const uint64_t* values, int64_t length, + const uint8_t* valid_bytes = nullptr); Status ExpandIntSize(uint8_t new_int_size); Status Finish(std::shared_ptr* out) override; @@ -374,18 +376,18 @@ class ARROW_EXPORT AdaptiveUIntBuilder : public AdaptiveIntBuilderBase { inline uint8_t expanded_int_size(int64_t val, uint8_t current_int_size) { if (current_int_size == 8 || (current_int_size < 8 && - (val > static_cast(std::numeric_limits::max()) || - val < static_cast(std::numeric_limits::min())))) { + (val > static_cast(std::numeric_limits::max()) || + val < static_cast(std::numeric_limits::min())))) { return 8; } else if (current_int_size == 4 || (current_int_size < 4 && - (val > static_cast(std::numeric_limits::max()) || - val < static_cast(std::numeric_limits::min())))) { + (val > static_cast(std::numeric_limits::max()) || + val < static_cast(std::numeric_limits::min())))) { return 4; } else if (current_int_size == 2 || (current_int_size == 1 && - (val > static_cast(std::numeric_limits::max()) || - val < static_cast(std::numeric_limits::min())))) { + (val > static_cast(std::numeric_limits::max()) || + val < static_cast(std::numeric_limits::min())))) { return 2; } else { return 1; @@ -404,7 +406,9 @@ class ARROW_EXPORT AdaptiveIntBuilder : public AdaptiveIntBuilderBase { BitUtil::SetBit(null_bitmap_data_, length_); uint8_t new_int_size = expanded_int_size(val, int_size_); - if (new_int_size != int_size_) { RETURN_NOT_OK(ExpandIntSize(new_int_size)); } + if (new_int_size != int_size_) { + RETURN_NOT_OK(ExpandIntSize(new_int_size)); + } switch (int_size_) { case 1: @@ -429,8 +433,8 @@ class ARROW_EXPORT AdaptiveIntBuilder : public AdaptiveIntBuilderBase { /// /// If passed, valid_bytes is of equal length to values, and any zero byte /// will be considered as a null for that slot - Status Append( - const int64_t* values, int64_t length, const uint8_t* valid_bytes = nullptr); + Status Append(const int64_t* values, int64_t length, + const uint8_t* valid_bytes = nullptr); Status ExpandIntSize(uint8_t new_int_size); Status Finish(std::shared_ptr* out) override; @@ -490,8 +494,8 @@ class ARROW_EXPORT BooleanBuilder : public ArrayBuilder { /// /// If passed, valid_bytes is of equal length to values, and any zero byte /// will be considered as a null for that slot - Status Append( - const uint8_t* values, int64_t length, const uint8_t* valid_bytes = nullptr); + Status Append(const uint8_t* values, int64_t length, + const uint8_t* valid_bytes = nullptr); Status Finish(std::shared_ptr* out) override; Status Init(int64_t capacity) override; @@ -526,7 +530,7 @@ class ARROW_EXPORT ListBuilder : public ArrayBuilder { /// Use this constructor to incrementally build the value array along with offsets and /// null bitmap. 
ListBuilder(MemoryPool* pool, std::unique_ptr value_builder, - const std::shared_ptr& type = nullptr); + const std::shared_ptr& type = nullptr); Status Init(int64_t elements) override; Status Resize(int64_t capacity) override; @@ -536,8 +540,8 @@ class ARROW_EXPORT ListBuilder : public ArrayBuilder { /// /// If passed, valid_bytes is of equal length to values, and any zero byte /// will be considered as a null for that slot - Status Append( - const int32_t* offsets, int64_t length, const uint8_t* valid_bytes = nullptr); + Status Append(const int32_t* offsets, int64_t length, + const uint8_t* valid_bytes = nullptr); /// \brief Start a new variable-length list slot /// @@ -626,8 +630,8 @@ class ARROW_EXPORT FixedSizeBinaryBuilder : public ArrayBuilder { FixedSizeBinaryBuilder(MemoryPool* pool, const std::shared_ptr& type); Status Append(const uint8_t* value); - Status Append( - const uint8_t* data, int64_t length, const uint8_t* valid_bytes = nullptr); + Status Append(const uint8_t* data, int64_t length, + const uint8_t* valid_bytes = nullptr); Status Append(const std::string& value); Status AppendNull(); @@ -672,7 +676,7 @@ class ARROW_EXPORT DecimalBuilder : public FixedSizeBinaryBuilder { class ARROW_EXPORT StructBuilder : public ArrayBuilder { public: StructBuilder(MemoryPool* pool, const std::shared_ptr& type, - std::vector>&& field_builders); + std::vector>&& field_builders); Status Finish(std::shared_ptr* out) override; @@ -808,7 +812,7 @@ class ARROW_EXPORT BinaryDictionaryBuilder : public DictionaryBuilder(value.c_str()), - static_cast(value.size()))); + static_cast(value.size()))); } }; @@ -829,7 +833,7 @@ class ARROW_EXPORT StringDictionaryBuilder : public DictionaryBuilder(value.c_str()), - static_cast(value.size()))); + static_cast(value.size()))); } }; @@ -837,10 +841,11 @@ class ARROW_EXPORT StringDictionaryBuilder : public DictionaryBuilder& type, - std::unique_ptr* out); + std::unique_ptr* out); Status ARROW_EXPORT MakeDictionaryBuilder(MemoryPool* pool, - const std::shared_ptr& type, std::shared_ptr* out); + const std::shared_ptr& type, + std::shared_ptr* out); } // namespace arrow diff --git a/cpp/src/arrow/compare.cc b/cpp/src/arrow/compare.cc index 1465e0b414fe3..da10c2ad90177 100644 --- a/cpp/src/arrow/compare.cc +++ b/cpp/src/arrow/compare.cc @@ -41,7 +41,7 @@ namespace arrow { class RangeEqualsVisitor { public: RangeEqualsVisitor(const Array& right, int64_t left_start_idx, int64_t left_end_idx, - int64_t right_start_idx) + int64_t right_start_idx) : right_(right), left_start_idx_(left_start_idx), left_end_idx_(left_end_idx), @@ -71,7 +71,9 @@ class RangeEqualsVisitor { for (int64_t i = left_start_idx_, o_i = right_start_idx_; i < left_end_idx_; ++i, ++o_i) { const bool is_null = left.IsNull(i); - if (is_null != right.IsNull(o_i)) { return false; } + if (is_null != right.IsNull(o_i)) { + return false; + } if (is_null) continue; const int32_t begin_offset = left.value_offset(i); const int32_t end_offset = left.value_offset(i + 1); @@ -84,8 +86,8 @@ class RangeEqualsVisitor { if (end_offset - begin_offset > 0 && std::memcmp(left.value_data()->data() + begin_offset, - right.value_data()->data() + right_begin_offset, - static_cast(end_offset - begin_offset))) { + right.value_data()->data() + right_begin_offset, + static_cast(end_offset - begin_offset))) { return false; } } @@ -101,7 +103,9 @@ class RangeEqualsVisitor { for (int64_t i = left_start_idx_, o_i = right_start_idx_; i < left_end_idx_; ++i, ++o_i) { const bool is_null = left.IsNull(i); - if (is_null != 
right.IsNull(o_i)) { return false; } + if (is_null != right.IsNull(o_i)) { + return false; + } if (is_null) continue; const int32_t begin_offset = left.value_offset(i); const int32_t end_offset = left.value_offset(i + 1); @@ -111,8 +115,8 @@ class RangeEqualsVisitor { if (end_offset - begin_offset != right_end_offset - right_begin_offset) { return false; } - if (!left_values->RangeEquals( - begin_offset, end_offset, right_begin_offset, right_values)) { + if (!left_values->RangeEquals(begin_offset, end_offset, right_begin_offset, + right_values)) { return false; } } @@ -124,7 +128,9 @@ class RangeEqualsVisitor { bool equal_fields = true; for (int64_t i = left_start_idx_, o_i = right_start_idx_; i < left_end_idx_; ++i, ++o_i) { - if (left.IsNull(i) != right.IsNull(o_i)) { return false; } + if (left.IsNull(i) != right.IsNull(o_i)) { + return false; + } if (left.IsNull(i)) continue; for (int j = 0; j < left.num_fields(); ++j) { // TODO: really we should be comparing stretches of non-null data rather @@ -132,9 +138,11 @@ class RangeEqualsVisitor { const int64_t left_abs_index = i + left.offset(); const int64_t right_abs_index = o_i + right.offset(); - equal_fields = left.field(j)->RangeEquals( - left_abs_index, left_abs_index + 1, right_abs_index, right.field(j)); - if (!equal_fields) { return false; } + equal_fields = left.field(j)->RangeEquals(left_abs_index, left_abs_index + 1, + right_abs_index, right.field(j)); + if (!equal_fields) { + return false; + } } } return true; @@ -144,7 +152,9 @@ class RangeEqualsVisitor { const auto& right = static_cast(right_); const UnionMode union_mode = left.mode(); - if (union_mode != right.mode()) { return false; } + if (union_mode != right.mode()) { + return false; + } const auto& left_type = static_cast(*left.type()); @@ -154,7 +164,9 @@ class RangeEqualsVisitor { const std::vector& type_codes = left_type.type_codes(); for (size_t i = 0; i < type_codes.size(); ++i) { const uint8_t code = type_codes[i]; - if (code > max_code) { max_code = code; } + if (code > max_code) { + max_code = code; + } } // Store mapping in a vector for constant time lookups @@ -169,9 +181,13 @@ class RangeEqualsVisitor { uint8_t id, child_num; for (int64_t i = left_start_idx_, o_i = right_start_idx_; i < left_end_idx_; ++i, ++o_i) { - if (left.IsNull(i) != right.IsNull(o_i)) { return false; } + if (left.IsNull(i) != right.IsNull(o_i)) { + return false; + } if (left.IsNull(i)) continue; - if (left_ids[i] != right_ids[o_i]) { return false; } + if (left_ids[i] != right_ids[o_i]) { + return false; + } id = left_ids[i]; child_num = type_id_to_child_num[id]; @@ -183,14 +199,15 @@ class RangeEqualsVisitor { // rather than looking at one value at a time. 
if (union_mode == UnionMode::SPARSE) { if (!left.child(child_num)->RangeEquals(left_abs_index, left_abs_index + 1, - right_abs_index, right.child(child_num))) { + right_abs_index, + right.child(child_num))) { return false; } } else { const int32_t offset = left.raw_value_offsets()[i]; const int32_t o_offset = right.raw_value_offsets()[o_i]; - if (!left.child(child_num)->RangeEquals( - offset, offset + 1, o_offset, right.child(child_num))) { + if (!left.child(child_num)->RangeEquals(offset, offset + 1, o_offset, + right.child(child_num))) { return false; } } @@ -211,9 +228,13 @@ class RangeEqualsVisitor { const uint8_t* left_data = nullptr; const uint8_t* right_data = nullptr; - if (left.values()) { left_data = left.raw_values() + left.offset() * width; } + if (left.values()) { + left_data = left.raw_values() + left.offset() * width; + } - if (right.values()) { right_data = right.raw_values() + right.offset() * width; } + if (right.values()) { + right_data = right.raw_values() + right.offset() * width; + } for (int64_t i = left_start_idx_, o_i = right_start_idx_; i < left_end_idx_; ++i, ++o_i) { @@ -241,9 +262,13 @@ class RangeEqualsVisitor { const uint8_t* left_data = nullptr; const uint8_t* right_data = nullptr; - if (left.values()) { left_data = left.raw_values() + left.offset() * width; } + if (left.values()) { + left_data = left.raw_values() + left.offset() * width; + } - if (right.values()) { right_data = right.raw_values() + right.offset() * width; } + if (right.values()) { + right_data = right.raw_values() + right.offset() * width; + } for (int64_t i = left_start_idx_, o_i = right_start_idx_; i < left_end_idx_; ++i, ++o_i) { @@ -301,8 +326,8 @@ class RangeEqualsVisitor { result_ = false; return Status::OK(); } - result_ = left.indices()->RangeEquals( - left_start_idx_, left_end_idx_, right_start_idx_, right.indices()); + result_ = left.indices()->RangeEquals(left_start_idx_, left_end_idx_, + right_start_idx_, right.indices()); return Status::OK(); } @@ -324,7 +349,9 @@ static bool IsEqualPrimitive(const PrimitiveArray& left, const PrimitiveArray& r const uint8_t* left_data = nullptr; const uint8_t* right_data = nullptr; - if (left.values()) { left_data = left.values()->data() + left.offset() * byte_width; } + if (left.values()) { + left_data = left.values()->data() + left.offset() * byte_width; + } if (right.values()) { right_data = right.values()->data() + right.offset() * byte_width; } @@ -341,13 +368,13 @@ static bool IsEqualPrimitive(const PrimitiveArray& left, const PrimitiveArray& r return true; } else { return memcmp(left_data, right_data, - static_cast(byte_width * left.length())) == 0; + static_cast(byte_width * left.length())) == 0; } } template -static inline bool CompareBuiltIn( - const Array& left, const Array& right, const T* ldata, const T* rdata) { +static inline bool CompareBuiltIn(const Array& left, const Array& right, const T* ldata, + const T* rdata) { if (left.null_count() > 0) { for (int64_t i = 0; i < left.length(); ++i) { if (left.IsNull(i) != right.IsNull(i)) { @@ -369,17 +396,21 @@ static bool IsEqualDecimal(const DecimalArray& left, const DecimalArray& right) const uint8_t* left_data = nullptr; const uint8_t* right_data = nullptr; - if (left.values()) { left_data = left.values()->data(); } - if (right.values()) { right_data = right.values()->data(); } + if (left.values()) { + left_data = left.values()->data(); + } + if (right.values()) { + right_data = right.values()->data(); + } const int32_t byte_width = left.byte_width(); if (byte_width == 4) { - return 
CompareBuiltIn(left, right, - reinterpret_cast(left_data) + loffset, + return CompareBuiltIn( + left, right, reinterpret_cast(left_data) + loffset, reinterpret_cast(right_data) + roffset); } else if (byte_width == 8) { - return CompareBuiltIn(left, right, - reinterpret_cast(left_data) + loffset, + return CompareBuiltIn( + left, right, reinterpret_cast(left_data) + loffset, reinterpret_cast(right_data) + roffset); } else { // 128-bit @@ -387,8 +418,12 @@ static bool IsEqualDecimal(const DecimalArray& left, const DecimalArray& right) // Must also compare sign bitmap const uint8_t* left_sign = nullptr; const uint8_t* right_sign = nullptr; - if (left.sign_bitmap()) { left_sign = left.sign_bitmap()->data(); } - if (right.sign_bitmap()) { right_sign = right.sign_bitmap()->data(); } + if (left.sign_bitmap()) { + left_sign = left.sign_bitmap()->data(); + } + if (right.sign_bitmap()) { + right_sign = right.sign_bitmap()->data(); + } for (int64_t i = 0; i < left.length(); ++i) { bool left_null = left.IsNull(i); @@ -434,7 +469,7 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { result_ = true; } else { result_ = BitmapEquals(left.values()->data(), left.offset(), right.values()->data(), - right.offset(), left.length()); + right.offset(), left.length()); } return Status::OK(); } @@ -442,7 +477,7 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { template typename std::enable_if::value && !std::is_base_of::value, - Status>::type + Status>::type Visit(const T& left) { result_ = IsEqualPrimitive(left, static_cast(right_)); return Status::OK(); @@ -458,8 +493,8 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { const auto& right = static_cast(right_); if (left.offset() == 0 && right.offset() == 0) { - return left.value_offsets()->Equals( - *right.value_offsets(), (left.length() + 1) * sizeof(int32_t)); + return left.value_offsets()->Equals(*right.value_offsets(), + (left.length() + 1) * sizeof(int32_t)); } else { // One of the arrays is sliced; logic is more complicated because the // value offsets are not both 0-based @@ -482,10 +517,16 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { const auto& right = static_cast(right_); bool equal_offsets = ValueOffsetsEqual(left); - if (!equal_offsets) { return false; } + if (!equal_offsets) { + return false; + } - if (!left.value_data() && !(right.value_data())) { return true; } - if (left.value_offset(left.length()) == 0) { return true; } + if (!left.value_data() && !(right.value_data())) { + return true; + } + if (left.value_offset(left.length()) == 0) { + return true; + } const uint8_t* left_data = left.value_data()->data(); const uint8_t* right_data = right.value_data()->data(); @@ -493,23 +534,25 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { if (left.null_count() == 0) { // Fast path for null count 0, single memcmp if (left.offset() == 0 && right.offset() == 0) { - return std::memcmp( - left_data, right_data, left.raw_value_offsets()[left.length()]) == 0; + return std::memcmp(left_data, right_data, + left.raw_value_offsets()[left.length()]) == 0; } else { const int64_t total_bytes = left.value_offset(left.length()) - left.value_offset(0); return std::memcmp(left_data + left.value_offset(0), - right_data + right.value_offset(0), - static_cast(total_bytes)) == 0; + right_data + right.value_offset(0), + static_cast(total_bytes)) == 0; } } else { // ARROW-537: Only compare data in non-null slots const int32_t* left_offsets = left.raw_value_offsets(); const int32_t* right_offsets = right.raw_value_offsets(); for (int64_t i 
= 0; i < left.length(); ++i) { - if (left.IsNull(i)) { continue; } + if (left.IsNull(i)) { + continue; + } if (std::memcmp(left_data + left_offsets[i], right_data + right_offsets[i], - left.value_length(i))) { + left.value_length(i))) { return false; } } @@ -530,8 +573,9 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { return Status::OK(); } - result_ = left.values()->RangeEquals(left.value_offset(0), - left.value_offset(left.length()), right.value_offset(0), right.values()); + result_ = + left.values()->RangeEquals(left.value_offset(0), left.value_offset(left.length()), + right.value_offset(0), right.values()); return Status::OK(); } @@ -547,15 +591,15 @@ class ArrayEqualsVisitor : public RangeEqualsVisitor { template typename std::enable_if::value, - Status>::type + Status>::type Visit(const T& left) { return RangeEqualsVisitor::Visit(left); } }; template -inline bool FloatingApproxEquals( - const NumericArray& left, const NumericArray& right) { +inline bool FloatingApproxEquals(const NumericArray& left, + const NumericArray& right) { using T = typename TYPE::c_type; const T* left_data = left.raw_values(); @@ -566,11 +610,15 @@ inline bool FloatingApproxEquals( if (left.null_count() > 0) { for (int64_t i = 0; i < left.length(); ++i) { if (left.IsNull(i)) continue; - if (fabs(left_data[i] - right_data[i]) > EPSILON) { return false; } + if (fabs(left_data[i] - right_data[i]) > EPSILON) { + return false; + } } } else { for (int64_t i = 0; i < left.length(); ++i) { - if (fabs(left_data[i] - right_data[i]) > EPSILON) { return false; } + if (fabs(left_data[i] - right_data[i]) > EPSILON) { + return false; + } } } return true; @@ -601,7 +649,7 @@ static bool BaseDataEquals(const Array& left, const Array& right) { } if (left.null_count() > 0 && left.null_count() < left.length()) { return BitmapEquals(left.null_bitmap()->data(), left.offset(), - right.null_bitmap()->data(), right.offset(), left.length()); + right.null_bitmap()->data(), right.offset(), left.length()); } return true; } @@ -634,7 +682,7 @@ Status ArrayApproxEquals(const Array& left, const Array& right, bool* are_equal) } Status ArrayRangeEquals(const Array& left, const Array& right, int64_t left_start_idx, - int64_t left_end_idx, int64_t right_start_idx, bool* are_equal) { + int64_t left_end_idx, int64_t right_start_idx, bool* are_equal) { if (&left == &right) { *are_equal = true; } else if (left.type_id() != right.type_id()) { @@ -705,7 +753,7 @@ class TypeEqualsVisitor { template typename std::enable_if::value || std::is_base_of::value, - Status>::type + Status>::type Visit(const T& type) { result_ = true; return Status::OK(); @@ -714,7 +762,7 @@ class TypeEqualsVisitor { template typename std::enable_if::value || std::is_base_of::value, - Status>::type + Status>::type Visit(const T& left) { const auto& right = static_cast(right_); result_ = left.unit() == right.unit(); diff --git a/cpp/src/arrow/compare.h b/cpp/src/arrow/compare.h index 96a6435c5df33..a36b55320b5a2 100644 --- a/cpp/src/arrow/compare.h +++ b/cpp/src/arrow/compare.h @@ -34,21 +34,22 @@ class Tensor; /// Returns true if the arrays are exactly equal Status ARROW_EXPORT ArrayEquals(const Array& left, const Array& right, bool* are_equal); -Status ARROW_EXPORT TensorEquals( - const Tensor& left, const Tensor& right, bool* are_equal); +Status ARROW_EXPORT TensorEquals(const Tensor& left, const Tensor& right, + bool* are_equal); /// Returns true if the arrays are approximately equal. 
For non-floating point /// types, this is equivalent to ArrayEquals(left, right) -Status ARROW_EXPORT ArrayApproxEquals( - const Array& left, const Array& right, bool* are_equal); +Status ARROW_EXPORT ArrayApproxEquals(const Array& left, const Array& right, + bool* are_equal); /// Returns true if indicated equal-length segment of arrays is exactly equal Status ARROW_EXPORT ArrayRangeEquals(const Array& left, const Array& right, - int64_t start_idx, int64_t end_idx, int64_t other_start_idx, bool* are_equal); + int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, bool* are_equal); /// Returns true if the type metadata are exactly equal -Status ARROW_EXPORT TypeEquals( - const DataType& left, const DataType& right, bool* are_equal); +Status ARROW_EXPORT TypeEquals(const DataType& left, const DataType& right, + bool* are_equal); } // namespace arrow diff --git a/cpp/src/arrow/io/file.cc b/cpp/src/arrow/io/file.cc index 936655f26dbda..82e3ba8109c23 100644 --- a/cpp/src/arrow/io/file.cc +++ b/cpp/src/arrow/io/file.cc @@ -123,8 +123,8 @@ constexpr const char* kRangeExceptionError = "Range exception during wide-char string conversion"; #endif -static inline Status CheckOpenResult( - int ret, int errno_actual, const char* filename, size_t filename_length) { +static inline Status CheckOpenResult(int ret, int errno_actual, const char* filename, + size_t filename_length) { if (ret == -1) { // TODO: errno codes to strings std::stringstream ss; @@ -134,12 +134,14 @@ static inline Status CheckOpenResult( // this requires c++11 std::wstring_convert, wchar_t> converter; - std::wstring wide_string( - reinterpret_cast(filename), filename_length / sizeof(wchar_t)); + std::wstring wide_string(reinterpret_cast(filename), + filename_length / sizeof(wchar_t)); try { std::string byte_string = converter.to_bytes(wide_string); ss << byte_string; - } catch (const std::range_error&) { ss << kRangeExceptionError; } + } catch (const std::range_error&) { + ss << kRangeExceptionError; + } #else ss << filename; #endif @@ -161,7 +163,9 @@ static inline int64_t lseek64_compat(int fd, int64_t pos, int whence) { #if defined(_MSC_VER) static inline Status ConvertToUtf16(const std::string& input, std::wstring* result) { - if (result == nullptr) { return Status::Invalid("Pointer to result is not valid"); } + if (result == nullptr) { + return Status::Invalid("Pointer to result is not valid"); + } if (input.empty()) { *result = std::wstring(); @@ -171,7 +175,9 @@ static inline Status ConvertToUtf16(const std::string& input, std::wstring* resu std::wstring_convert> utf16_converter; try { *result = utf16_converter.from_bytes(input); - } catch (const std::range_error&) { return Status::Invalid(kRangeExceptionError); } + } catch (const std::range_error&) { + return Status::Invalid(kRangeExceptionError); + } return Status::OK(); } #endif @@ -194,8 +200,8 @@ static inline Status FileOpenReadable(const std::string& filename, int* fd) { return CheckOpenResult(ret, errno_actual, filename.c_str(), filename.size()); } -static inline Status FileOpenWriteable( - const std::string& filename, bool write_only, bool truncate, int* fd) { +static inline Status FileOpenWriteable(const std::string& filename, bool write_only, + bool truncate, int* fd) { int ret; errno_t errno_actual = 0; @@ -205,9 +211,13 @@ static inline Status FileOpenWriteable( int oflag = _O_CREAT | _O_BINARY; int pmode = _S_IWRITE; - if (!write_only) { pmode |= _S_IREAD; } + if (!write_only) { + pmode |= _S_IREAD; + } - if (truncate) { oflag |= _O_TRUNC; } + if (truncate) 
{ + oflag |= _O_TRUNC; + } if (write_only) { oflag |= _O_WRONLY; @@ -221,7 +231,9 @@ static inline Status FileOpenWriteable( #else int oflag = O_CREAT | O_BINARY; - if (truncate) { oflag |= O_TRUNC; } + if (truncate) { + oflag |= O_TRUNC; + } if (write_only) { oflag |= O_WRONLY; @@ -239,7 +251,9 @@ static inline Status FileTell(int fd, int64_t* pos) { #if defined(_MSC_VER) current_pos = _telli64(fd); - if (current_pos == -1) { return Status::IOError("_telli64 failed"); } + if (current_pos == -1) { + return Status::IOError("_telli64 failed"); + } #else current_pos = lseek64_compat(fd, 0, SEEK_CUR); CHECK_LSEEK(current_pos); @@ -255,10 +269,12 @@ static inline Status FileSeek(int fd, int64_t pos) { return Status::OK(); } -static inline Status FileRead( - int fd, uint8_t* buffer, int64_t nbytes, int64_t* bytes_read) { +static inline Status FileRead(int fd, uint8_t* buffer, int64_t nbytes, + int64_t* bytes_read) { #if defined(_MSC_VER) - if (nbytes > INT32_MAX) { return Status::IOError("Unable to read > 2GB blocks yet"); } + if (nbytes > INT32_MAX) { + return Status::IOError("Unable to read > 2GB blocks yet"); + } *bytes_read = static_cast(_read(fd, buffer, static_cast(nbytes))); #else *bytes_read = static_cast(read(fd, buffer, static_cast(nbytes))); @@ -323,7 +339,9 @@ static inline Status FileClose(int fd) { ret = static_cast(close(fd)); #endif - if (ret == -1) { return Status::IOError("error closing file"); } + if (ret == -1) { + return Status::IOError("error closing file"); + } return Status::OK(); } @@ -371,7 +389,9 @@ class OSFile { } Status Seek(int64_t pos) { - if (pos < 0) { return Status::Invalid("Invalid position"); } + if (pos < 0) { + return Status::Invalid("Invalid position"); + } return FileSeek(fd_, pos); } @@ -379,7 +399,9 @@ class OSFile { Status Write(const uint8_t* data, int64_t length) { std::lock_guard guard(lock_); - if (length < 0) { return Status::IOError("Length must be non-negative"); } + if (length < 0) { + return Status::IOError("Length must be non-negative"); + } return FileWrite(fd_, data, length); } @@ -421,7 +443,9 @@ class ReadableFile::ReadableFileImpl : public OSFile { int64_t bytes_read = 0; RETURN_NOT_OK(Read(nbytes, &bytes_read, buffer->mutable_data())); - if (bytes_read < nbytes) { RETURN_NOT_OK(buffer->Resize(bytes_read)); } + if (bytes_read < nbytes) { + RETURN_NOT_OK(buffer->Resize(bytes_read)); + } *out = buffer; return Status::OK(); } @@ -430,13 +454,9 @@ class ReadableFile::ReadableFileImpl : public OSFile { MemoryPool* pool_; }; -ReadableFile::ReadableFile(MemoryPool* pool) { - impl_.reset(new ReadableFileImpl(pool)); -} +ReadableFile::ReadableFile(MemoryPool* pool) { impl_.reset(new ReadableFileImpl(pool)); } -ReadableFile::~ReadableFile() { - DCHECK(impl_->Close().ok()); -} +ReadableFile::~ReadableFile() { DCHECK(impl_->Close().ok()); } Status ReadableFile::Open(const std::string& path, std::shared_ptr* file) { *file = std::shared_ptr(new ReadableFile(default_memory_pool())); @@ -444,18 +464,14 @@ Status ReadableFile::Open(const std::string& path, std::shared_ptr } Status ReadableFile::Open(const std::string& path, MemoryPool* memory_pool, - std::shared_ptr* file) { + std::shared_ptr* file) { *file = std::shared_ptr(new ReadableFile(memory_pool)); return (*file)->impl_->Open(path); } -Status ReadableFile::Close() { - return impl_->Close(); -} +Status ReadableFile::Close() { return impl_->Close(); } -Status ReadableFile::Tell(int64_t* pos) { - return impl_->Tell(pos); -} +Status ReadableFile::Tell(int64_t* pos) { return impl_->Tell(pos); } Status 
ReadableFile::Read(int64_t nbytes, int64_t* bytes_read, uint8_t* out) { return impl_->Read(nbytes, bytes_read, out); @@ -470,17 +486,11 @@ Status ReadableFile::GetSize(int64_t* size) { return Status::OK(); } -Status ReadableFile::Seek(int64_t pos) { - return impl_->Seek(pos); -} +Status ReadableFile::Seek(int64_t pos) { return impl_->Seek(pos); } -bool ReadableFile::supports_zero_copy() const { - return false; -} +bool ReadableFile::supports_zero_copy() const { return false; } -int ReadableFile::file_descriptor() const { - return impl_->fd(); -} +int ReadableFile::file_descriptor() const { return impl_->fd(); } // ---------------------------------------------------------------------- // FileOutputStream @@ -492,42 +502,34 @@ class FileOutputStream::FileOutputStreamImpl : public OSFile { } }; -FileOutputStream::FileOutputStream() { - impl_.reset(new FileOutputStreamImpl()); -} +FileOutputStream::FileOutputStream() { impl_.reset(new FileOutputStreamImpl()); } FileOutputStream::~FileOutputStream() { // This can fail; better to explicitly call close DCHECK(impl_->Close().ok()); } -Status FileOutputStream::Open( - const std::string& path, std::shared_ptr* file) { +Status FileOutputStream::Open(const std::string& path, + std::shared_ptr* file) { return Open(path, false, file); } -Status FileOutputStream::Open( - const std::string& path, bool append, std::shared_ptr* file) { +Status FileOutputStream::Open(const std::string& path, bool append, + std::shared_ptr* file) { // private ctor *file = std::shared_ptr(new FileOutputStream()); return (*file)->impl_->Open(path, append); } -Status FileOutputStream::Close() { - return impl_->Close(); -} +Status FileOutputStream::Close() { return impl_->Close(); } -Status FileOutputStream::Tell(int64_t* pos) { - return impl_->Tell(pos); -} +Status FileOutputStream::Tell(int64_t* pos) { return impl_->Tell(pos); } Status FileOutputStream::Write(const uint8_t* data, int64_t length) { return impl_->Write(data, length); } -int FileOutputStream::file_descriptor() const { - return impl_->fd(); -} +int FileOutputStream::file_descriptor() const { return impl_->fd(); } // ---------------------------------------------------------------------- // Implement MemoryMappedFile @@ -567,7 +569,7 @@ class MemoryMappedFile::MemoryMap : public MutableBuffer { } void* result = mmap(nullptr, static_cast(file_->size()), prot_flags, map_mode, - file_->fd(), 0); + file_->fd(), 0); if (result == MAP_FAILED) { std::stringstream ss; ss << "Memory mapping file failed, errno: " << errno; @@ -585,7 +587,9 @@ class MemoryMappedFile::MemoryMap : public MutableBuffer { int64_t size() const { return size_; } Status Seek(int64_t position) { - if (position < 0) { return Status::Invalid("position is out of bounds"); } + if (position < 0) { + return Status::Invalid("position is out of bounds"); + } position_ = position; return Status::OK(); } @@ -610,8 +614,8 @@ class MemoryMappedFile::MemoryMap : public MutableBuffer { MemoryMappedFile::MemoryMappedFile() {} MemoryMappedFile::~MemoryMappedFile() {} -Status MemoryMappedFile::Create( - const std::string& path, int64_t size, std::shared_ptr* out) { +Status MemoryMappedFile::Create(const std::string& path, int64_t size, + std::shared_ptr* out) { std::shared_ptr file; RETURN_NOT_OK(FileOutputStream::Open(path, &file)); #ifdef _MSC_VER @@ -624,7 +628,7 @@ Status MemoryMappedFile::Create( } Status MemoryMappedFile::Open(const std::string& path, FileMode::type mode, - std::shared_ptr* out) { + std::shared_ptr* out) { std::shared_ptr result(new 
MemoryMappedFile()); result->memory_map_.reset(new MemoryMap()); @@ -644,9 +648,7 @@ Status MemoryMappedFile::Tell(int64_t* position) { return Status::OK(); } -Status MemoryMappedFile::Seek(int64_t position) { - return memory_map_->Seek(position); -} +Status MemoryMappedFile::Seek(int64_t position) { return memory_map_->Seek(position); } Status MemoryMappedFile::Close() { // munmap handled in pimpl dtor @@ -656,7 +658,9 @@ Status MemoryMappedFile::Close() { Status MemoryMappedFile::Read(int64_t nbytes, int64_t* bytes_read, uint8_t* out) { nbytes = std::max( 0, std::min(nbytes, memory_map_->size() - memory_map_->position())); - if (nbytes > 0) { std::memcpy(out, memory_map_->head(), static_cast(nbytes)); } + if (nbytes > 0) { + std::memcpy(out, memory_map_->head(), static_cast(nbytes)); + } *bytes_read = nbytes; memory_map_->advance(nbytes); return Status::OK(); @@ -675,9 +679,7 @@ Status MemoryMappedFile::Read(int64_t nbytes, std::shared_ptr* out) { return Status::OK(); } -bool MemoryMappedFile::supports_zero_copy() const { - return true; -} +bool MemoryMappedFile::supports_zero_copy() const { return true; } Status MemoryMappedFile::WriteAt(int64_t position, const uint8_t* data, int64_t nbytes) { std::lock_guard guard(lock_); @@ -708,9 +710,7 @@ Status MemoryMappedFile::WriteInternal(const uint8_t* data, int64_t nbytes) { return Status::OK(); } -int MemoryMappedFile::file_descriptor() const { - return memory_map_->fd(); -} +int MemoryMappedFile::file_descriptor() const { return memory_map_->fd(); } } // namespace io } // namespace arrow diff --git a/cpp/src/arrow/io/file.h b/cpp/src/arrow/io/file.h index f0be3cf980162..ba740f1e8f4a9 100644 --- a/cpp/src/arrow/io/file.h +++ b/cpp/src/arrow/io/file.h @@ -44,8 +44,8 @@ class ARROW_EXPORT FileOutputStream : public OutputStream { // truncated to 0 bytes, deleting any existing memory static Status Open(const std::string& path, std::shared_ptr* file); - static Status Open( - const std::string& path, bool append, std::shared_ptr* file); + static Status Open(const std::string& path, bool append, + std::shared_ptr* file); // OutputStream interface Status Close() override; @@ -73,7 +73,7 @@ class ARROW_EXPORT ReadableFile : public RandomAccessFile { // Open file with one's own memory pool for memory allocations static Status Open(const std::string& path, MemoryPool* memory_pool, - std::shared_ptr* file); + std::shared_ptr* file); Status Close() override; Status Tell(int64_t* position) override; @@ -107,11 +107,11 @@ class ARROW_EXPORT MemoryMappedFile : public ReadWriteFileInterface { ~MemoryMappedFile(); /// Create new file with indicated size, return in read/write mode - static Status Create( - const std::string& path, int64_t size, std::shared_ptr* out); + static Status Create(const std::string& path, int64_t size, + std::shared_ptr* out); static Status Open(const std::string& path, FileMode::type mode, - std::shared_ptr* out); + std::shared_ptr* out); Status Close() override; diff --git a/cpp/src/arrow/io/hdfs-internal.cc b/cpp/src/arrow/io/hdfs-internal.cc index 8b4a92b396789..8f42b1c817fe4 100644 --- a/cpp/src/arrow/io/hdfs-internal.cc +++ b/cpp/src/arrow/io/hdfs-internal.cc @@ -59,9 +59,9 @@ static std::vector get_potential_libhdfs_paths(); static std::vector get_potential_libhdfs3_paths(); static arrow::Status try_dlopen(std::vector potential_paths, const char* name, #ifndef _WIN32 - void*& out_handle); + void*& out_handle); #else - HINSTANCE& out_handle); + HINSTANCE& out_handle); #endif static std::vector get_potential_libhdfs_paths() { @@ 
-88,7 +88,9 @@ static std::vector get_potential_libhdfs_paths() { } const char* libhdfs_dir = std::getenv("ARROW_LIBHDFS_DIR"); - if (libhdfs_dir != nullptr) { search_paths.push_back(fs::path(libhdfs_dir)); } + if (libhdfs_dir != nullptr) { + search_paths.push_back(fs::path(libhdfs_dir)); + } // All paths with file name for (auto& path : search_paths) { @@ -115,7 +117,9 @@ static std::vector get_potential_libhdfs3_paths() { std::vector search_paths = {fs::path(""), fs::path(".")}; const char* libhdfs3_dir = std::getenv("ARROW_LIBHDFS3_DIR"); - if (libhdfs3_dir != nullptr) { search_paths.push_back(fs::path(libhdfs3_dir)); } + if (libhdfs3_dir != nullptr) { + search_paths.push_back(fs::path(libhdfs3_dir)); + } // All paths with file name for (auto& path : search_paths) { @@ -188,8 +192,8 @@ static std::vector get_potential_libjvm_paths() { } #ifndef _WIN32 -static arrow::Status try_dlopen( - std::vector potential_paths, const char* name, void*& out_handle) { +static arrow::Status try_dlopen(std::vector potential_paths, const char* name, + void*& out_handle) { std::vector error_messages; for (auto& i : potential_paths) { @@ -219,8 +223,8 @@ static arrow::Status try_dlopen( } #else -static arrow::Status try_dlopen( - std::vector potential_paths, const char* name, HINSTANCE& out_handle) { +static arrow::Status try_dlopen(std::vector potential_paths, const char* name, + HINSTANCE& out_handle) { std::vector error_messages; for (auto& i : potential_paths) { @@ -282,9 +286,7 @@ namespace io { static LibHdfsShim libhdfs_shim; static LibHdfsShim libhdfs3_shim; -hdfsBuilder* LibHdfsShim::NewBuilder(void) { - return this->hdfsNewBuilder(); -} +hdfsBuilder* LibHdfsShim::NewBuilder(void) { return this->hdfsNewBuilder(); } void LibHdfsShim::BuilderSetNameNode(hdfsBuilder* bld, const char* nn) { this->hdfsBuilderSetNameNode(bld, nn); @@ -298,8 +300,8 @@ void LibHdfsShim::BuilderSetUserName(hdfsBuilder* bld, const char* userName) { this->hdfsBuilderSetUserName(bld, userName); } -void LibHdfsShim::BuilderSetKerbTicketCachePath( - hdfsBuilder* bld, const char* kerbTicketCachePath) { +void LibHdfsShim::BuilderSetKerbTicketCachePath(hdfsBuilder* bld, + const char* kerbTicketCachePath) { this->hdfsBuilderSetKerbTicketCachePath(bld, kerbTicketCachePath); } @@ -307,12 +309,10 @@ hdfsFS LibHdfsShim::BuilderConnect(hdfsBuilder* bld) { return this->hdfsBuilderConnect(bld); } -int LibHdfsShim::Disconnect(hdfsFS fs) { - return this->hdfsDisconnect(fs); -} +int LibHdfsShim::Disconnect(hdfsFS fs) { return this->hdfsDisconnect(fs); } hdfsFile LibHdfsShim::OpenFile(hdfsFS fs, const char* path, int flags, int bufferSize, - short replication, tSize blocksize) { // NOLINT + short replication, tSize blocksize) { // NOLINT return this->hdfsOpenFile(fs, path, flags, bufferSize, replication, blocksize); } @@ -328,9 +328,7 @@ int LibHdfsShim::Seek(hdfsFS fs, hdfsFile file, tOffset desiredPos) { return this->hdfsSeek(fs, file, desiredPos); } -tOffset LibHdfsShim::Tell(hdfsFS fs, hdfsFile file) { - return this->hdfsTell(fs, file); -} +tOffset LibHdfsShim::Tell(hdfsFS fs, hdfsFile file) { return this->hdfsTell(fs, file); } tSize LibHdfsShim::Read(hdfsFS fs, hdfsFile file, void* buffer, tSize length) { return this->hdfsRead(fs, file, buffer, length); @@ -341,8 +339,8 @@ bool LibHdfsShim::HasPread() { return this->hdfsPread != nullptr; } -tSize LibHdfsShim::Pread( - hdfsFS fs, hdfsFile file, tOffset position, void* buffer, tSize length) { +tSize LibHdfsShim::Pread(hdfsFS fs, hdfsFile file, tOffset position, void* buffer, + tSize 
length) { GET_SYMBOL(this, hdfsPread); return this->hdfsPread(fs, file, position, buffer, length); } @@ -351,9 +349,7 @@ tSize LibHdfsShim::Write(hdfsFS fs, hdfsFile file, const void* buffer, tSize len return this->hdfsWrite(fs, file, buffer, length); } -int LibHdfsShim::Flush(hdfsFS fs, hdfsFile file) { - return this->hdfsFlush(fs, file); -} +int LibHdfsShim::Flush(hdfsFS fs, hdfsFile file) { return this->hdfsFlush(fs, file); } int LibHdfsShim::Available(hdfsFS fs, hdfsFile file) { GET_SYMBOL(this, hdfsAvailable); @@ -434,8 +430,8 @@ void LibHdfsShim::FreeFileInfo(hdfsFileInfo* hdfsFileInfo, int numEntries) { this->hdfsFreeFileInfo(hdfsFileInfo, numEntries); } -char*** LibHdfsShim::GetHosts( - hdfsFS fs, const char* path, tOffset start, tOffset length) { +char*** LibHdfsShim::GetHosts(hdfsFS fs, const char* path, tOffset start, + tOffset length) { GET_SYMBOL(this, hdfsGetHosts); if (this->hdfsGetHosts) { return this->hdfsGetHosts(fs, path, start, length); @@ -446,7 +442,9 @@ char*** LibHdfsShim::GetHosts( void LibHdfsShim::FreeHosts(char*** blockHosts) { GET_SYMBOL(this, hdfsFreeHosts); - if (this->hdfsFreeHosts) { this->hdfsFreeHosts(blockHosts); } + if (this->hdfsFreeHosts) { + this->hdfsFreeHosts(blockHosts); + } } tOffset LibHdfsShim::GetDefaultBlockSize(hdfsFS fs) { @@ -458,16 +456,12 @@ tOffset LibHdfsShim::GetDefaultBlockSize(hdfsFS fs) { } } -tOffset LibHdfsShim::GetCapacity(hdfsFS fs) { - return this->hdfsGetCapacity(fs); -} +tOffset LibHdfsShim::GetCapacity(hdfsFS fs) { return this->hdfsGetCapacity(fs); } -tOffset LibHdfsShim::GetUsed(hdfsFS fs) { - return this->hdfsGetUsed(fs); -} +tOffset LibHdfsShim::GetUsed(hdfsFS fs) { return this->hdfsGetUsed(fs); } -int LibHdfsShim::Chown( - hdfsFS fs, const char* path, const char* owner, const char* group) { +int LibHdfsShim::Chown(hdfsFS fs, const char* path, const char* owner, + const char* group) { GET_SYMBOL(this, hdfsChown); if (this->hdfsChown) { return this->hdfsChown(fs, path, owner, group); diff --git a/cpp/src/arrow/io/hdfs-internal.h b/cpp/src/arrow/io/hdfs-internal.h index c5ea397af0bd5..db6a21c2b36ac 100644 --- a/cpp/src/arrow/io/hdfs-internal.h +++ b/cpp/src/arrow/io/hdfs-internal.h @@ -45,22 +45,22 @@ struct LibHdfsShim { void (*hdfsBuilderSetNameNode)(hdfsBuilder* bld, const char* nn); void (*hdfsBuilderSetNameNodePort)(hdfsBuilder* bld, tPort port); void (*hdfsBuilderSetUserName)(hdfsBuilder* bld, const char* userName); - void (*hdfsBuilderSetKerbTicketCachePath)( - hdfsBuilder* bld, const char* kerbTicketCachePath); + void (*hdfsBuilderSetKerbTicketCachePath)(hdfsBuilder* bld, + const char* kerbTicketCachePath); hdfsFS (*hdfsBuilderConnect)(hdfsBuilder* bld); int (*hdfsDisconnect)(hdfsFS fs); hdfsFile (*hdfsOpenFile)(hdfsFS fs, const char* path, int flags, int bufferSize, - short replication, tSize blocksize); // NOLINT + short replication, tSize blocksize); // NOLINT int (*hdfsCloseFile)(hdfsFS fs, hdfsFile file); int (*hdfsExists)(hdfsFS fs, const char* path); int (*hdfsSeek)(hdfsFS fs, hdfsFile file, tOffset desiredPos); tOffset (*hdfsTell)(hdfsFS fs, hdfsFile file); tSize (*hdfsRead)(hdfsFS fs, hdfsFile file, void* buffer, tSize length); - tSize (*hdfsPread)( - hdfsFS fs, hdfsFile file, tOffset position, void* buffer, tSize length); + tSize (*hdfsPread)(hdfsFS fs, hdfsFile file, tOffset position, void* buffer, + tSize length); tSize (*hdfsWrite)(hdfsFS fs, hdfsFile file, const void* buffer, tSize length); int (*hdfsFlush)(hdfsFS fs, hdfsFile file); int (*hdfsAvailable)(hdfsFS fs, hdfsFile file); @@ -139,7 +139,7 @@ 
struct LibHdfsShim { int Disconnect(hdfsFS fs); hdfsFile OpenFile(hdfsFS fs, const char* path, int flags, int bufferSize, - short replication, tSize blocksize); // NOLINT + short replication, tSize blocksize); // NOLINT int CloseFile(hdfsFS fs, hdfsFile file); diff --git a/cpp/src/arrow/io/hdfs.cc b/cpp/src/arrow/io/hdfs.cc index 9ded9bc3f9902..500f42caf5277 100644 --- a/cpp/src/arrow/io/hdfs.cc +++ b/cpp/src/arrow/io/hdfs.cc @@ -61,8 +61,8 @@ static constexpr int kDefaultHdfsBufferSize = 1 << 16; class HdfsAnyFileImpl { public: - void set_members( - const std::string& path, LibHdfsShim* driver, hdfsFS fs, hdfsFile handle) { + void set_members(const std::string& path, LibHdfsShim* driver, hdfsFS fs, + hdfsFile handle) { path_ = path; driver_ = driver; fs_ = fs; @@ -118,7 +118,7 @@ class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl { tSize ret; if (driver_->HasPread()) { ret = driver_->Pread(fs_, file_, static_cast(position), - reinterpret_cast(buffer), static_cast(nbytes)); + reinterpret_cast(buffer), static_cast(nbytes)); } else { std::lock_guard guard(lock_); RETURN_NOT_OK(Seek(position)); @@ -136,7 +136,9 @@ class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl { int64_t bytes_read = 0; RETURN_NOT_OK(ReadAt(position, nbytes, &bytes_read, buffer->mutable_data())); - if (bytes_read < nbytes) { RETURN_NOT_OK(buffer->Resize(bytes_read)); } + if (bytes_read < nbytes) { + RETURN_NOT_OK(buffer->Resize(bytes_read)); + } *out = buffer; return Status::OK(); @@ -145,11 +147,14 @@ class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl { Status Read(int64_t nbytes, int64_t* bytes_read, uint8_t* buffer) { int64_t total_bytes = 0; while (total_bytes < nbytes) { - tSize ret = driver_->Read(fs_, file_, reinterpret_cast(buffer + total_bytes), + tSize ret = driver_->Read( + fs_, file_, reinterpret_cast(buffer + total_bytes), static_cast(std::min(buffer_size_, nbytes - total_bytes))); RETURN_NOT_OK(CheckReadResult(ret)); total_bytes += ret; - if (ret == 0) { break; } + if (ret == 0) { + break; + } } *bytes_read = total_bytes; @@ -162,7 +167,9 @@ class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl { int64_t bytes_read = 0; RETURN_NOT_OK(Read(nbytes, &bytes_read, buffer->mutable_data())); - if (bytes_read < nbytes) { RETURN_NOT_OK(buffer->Resize(bytes_read)); } + if (bytes_read < nbytes) { + RETURN_NOT_OK(buffer->Resize(bytes_read)); + } *out = buffer; return Status::OK(); @@ -170,7 +177,9 @@ class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl { Status GetSize(int64_t* size) { hdfsFileInfo* entry = driver_->GetPathInfo(fs_, path_.c_str()); - if (entry == nullptr) { return Status::IOError("HDFS: GetPathInfo failed"); } + if (entry == nullptr) { + return Status::IOError("HDFS: GetPathInfo failed"); + } *size = entry->mSize; driver_->FreeFileInfo(entry, 1); @@ -187,31 +196,27 @@ class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl { }; HdfsReadableFile::HdfsReadableFile(MemoryPool* pool) { - if (pool == nullptr) { pool = default_memory_pool(); } + if (pool == nullptr) { + pool = default_memory_pool(); + } impl_.reset(new HdfsReadableFileImpl(pool)); } -HdfsReadableFile::~HdfsReadableFile() { - DCHECK(impl_->Close().ok()); -} +HdfsReadableFile::~HdfsReadableFile() { DCHECK(impl_->Close().ok()); } -Status HdfsReadableFile::Close() { - return impl_->Close(); -} +Status HdfsReadableFile::Close() { return impl_->Close(); } -Status HdfsReadableFile::ReadAt( - int64_t position, int64_t nbytes, int64_t* bytes_read, 
uint8_t* buffer) { +Status HdfsReadableFile::ReadAt(int64_t position, int64_t nbytes, int64_t* bytes_read, + uint8_t* buffer) { return impl_->ReadAt(position, nbytes, bytes_read, buffer); } -Status HdfsReadableFile::ReadAt( - int64_t position, int64_t nbytes, std::shared_ptr* out) { +Status HdfsReadableFile::ReadAt(int64_t position, int64_t nbytes, + std::shared_ptr* out) { return impl_->ReadAt(position, nbytes, out); } -bool HdfsReadableFile::supports_zero_copy() const { - return false; -} +bool HdfsReadableFile::supports_zero_copy() const { return false; } Status HdfsReadableFile::Read(int64_t nbytes, int64_t* bytes_read, uint8_t* buffer) { return impl_->Read(nbytes, bytes_read, buffer); @@ -221,17 +226,11 @@ Status HdfsReadableFile::Read(int64_t nbytes, std::shared_ptr* buffer) { return impl_->Read(nbytes, buffer); } -Status HdfsReadableFile::GetSize(int64_t* size) { - return impl_->GetSize(size); -} +Status HdfsReadableFile::GetSize(int64_t* size) { return impl_->GetSize(size); } -Status HdfsReadableFile::Seek(int64_t position) { - return impl_->Seek(position); -} +Status HdfsReadableFile::Seek(int64_t position) { return impl_->Seek(position); } -Status HdfsReadableFile::Tell(int64_t* position) { - return impl_->Tell(position); -} +Status HdfsReadableFile::Tell(int64_t* position) { return impl_->Tell(position); } // ---------------------------------------------------------------------- // File writing @@ -259,28 +258,22 @@ class HdfsOutputStream::HdfsOutputStreamImpl : public HdfsAnyFileImpl { Status Write(const uint8_t* buffer, int64_t nbytes, int64_t* bytes_written) { std::lock_guard guard(lock_); - tSize ret = driver_->Write( - fs_, file_, reinterpret_cast(buffer), static_cast(nbytes)); + tSize ret = driver_->Write(fs_, file_, reinterpret_cast(buffer), + static_cast(nbytes)); CHECK_FAILURE(ret, "Write"); *bytes_written = ret; return Status::OK(); } }; -HdfsOutputStream::HdfsOutputStream() { - impl_.reset(new HdfsOutputStreamImpl()); -} +HdfsOutputStream::HdfsOutputStream() { impl_.reset(new HdfsOutputStreamImpl()); } -HdfsOutputStream::~HdfsOutputStream() { - DCHECK(impl_->Close().ok()); -} +HdfsOutputStream::~HdfsOutputStream() { DCHECK(impl_->Close().ok()); } -Status HdfsOutputStream::Close() { - return impl_->Close(); -} +Status HdfsOutputStream::Close() { return impl_->Close(); } -Status HdfsOutputStream::Write( - const uint8_t* buffer, int64_t nbytes, int64_t* bytes_read) { +Status HdfsOutputStream::Write(const uint8_t* buffer, int64_t nbytes, + int64_t* bytes_read) { return impl_->Write(buffer, nbytes, bytes_read); } @@ -289,13 +282,9 @@ Status HdfsOutputStream::Write(const uint8_t* buffer, int64_t nbytes) { return Write(buffer, nbytes, &bytes_written_dummy); } -Status HdfsOutputStream::Flush() { - return impl_->Flush(); -} +Status HdfsOutputStream::Flush() { return impl_->Flush(); } -Status HdfsOutputStream::Tell(int64_t* position) { - return impl_->Tell(position); -} +Status HdfsOutputStream::Tell(int64_t* position) { return impl_->Tell(position); } // ---------------------------------------------------------------------- // HDFS client @@ -344,7 +333,9 @@ class HdfsClient::HdfsClientImpl { } fs_ = driver_->BuilderConnect(builder); - if (fs_ == nullptr) { return Status::IOError("HDFS connection failed"); } + if (fs_ == nullptr) { + return Status::IOError("HDFS connection failed"); + } namenode_host_ = config->host; port_ = config->port; user_ = config->user; @@ -395,7 +386,9 @@ class HdfsClient::HdfsClientImpl { Status GetPathInfo(const std::string& path, HdfsPathInfo* info) 
{ hdfsFileInfo* entry = driver_->GetPathInfo(fs_, path.c_str()); - if (entry == nullptr) { return Status::IOError("HDFS: GetPathInfo failed"); } + if (entry == nullptr) { + return Status::IOError("HDFS: GetPathInfo failed"); + } SetPathInfo(entry, info); driver_->FreeFileInfo(entry, 1); @@ -435,7 +428,7 @@ class HdfsClient::HdfsClientImpl { } Status OpenReadable(const std::string& path, int32_t buffer_size, - std::shared_ptr* file) { + std::shared_ptr* file) { hdfsFile handle = driver_->OpenFile(fs_, path.c_str(), O_RDONLY, buffer_size, 0, 0); if (handle == nullptr) { @@ -454,13 +447,14 @@ class HdfsClient::HdfsClientImpl { } Status OpenWriteable(const std::string& path, bool append, int32_t buffer_size, - int16_t replication, int64_t default_block_size, - std::shared_ptr* file) { + int16_t replication, int64_t default_block_size, + std::shared_ptr* file) { int flags = O_WRONLY; if (append) flags |= O_APPEND; - hdfsFile handle = driver_->OpenFile(fs_, path.c_str(), flags, buffer_size, - replication, static_cast(default_block_size)); + hdfsFile handle = + driver_->OpenFile(fs_, path.c_str(), flags, buffer_size, replication, + static_cast(default_block_size)); if (handle == nullptr) { // TODO(wesm): determine cause of failure @@ -496,14 +490,12 @@ class HdfsClient::HdfsClientImpl { // ---------------------------------------------------------------------- // Public API for HDFSClient -HdfsClient::HdfsClient() { - impl_.reset(new HdfsClientImpl()); -} +HdfsClient::HdfsClient() { impl_.reset(new HdfsClientImpl()); } HdfsClient::~HdfsClient() {} -Status HdfsClient::Connect( - const HdfsConnectionConfig* config, std::shared_ptr* fs) { +Status HdfsClient::Connect(const HdfsConnectionConfig* config, + std::shared_ptr* fs) { // ctor is private, make_shared will not work *fs = std::shared_ptr(new HdfsClient()); @@ -519,50 +511,43 @@ Status HdfsClient::Delete(const std::string& path, bool recursive) { return impl_->Delete(path, recursive); } -Status HdfsClient::Disconnect() { - return impl_->Disconnect(); -} +Status HdfsClient::Disconnect() { return impl_->Disconnect(); } -bool HdfsClient::Exists(const std::string& path) { - return impl_->Exists(path); -} +bool HdfsClient::Exists(const std::string& path) { return impl_->Exists(path); } Status HdfsClient::GetPathInfo(const std::string& path, HdfsPathInfo* info) { return impl_->GetPathInfo(path, info); } -Status HdfsClient::GetCapacity(int64_t* nbytes) { - return impl_->GetCapacity(nbytes); -} +Status HdfsClient::GetCapacity(int64_t* nbytes) { return impl_->GetCapacity(nbytes); } -Status HdfsClient::GetUsed(int64_t* nbytes) { - return impl_->GetUsed(nbytes); -} +Status HdfsClient::GetUsed(int64_t* nbytes) { return impl_->GetUsed(nbytes); } -Status HdfsClient::ListDirectory( - const std::string& path, std::vector* listing) { +Status HdfsClient::ListDirectory(const std::string& path, + std::vector* listing) { return impl_->ListDirectory(path, listing); } Status HdfsClient::OpenReadable(const std::string& path, int32_t buffer_size, - std::shared_ptr* file) { + std::shared_ptr* file) { return impl_->OpenReadable(path, buffer_size, file); } -Status HdfsClient::OpenReadable( - const std::string& path, std::shared_ptr* file) { +Status HdfsClient::OpenReadable(const std::string& path, + std::shared_ptr* file) { return OpenReadable(path, kDefaultHdfsBufferSize, file); } Status HdfsClient::OpenWriteable(const std::string& path, bool append, - int32_t buffer_size, int16_t replication, int64_t default_block_size, - std::shared_ptr* file) { - return 
impl_->OpenWriteable( - path, append, buffer_size, replication, default_block_size, file); + int32_t buffer_size, int16_t replication, + int64_t default_block_size, + std::shared_ptr* file) { + return impl_->OpenWriteable(path, append, buffer_size, replication, default_block_size, + file); } -Status HdfsClient::OpenWriteable( - const std::string& path, bool append, std::shared_ptr* file) { +Status HdfsClient::OpenWriteable(const std::string& path, bool append, + std::shared_ptr* file) { return OpenWriteable(path, append, 0, 0, 0, file); } diff --git a/cpp/src/arrow/io/hdfs.h b/cpp/src/arrow/io/hdfs.h index f3de4a2bf174f..63c3ae0d53724 100644 --- a/cpp/src/arrow/io/hdfs.h +++ b/cpp/src/arrow/io/hdfs.h @@ -75,8 +75,8 @@ class ARROW_EXPORT HdfsClient : public FileSystemClient { // @param config (in): configuration for connecting // @param fs (out): the created client // @returns Status - static Status Connect( - const HdfsConnectionConfig* config, std::shared_ptr* fs); + static Status Connect(const HdfsConnectionConfig* config, + std::shared_ptr* fs); // Create directory and all parents // @@ -132,7 +132,7 @@ class ARROW_EXPORT HdfsClient : public FileSystemClient { // // @param path complete file path Status OpenReadable(const std::string& path, int32_t buffer_size, - std::shared_ptr* file); + std::shared_ptr* file); Status OpenReadable(const std::string& path, std::shared_ptr* file); @@ -142,11 +142,11 @@ class ARROW_EXPORT HdfsClient : public FileSystemClient { // @param replication, 0 for default // @param default_block_size, 0 for default Status OpenWriteable(const std::string& path, bool append, int32_t buffer_size, - int16_t replication, int64_t default_block_size, - std::shared_ptr* file); + int16_t replication, int64_t default_block_size, + std::shared_ptr* file); - Status OpenWriteable( - const std::string& path, bool append, std::shared_ptr* file); + Status OpenWriteable(const std::string& path, bool append, + std::shared_ptr* file); private: friend class HdfsReadableFile; @@ -173,8 +173,8 @@ class ARROW_EXPORT HdfsReadableFile : public RandomAccessFile { Status Read(int64_t nbytes, std::shared_ptr* out) override; - Status ReadAt( - int64_t position, int64_t nbytes, int64_t* bytes_read, uint8_t* buffer) override; + Status ReadAt(int64_t position, int64_t nbytes, int64_t* bytes_read, + uint8_t* buffer) override; Status ReadAt(int64_t position, int64_t nbytes, std::shared_ptr* out) override; diff --git a/cpp/src/arrow/io/interfaces.cc b/cpp/src/arrow/io/interfaces.cc index 06957d4de560d..57dc42d8a9b2a 100644 --- a/cpp/src/arrow/io/interfaces.cc +++ b/cpp/src/arrow/io/interfaces.cc @@ -29,32 +29,28 @@ namespace io { FileInterface::~FileInterface() {} -RandomAccessFile::RandomAccessFile() { - set_mode(FileMode::READ); -} +RandomAccessFile::RandomAccessFile() { set_mode(FileMode::READ); } -Status RandomAccessFile::ReadAt( - int64_t position, int64_t nbytes, int64_t* bytes_read, uint8_t* out) { +Status RandomAccessFile::ReadAt(int64_t position, int64_t nbytes, int64_t* bytes_read, + uint8_t* out) { std::lock_guard guard(lock_); RETURN_NOT_OK(Seek(position)); return Read(nbytes, bytes_read, out); } -Status RandomAccessFile::ReadAt( - int64_t position, int64_t nbytes, std::shared_ptr* out) { +Status RandomAccessFile::ReadAt(int64_t position, int64_t nbytes, + std::shared_ptr* out) { std::lock_guard guard(lock_); RETURN_NOT_OK(Seek(position)); return Read(nbytes, out); } Status Writeable::Write(const std::string& data) { - return Write( - reinterpret_cast(data.c_str()), 
static_cast(data.size())); + return Write(reinterpret_cast(data.c_str()), + static_cast(data.size())); } -Status Writeable::Flush() { - return Status::OK(); -} +Status Writeable::Flush() { return Status::OK(); } } // namespace io } // namespace arrow diff --git a/cpp/src/arrow/io/interfaces.h b/cpp/src/arrow/io/interfaces.h index b5a0bd85bf27b..e71a5c93baa32 100644 --- a/cpp/src/arrow/io/interfaces.h +++ b/cpp/src/arrow/io/interfaces.h @@ -107,8 +107,8 @@ class ARROW_EXPORT RandomAccessFile : public InputStream, public Seekable { /// be overridden /// /// Default implementation is thread-safe - virtual Status ReadAt( - int64_t position, int64_t nbytes, int64_t* bytes_read, uint8_t* out); + virtual Status ReadAt(int64_t position, int64_t nbytes, int64_t* bytes_read, + uint8_t* out); /// Default implementation is thread-safe virtual Status ReadAt(int64_t position, int64_t nbytes, std::shared_ptr* out); diff --git a/cpp/src/arrow/io/io-file-test.cc b/cpp/src/arrow/io/io-file-test.cc index a077f8cb921c7..36c35700d6496 100644 --- a/cpp/src/arrow/io/io-file-test.cc +++ b/cpp/src/arrow/io/io-file-test.cc @@ -43,9 +43,10 @@ static bool FileExists(const std::string& path) { #if defined(_MSC_VER) void InvalidParamHandler(const wchar_t* expr, const wchar_t* func, - const wchar_t* source_file, unsigned int source_line, uintptr_t reserved) { + const wchar_t* source_file, unsigned int source_line, + uintptr_t reserved) { wprintf(L"Invalid parameter in funcion %s. Source: %s line %d expression %s", func, - source_file, source_line, expr); + source_file, source_line, expr); } #endif @@ -61,7 +62,9 @@ static bool FileIsClosed(int fd) { int ret = static_cast(_close(fd)); return (ret == -1); #else - if (-1 != fcntl(fd, F_GETFD)) { return false; } + if (-1 != fcntl(fd, F_GETFD)) { + return false; + } return errno == EBADF; #endif } @@ -76,7 +79,9 @@ class FileTestFixture : public ::testing::Test { void TearDown() { EnsureFileDeleted(); } void EnsureFileDeleted() { - if (FileExists(path_)) { std::remove(path_.c_str()); } + if (FileExists(path_)) { + std::remove(path_.c_str()); + } } protected: @@ -382,7 +387,9 @@ TEST_F(TestReadableFile, ThreadSafety) { for (int i = 0; i < niter; ++i) { ASSERT_OK(file_->ReadAt(0, 3, &buffer)); - if (0 == memcmp(data.c_str(), buffer->data(), 3)) { correct_count += 1; } + if (0 == memcmp(data.c_str(), buffer->data(), 3)) { + correct_count += 1; + } } }; @@ -547,8 +554,8 @@ TEST_F(TestMemoryMappedFile, InvalidFile) { std::string non_existent_path = "invalid-file-name-asfd"; std::shared_ptr result; - ASSERT_RAISES( - IOError, MemoryMappedFile::Open(non_existent_path, FileMode::READ, &result)); + ASSERT_RAISES(IOError, + MemoryMappedFile::Open(non_existent_path, FileMode::READ, &result)); } TEST_F(TestMemoryMappedFile, CastableToFileInterface) { @@ -563,8 +570,8 @@ TEST_F(TestMemoryMappedFile, ThreadSafety) { std::shared_ptr file; ASSERT_OK(MemoryMappedFile::Open(path, FileMode::READWRITE, &file)); - ASSERT_OK(file->Write( - reinterpret_cast(data.c_str()), static_cast(data.size()))); + ASSERT_OK(file->Write(reinterpret_cast(data.c_str()), + static_cast(data.size()))); std::atomic correct_count(0); const int niter = 10000; @@ -574,7 +581,9 @@ TEST_F(TestMemoryMappedFile, ThreadSafety) { for (int i = 0; i < niter; ++i) { ASSERT_OK(file->ReadAt(0, 3, &buffer)); - if (0 == memcmp(data.c_str(), buffer->data(), 3)) { correct_count += 1; } + if (0 == memcmp(data.c_str(), buffer->data(), 3)) { + correct_count += 1; + } } }; diff --git a/cpp/src/arrow/io/io-hdfs-test.cc 
b/cpp/src/arrow/io/io-hdfs-test.cc index 74f80428c4561..c584cf5adeaf2 100644 --- a/cpp/src/arrow/io/io-hdfs-test.cc +++ b/cpp/src/arrow/io/io-hdfs-test.cc @@ -58,11 +58,11 @@ class TestHdfsClient : public ::testing::Test { } Status WriteDummyFile(const std::string& path, const uint8_t* buffer, int64_t size, - bool append = false, int buffer_size = 0, int16_t replication = 0, - int default_block_size = 0) { + bool append = false, int buffer_size = 0, int16_t replication = 0, + int default_block_size = 0) { std::shared_ptr file; - RETURN_NOT_OK(client_->OpenWriteable( - path, append, buffer_size, replication, default_block_size, &file)); + RETURN_NOT_OK(client_->OpenWriteable(path, append, buffer_size, replication, + default_block_size, &file)); RETURN_NOT_OK(file->Write(buffer, size)); RETURN_NOT_OK(file->Close()); @@ -87,9 +87,10 @@ class TestHdfsClient : public ::testing::Test { LibHdfsShim* driver_shim; client_ = nullptr; - scratch_dir_ = boost::filesystem::unique_path( - boost::filesystem::temp_directory_path() / "arrow-hdfs/scratch-%%%%") - .string(); + scratch_dir_ = + boost::filesystem::unique_path(boost::filesystem::temp_directory_path() / + "arrow-hdfs/scratch-%%%%") + .string(); loaded_driver_ = false; @@ -175,7 +176,9 @@ TYPED_TEST(TestHdfsClient, MakeDirectory) { std::string path = this->ScratchPath("create-directory"); - if (this->client_->Exists(path)) { ASSERT_OK(this->client_->Delete(path, true)); } + if (this->client_->Exists(path)) { + ASSERT_OK(this->client_->Delete(path, true)); + } ASSERT_OK(this->client_->MakeDirectory(path)); ASSERT_TRUE(this->client_->Exists(path)); @@ -396,7 +399,7 @@ TYPED_TEST(TestHdfsClient, ThreadSafety) { std::string data = "foobar"; ASSERT_OK(this->WriteDummyFile(src_path, reinterpret_cast(data.c_str()), - static_cast(data.size()))); + static_cast(data.size()))); std::shared_ptr file; ASSERT_OK(this->client_->OpenReadable(src_path, &file)); @@ -409,10 +412,14 @@ TYPED_TEST(TestHdfsClient, ThreadSafety) { std::shared_ptr buffer; if (i % 2 == 0) { ASSERT_OK(file->ReadAt(3, 3, &buffer)); - if (0 == memcmp(data.c_str() + 3, buffer->data(), 3)) { correct_count += 1; } + if (0 == memcmp(data.c_str() + 3, buffer->data(), 3)) { + correct_count += 1; + } } else { ASSERT_OK(file->ReadAt(0, 4, &buffer)); - if (0 == memcmp(data.c_str() + 0, buffer->data(), 4)) { correct_count += 1; } + if (0 == memcmp(data.c_str() + 0, buffer->data(), 4)) { + correct_count += 1; + } } } }; diff --git a/cpp/src/arrow/io/memory.cc b/cpp/src/arrow/io/memory.cc index 4d8bf63757d64..b6c48ec39be89 100644 --- a/cpp/src/arrow/io/memory.cc +++ b/cpp/src/arrow/io/memory.cc @@ -46,7 +46,7 @@ BufferOutputStream::BufferOutputStream(const std::shared_ptr& b mutable_data_(buffer->mutable_data()) {} Status BufferOutputStream::Create(int64_t initial_capacity, MemoryPool* pool, - std::shared_ptr* out) { + std::shared_ptr* out) { std::shared_ptr buffer; RETURN_NOT_OK(AllocateResizableBuffer(pool, initial_capacity, &buffer)); *out = std::make_shared(buffer); @@ -55,7 +55,9 @@ Status BufferOutputStream::Create(int64_t initial_capacity, MemoryPool* pool, BufferOutputStream::~BufferOutputStream() { // This can fail, better to explicitly call close - if (buffer_) { DCHECK(Close().ok()); } + if (buffer_) { + DCHECK(Close().ok()); + } } Status BufferOutputStream::Close() { @@ -102,9 +104,7 @@ Status BufferOutputStream::Reserve(int64_t nbytes) { // ---------------------------------------------------------------------- // OutputStream that doesn't write anything -Status MockOutputStream::Close() { - 
return Status::OK(); -} +Status MockOutputStream::Close() { return Status::OK(); } Status MockOutputStream::Tell(int64_t* position) { *position = extent_bytes_written_; @@ -158,7 +158,7 @@ Status FixedSizeBufferWriter::Tell(int64_t* position) { Status FixedSizeBufferWriter::Write(const uint8_t* data, int64_t nbytes) { if (nbytes > memcopy_threshold_ && memcopy_num_threads_ > 1) { parallel_memcopy(mutable_data_ + position_, data, nbytes, memcopy_blocksize_, - memcopy_num_threads_); + memcopy_num_threads_); } else { memcpy(mutable_data_ + position_, data, nbytes); } @@ -166,8 +166,8 @@ Status FixedSizeBufferWriter::Write(const uint8_t* data, int64_t nbytes) { return Status::OK(); } -Status FixedSizeBufferWriter::WriteAt( - int64_t position, const uint8_t* data, int64_t nbytes) { +Status FixedSizeBufferWriter::WriteAt(int64_t position, const uint8_t* data, + int64_t nbytes) { std::lock_guard guard(lock_); RETURN_NOT_OK(Seek(position)); return Write(data, nbytes); @@ -206,9 +206,7 @@ Status BufferReader::Tell(int64_t* position) { return Status::OK(); } -bool BufferReader::supports_zero_copy() const { - return true; -} +bool BufferReader::supports_zero_copy() const { return true; } Status BufferReader::Read(int64_t nbytes, int64_t* bytes_read, uint8_t* buffer) { memcpy(buffer, data_ + position_, nbytes); diff --git a/cpp/src/arrow/io/memory.h b/cpp/src/arrow/io/memory.h index 06384f0d4c4b7..1f8177436471c 100644 --- a/cpp/src/arrow/io/memory.h +++ b/cpp/src/arrow/io/memory.h @@ -45,7 +45,7 @@ class ARROW_EXPORT BufferOutputStream : public OutputStream { explicit BufferOutputStream(const std::shared_ptr& buffer); static Status Create(int64_t initial_capacity, MemoryPool* pool, - std::shared_ptr* out); + std::shared_ptr* out); ~BufferOutputStream(); diff --git a/cpp/src/arrow/io/test-common.h b/cpp/src/arrow/io/test-common.h index 438f378085f65..a4974b77528de 100644 --- a/cpp/src/arrow/io/test-common.h +++ b/cpp/src/arrow/io/test-common.h @@ -73,8 +73,8 @@ class MemoryMapFixture { tmp_files_.push_back(path); } - Status InitMemoryMap( - int64_t size, const std::string& path, std::shared_ptr* mmap) { + Status InitMemoryMap(int64_t size, const std::string& path, + std::shared_ptr* mmap) { RETURN_NOT_OK(MemoryMappedFile::Create(path, size, mmap)); tmp_files_.push_back(path); return Status::OK(); diff --git a/cpp/src/arrow/ipc/feather-internal.h b/cpp/src/arrow/ipc/feather-internal.h index 646c3b2f9f2e3..36cfecc0493f5 100644 --- a/cpp/src/arrow/ipc/feather-internal.h +++ b/cpp/src/arrow/ipc/feather-internal.h @@ -49,7 +49,7 @@ struct ARROW_EXPORT ArrayMetadata { ArrayMetadata() {} ArrayMetadata(fbs::Type type, int64_t offset, int64_t length, int64_t null_count, - int64_t total_bytes) + int64_t total_bytes) : type(type), offset(offset), length(length), @@ -135,7 +135,9 @@ class ARROW_EXPORT TableMetadata { bool HasDescription() const { return table_->description() != 0; } std::string GetDescription() const { - if (!HasDescription()) { return std::string(""); } + if (!HasDescription()) { + return std::string(""); + } return table_->description()->str(); } @@ -153,7 +155,7 @@ class ARROW_EXPORT TableMetadata { static inline flatbuffers::Offset GetPrimitiveArray( FBB& fbb, const ArrayMetadata& array) { return fbs::CreatePrimitiveArray(fbb, array.type, fbs::Encoding_PLAIN, array.offset, - array.length, array.null_count, array.total_bytes); + array.length, array.null_count, array.total_bytes); } static inline fbs::TimeUnit ToFlatbufferEnum(TimeUnit::type unit) { diff --git a/cpp/src/arrow/ipc/feather-test.cc 
b/cpp/src/arrow/ipc/feather-test.cc index 029aae31ff52c..b76b518788b91 100644 --- a/cpp/src/arrow/ipc/feather-test.cc +++ b/cpp/src/arrow/ipc/feather-test.cc @@ -365,8 +365,8 @@ TEST_F(TestTableWriter, TimeTypes) { ArrayFromVector(is_valid, date_values_vec, &date_array); const auto& prim_values = static_cast(*values); - std::vector> buffers = { - prim_values.null_bitmap(), prim_values.values()}; + std::vector> buffers = {prim_values.null_bitmap(), + prim_values.values()}; std::vector> arrays; arrays.push_back(date_array->data()); @@ -400,7 +400,8 @@ TEST_F(TestTableWriter, PrimitiveNullRoundTrip) { ASSERT_OK(reader_->GetColumn(i, &col)); ASSERT_EQ(batch->column_name(i), col->name()); StringArray str_values(batch->column(i)->length(), nullptr, nullptr, - batch->column(i)->null_bitmap(), batch->column(i)->null_count()); + batch->column(i)->null_bitmap(), + batch->column(i)->null_count()); CheckArrays(str_values, *col->data()->chunk(0)); } } diff --git a/cpp/src/arrow/ipc/feather.cc b/cpp/src/arrow/ipc/feather.cc index 61b96e0c1dc3b..54771d3356b83 100644 --- a/cpp/src/arrow/ipc/feather.cc +++ b/cpp/src/arrow/ipc/feather.cc @@ -61,26 +61,30 @@ static int64_t GetOutputLength(int64_t nbytes) { } static Status WritePadded(io::OutputStream* stream, const uint8_t* data, int64_t length, - int64_t* bytes_written) { + int64_t* bytes_written) { RETURN_NOT_OK(stream->Write(data, length)); int64_t remainder = PaddedLength(length) - length; - if (remainder != 0) { RETURN_NOT_OK(stream->Write(kPaddingBytes, remainder)); } + if (remainder != 0) { + RETURN_NOT_OK(stream->Write(kPaddingBytes, remainder)); + } *bytes_written = length + remainder; return Status::OK(); } /// For compability, we need to write any data sometimes just to keep producing /// files that can be read with an older reader. 
-static Status WritePaddedBlank( - io::OutputStream* stream, int64_t length, int64_t* bytes_written) { +static Status WritePaddedBlank(io::OutputStream* stream, int64_t length, + int64_t* bytes_written) { const uint8_t null = 0; for (int64_t i = 0; i < length; i++) { RETURN_NOT_OK(stream->Write(&null, 1)); } int64_t remainder = PaddedLength(length) - length; - if (remainder != 0) { RETURN_NOT_OK(stream->Write(kPaddingBytes, remainder)); } + if (remainder != 0) { + RETURN_NOT_OK(stream->Write(kPaddingBytes, remainder)); + } *bytes_written = length + remainder; return Status::OK(); } @@ -90,20 +94,22 @@ static Status WritePaddedBlank( TableBuilder::TableBuilder(int64_t num_rows) : finished_(false), num_rows_(num_rows) {} -FBB& TableBuilder::fbb() { - return fbb_; -} +FBB& TableBuilder::fbb() { return fbb_; } Status TableBuilder::Finish() { - if (finished_) { return Status::Invalid("can only call this once"); } + if (finished_) { + return Status::Invalid("can only call this once"); + } FBString desc = 0; - if (!description_.empty()) { desc = fbb_.CreateString(description_); } + if (!description_.empty()) { + desc = fbb_.CreateString(description_); + } flatbuffers::Offset metadata = 0; - auto root = fbs::CreateCTable( - fbb_, desc, num_rows_, fbb_.CreateVector(columns_), kFeatherVersion, metadata); + auto root = fbs::CreateCTable(fbb_, desc, num_rows_, fbb_.CreateVector(columns_), + kFeatherVersion, metadata); fbb_.Finish(root); finished_ = true; @@ -111,17 +117,15 @@ Status TableBuilder::Finish() { } std::shared_ptr TableBuilder::GetBuffer() const { - return std::make_shared( - fbb_.GetBufferPointer(), static_cast(fbb_.GetSize())); + return std::make_shared(fbb_.GetBufferPointer(), + static_cast(fbb_.GetSize())); } void TableBuilder::SetDescription(const std::string& description) { description_ = description; } -void TableBuilder::SetNumRows(int64_t num_rows) { - num_rows_ = num_rows; -} +void TableBuilder::SetNumRows(int64_t num_rows) { num_rows_ = num_rows; } void TableBuilder::add_column(const flatbuffers::Offset& col) { columns_.push_back(col); @@ -177,21 +181,17 @@ Status ColumnBuilder::Finish() { flatbuffers::Offset metadata = CreateColumnMetadata(); auto column = fbs::CreateColumn(buf, buf.CreateString(name_), values, - ToFlatbufferEnum(type_), // metadata_type - metadata, buf.CreateString(user_metadata_)); + ToFlatbufferEnum(type_), // metadata_type + metadata, buf.CreateString(user_metadata_)); // bad coupling, but OK for now parent_->add_column(column); return Status::OK(); } -void ColumnBuilder::SetValues(const ArrayMetadata& values) { - values_ = values; -} +void ColumnBuilder::SetValues(const ArrayMetadata& values) { values_ = values; } -void ColumnBuilder::SetUserMetadata(const std::string& data) { - user_metadata_ = data; -} +void ColumnBuilder::SetUserMetadata(const std::string& data) { user_metadata_ = data; } void ColumnBuilder::SetCategory(const ArrayMetadata& levels, bool ordered) { type_ = ColumnType::CATEGORY; @@ -209,18 +209,14 @@ void ColumnBuilder::SetTimestamp(TimeUnit::type unit, const std::string& timezon meta_timestamp_.timezone = timezone; } -void ColumnBuilder::SetDate() { - type_ = ColumnType::DATE; -} +void ColumnBuilder::SetDate() { type_ = ColumnType::DATE; } void ColumnBuilder::SetTime(TimeUnit::type unit) { type_ = ColumnType::TIME; meta_time_.unit = unit; } -FBB& ColumnBuilder::fbb() { - return *fbb_; -} +FBB& ColumnBuilder::fbb() { return *fbb_; } std::unique_ptr TableBuilder::AddColumn(const std::string& name) { return std::unique_ptr(new 
ColumnBuilder(this, name)); @@ -272,7 +268,7 @@ class TableReader::TableReaderImpl { } Status GetDataType(const fbs::PrimitiveArray* values, fbs::TypeMetadata metadata_type, - const void* metadata, std::shared_ptr* out) { + const void* metadata, std::shared_ptr* out) { #define PRIMITIVE_CASE(CAP_TYPE, FACTORY_FUNC) \ case fbs::Type_##CAP_TYPE: \ *out = FACTORY_FUNC(); \ @@ -342,7 +338,7 @@ class TableReader::TableReaderImpl { // @returns: a Buffer instance, the precise type will depend on the kind of // input data source (which may or may not have memory-map like semantics) Status LoadValues(const fbs::PrimitiveArray* meta, fbs::TypeMetadata metadata_type, - const void* metadata, std::shared_ptr* out) { + const void* metadata, std::shared_ptr* out) { std::shared_ptr type; RETURN_NOT_OK(GetDataType(meta, metadata_type, metadata, &type)); @@ -394,8 +390,8 @@ class TableReader::TableReaderImpl { // if (user_meta->size() > 0) { user_metadata_ = user_meta->str(); } std::shared_ptr values; - RETURN_NOT_OK(LoadValues( - col_meta->values(), col_meta->metadata_type(), col_meta->metadata(), &values)); + RETURN_NOT_OK(LoadValues(col_meta->values(), col_meta->metadata_type(), + col_meta->metadata(), &values)); out->reset(new Column(col_meta->name()->str(), values)); return Status::OK(); } @@ -410,41 +406,27 @@ class TableReader::TableReaderImpl { // ---------------------------------------------------------------------- // TableReader public API -TableReader::TableReader() { - impl_.reset(new TableReaderImpl()); -} +TableReader::TableReader() { impl_.reset(new TableReaderImpl()); } TableReader::~TableReader() {} Status TableReader::Open(const std::shared_ptr& source, - std::unique_ptr* out) { + std::unique_ptr* out) { out->reset(new TableReader()); return (*out)->impl_->Open(source); } -bool TableReader::HasDescription() const { - return impl_->HasDescription(); -} +bool TableReader::HasDescription() const { return impl_->HasDescription(); } -std::string TableReader::GetDescription() const { - return impl_->GetDescription(); -} +std::string TableReader::GetDescription() const { return impl_->GetDescription(); } -int TableReader::version() const { - return impl_->version(); -} +int TableReader::version() const { return impl_->version(); } -int64_t TableReader::num_rows() const { - return impl_->num_rows(); -} +int64_t TableReader::num_rows() const { return impl_->num_rows(); } -int64_t TableReader::num_columns() const { - return impl_->num_columns(); -} +int64_t TableReader::num_columns() const { return impl_->num_columns(); } -std::string TableReader::GetColumnName(int i) const { - return impl_->GetColumnName(i); -} +std::string TableReader::GetColumnName(int i) const { return impl_->GetColumnName(i); } Status TableReader::GetColumn(int i, std::shared_ptr* out) { return impl_->GetColumn(i, out); @@ -501,8 +483,8 @@ static Status SanitizeUnsupportedTypes(const Array& values, std::shared_ptr( - values.length(), nullptr, nullptr, values.null_bitmap(), values.null_count()); + *out = std::make_shared(values.length(), nullptr, nullptr, + values.null_bitmap(), values.null_count()); return Status::OK(); } else { return MakeArray(values.data(), out); @@ -537,8 +519,8 @@ class TableWriter::TableWriterImpl : public ArrayVisitor { // Footer: metadata length, magic bytes RETURN_NOT_OK( stream_->Write(reinterpret_cast(&buffer_size), sizeof(uint32_t))); - return stream_->Write( - reinterpret_cast(kFeatherMagicBytes), strlen(kFeatherMagicBytes)); + return stream_->Write(reinterpret_cast(kFeatherMagicBytes), + 
strlen(kFeatherMagicBytes)); } Status LoadArrayMetadata(const Array& values, ArrayMetadata* meta) { @@ -571,7 +553,7 @@ class TableWriter::TableWriterImpl : public ArrayVisitor { // byte boundary, and we write this much data into the stream if (values.null_bitmap()) { RETURN_NOT_OK(WritePadded(stream_.get(), values.null_bitmap()->data(), - values.null_bitmap()->size(), &bytes_written)); + values.null_bitmap()->size(), &bytes_written)); } else { RETURN_NOT_OK(WritePaddedBlank( stream_.get(), BitUtil::BytesForBits(values.length()), &bytes_written)); @@ -592,15 +574,17 @@ class TableWriter::TableWriterImpl : public ArrayVisitor { values_bytes = bin_values.raw_value_offsets()[values.length()]; // Write the variable-length offsets - RETURN_NOT_OK(WritePadded(stream_.get(), - reinterpret_cast(bin_values.raw_value_offsets()), - offset_bytes, &bytes_written)); + RETURN_NOT_OK(WritePadded(stream_.get(), reinterpret_cast( + bin_values.raw_value_offsets()), + offset_bytes, &bytes_written)); } else { RETURN_NOT_OK(WritePaddedBlank(stream_.get(), offset_bytes, &bytes_written)); } meta->total_bytes += bytes_written; - if (bin_values.value_data()) { values_buffer = bin_values.value_data()->data(); } + if (bin_values.value_data()) { + values_buffer = bin_values.value_data()->data(); + } } else { const auto& prim_values = static_cast(values); const auto& fw_type = static_cast(*values.type()); @@ -612,7 +596,9 @@ class TableWriter::TableWriterImpl : public ArrayVisitor { values_bytes = values.length() * fw_type.bit_width() / 8; } - if (prim_values.values()) { values_buffer = prim_values.values()->data(); } + if (prim_values.values()) { + values_buffer = prim_values.values()->data(); + } } if (values_buffer) { RETURN_NOT_OK( @@ -710,9 +696,9 @@ class TableWriter::TableWriterImpl : public ArrayVisitor { Status CheckStarted() { if (!initialized_stream_) { int64_t bytes_written_unused; - RETURN_NOT_OK( - WritePadded(stream_.get(), reinterpret_cast(kFeatherMagicBytes), - strlen(kFeatherMagicBytes), &bytes_written_unused)); + RETURN_NOT_OK(WritePadded(stream_.get(), + reinterpret_cast(kFeatherMagicBytes), + strlen(kFeatherMagicBytes), &bytes_written_unused)); initialized_stream_ = true; } return Status::OK(); @@ -728,33 +714,25 @@ class TableWriter::TableWriterImpl : public ArrayVisitor { Status AppendPrimitive(const PrimitiveArray& values, ArrayMetadata* out); }; -TableWriter::TableWriter() { - impl_.reset(new TableWriterImpl()); -} +TableWriter::TableWriter() { impl_.reset(new TableWriterImpl()); } TableWriter::~TableWriter() {} -Status TableWriter::Open( - const std::shared_ptr& stream, std::unique_ptr* out) { +Status TableWriter::Open(const std::shared_ptr& stream, + std::unique_ptr* out) { out->reset(new TableWriter()); return (*out)->impl_->Open(stream); } -void TableWriter::SetDescription(const std::string& desc) { - impl_->SetDescription(desc); -} +void TableWriter::SetDescription(const std::string& desc) { impl_->SetDescription(desc); } -void TableWriter::SetNumRows(int64_t num_rows) { - impl_->SetNumRows(num_rows); -} +void TableWriter::SetNumRows(int64_t num_rows) { impl_->SetNumRows(num_rows); } Status TableWriter::Append(const std::string& name, const Array& values) { return impl_->Append(name, values); } -Status TableWriter::Finalize() { - return impl_->Finalize(); -} +Status TableWriter::Finalize() { return impl_->Finalize(); } } // namespace feather } // namespace ipc diff --git a/cpp/src/arrow/ipc/feather.h b/cpp/src/arrow/ipc/feather.h index 4d59a8bbd54a9..8abcb5c0f2599 100644 --- 
a/cpp/src/arrow/ipc/feather.h +++ b/cpp/src/arrow/ipc/feather.h @@ -56,7 +56,7 @@ class ARROW_EXPORT TableReader { ~TableReader(); static Status Open(const std::shared_ptr& source, - std::unique_ptr* out); + std::unique_ptr* out); // Optional table description // @@ -83,8 +83,8 @@ class ARROW_EXPORT TableWriter { public: ~TableWriter(); - static Status Open( - const std::shared_ptr& stream, std::unique_ptr* out); + static Status Open(const std::shared_ptr& stream, + std::unique_ptr* out); void SetDescription(const std::string& desc); void SetNumRows(int64_t num_rows); diff --git a/cpp/src/arrow/ipc/file-to-stream.cc b/cpp/src/arrow/ipc/file-to-stream.cc index a1feedc212618..4707c4fcdf0f7 100644 --- a/cpp/src/arrow/ipc/file-to-stream.cc +++ b/cpp/src/arrow/ipc/file-to-stream.cc @@ -15,11 +15,11 @@ // specific language governing permissions and limitations // under the License. +#include #include "arrow/io/file.h" #include "arrow/ipc/reader.h" #include "arrow/ipc/writer.h" #include "arrow/status.h" -#include #include "arrow/util/io-util.h" diff --git a/cpp/src/arrow/ipc/ipc-json-test.cc b/cpp/src/arrow/ipc/ipc-json-test.cc index 79344df46b243..35264fa02c5ba 100644 --- a/cpp/src/arrow/ipc/ipc-json-test.cc +++ b/cpp/src/arrow/ipc/ipc-json-test.cc @@ -77,7 +77,9 @@ void TestArrayRoundTrip(const Array& array) { rj::Document d; d.Parse(array_as_json); - if (d.HasParseError()) { FAIL() << "JSON parsing failed"; } + if (d.HasParseError()) { + FAIL() << "JSON parsing failed"; + } std::shared_ptr out; ASSERT_OK(internal::ReadArray(default_memory_pool(), d, array.type(), &out)); @@ -88,7 +90,8 @@ void TestArrayRoundTrip(const Array& array) { template void CheckPrimitive(const std::shared_ptr& type, - const std::vector& is_valid, const std::vector& values) { + const std::vector& is_valid, + const std::vector& values) { MemoryPool* pool = default_memory_pool(); typename TypeTraits::BuilderType builder(pool); @@ -108,16 +111,17 @@ void CheckPrimitive(const std::shared_ptr& type, TEST(TestJsonSchemaWriter, FlatTypes) { // TODO // field("f14", date32()) - std::vector> fields = {field("f0", int8()), - field("f1", int16(), false), field("f2", int32()), field("f3", int64(), false), - field("f4", uint8()), field("f5", uint16()), field("f6", uint32()), - field("f7", uint64()), field("f8", float32()), field("f9", float64()), - field("f10", utf8()), field("f11", binary()), field("f12", list(int32())), + std::vector> fields = { + field("f0", int8()), field("f1", int16(), false), field("f2", int32()), + field("f3", int64(), false), field("f4", uint8()), field("f5", uint16()), + field("f6", uint32()), field("f7", uint64()), field("f8", float32()), + field("f9", float64()), field("f10", utf8()), field("f11", binary()), + field("f12", list(int32())), field("f13", struct_({field("s1", int32()), field("s2", utf8())})), field("f15", date64()), field("f16", timestamp(TimeUnit::NANO)), field("f17", time64(TimeUnit::MICRO)), field("f18", union_({field("u1", int8()), field("u2", time32(TimeUnit::MILLI))}, - {0, 1}, UnionMode::DENSE))}; + {0, 1}, UnionMode::DENSE))}; Schema schema(fields); TestSchemaRoundTrip(schema); @@ -185,8 +189,8 @@ TEST(TestJsonArrayWriter, NestedTypes) { struct_({field("f1", int32()), field("f2", int32()), field("f3", int32())}); std::vector> fields = {values_array, values_array, values_array}; - StructArray struct_array( - struct_type, static_cast(struct_is_valid.size()), fields, struct_bitmap, 2); + StructArray struct_array(struct_type, static_cast(struct_is_valid.size()), fields, + struct_bitmap, 
2); TestArrayRoundTrip(struct_array); } @@ -202,7 +206,7 @@ TEST(TestJsonArrayWriter, Unions) { // Data generation for test case below void MakeBatchArrays(const std::shared_ptr& schema, const int num_rows, - std::vector>* arrays) { + std::vector>* arrays) { std::vector is_valid; test::random_is_valid(num_rows, 0.25, &is_valid); @@ -266,8 +270,8 @@ TEST(TestJsonFileReadWrite, BasicRoundTrip) { std::unique_ptr reader; - auto buffer = std::make_shared( - reinterpret_cast(result.c_str()), static_cast(result.size())); + auto buffer = std::make_shared(reinterpret_cast(result.c_str()), + static_cast(result.size())); ASSERT_OK(JsonReader::Open(buffer, &reader)); ASSERT_TRUE(reader->schema()->Equals(*schema)); @@ -332,8 +336,8 @@ TEST(TestJsonFileReadWrite, MinimalFormatExample) { } )example"; - auto buffer = std::make_shared( - reinterpret_cast(example), strlen(example)); + auto buffer = std::make_shared(reinterpret_cast(example), + strlen(example)); std::unique_ptr reader; ASSERT_OK(JsonReader::Open(buffer, &reader)); @@ -361,9 +365,9 @@ TEST(TestJsonFileReadWrite, MinimalFormatExample) { #define BATCH_CASES() \ ::testing::Values(&MakeIntRecordBatch, &MakeListRecordBatch, &MakeNonNullRecordBatch, \ - &MakeZeroLengthRecordBatch, &MakeDeeplyNestedList, &MakeStringTypesRecordBatch, \ - &MakeStruct, &MakeUnion, &MakeDates, &MakeTimestamps, &MakeTimes, &MakeFWBinary, \ - &MakeDictionary); + &MakeZeroLengthRecordBatch, &MakeDeeplyNestedList, \ + &MakeStringTypesRecordBatch, &MakeStruct, &MakeUnion, &MakeDates, \ + &MakeTimestamps, &MakeTimes, &MakeFWBinary, &MakeDictionary); class TestJsonRoundTrip : public ::testing::TestWithParam { public: @@ -382,7 +386,7 @@ void CheckRoundtrip(const RecordBatch& batch) { ASSERT_OK(writer->Finish(&result)); auto buffer = std::make_shared(reinterpret_cast(result.c_str()), - static_cast(result.size())); + static_cast(result.size())); std::unique_ptr reader; ASSERT_OK(JsonReader::Open(buffer, &reader)); diff --git a/cpp/src/arrow/ipc/ipc-read-write-benchmark.cc b/cpp/src/arrow/ipc/ipc-read-write-benchmark.cc index c890d829849fd..a88120a248d2d 100644 --- a/cpp/src/arrow/ipc/ipc-read-write-benchmark.cc +++ b/cpp/src/arrow/ipc/ipc-read-write-benchmark.cc @@ -80,7 +80,7 @@ static void BM_WriteRecordBatch(benchmark::State& state) { // NOLINT non-const int32_t metadata_length; int64_t body_length; if (!ipc::WriteRecordBatch(*record_batch, 0, &stream, &metadata_length, &body_length, - default_memory_pool()) + default_memory_pool()) .ok()) { state.SkipWithError("Failed to write!"); } @@ -101,7 +101,7 @@ static void BM_ReadRecordBatch(benchmark::State& state) { // NOLINT non-const r int32_t metadata_length; int64_t body_length; if (!ipc::WriteRecordBatch(*record_batch, 0, &stream, &metadata_length, &body_length, - default_memory_pool()) + default_memory_pool()) .ok()) { state.SkipWithError("Failed to write!"); } diff --git a/cpp/src/arrow/ipc/ipc-read-write-test.cc b/cpp/src/arrow/ipc/ipc-read-write-test.cc index 2119ff74056f2..6c7051750b7cb 100644 --- a/cpp/src/arrow/ipc/ipc-read-write-test.cc +++ b/cpp/src/arrow/ipc/ipc-read-write-test.cc @@ -126,40 +126,45 @@ TEST_F(TestSchemaMetadata, NestedFields) { CheckRoundtrip(schema, &memo); } -#define BATCH_CASES() \ - ::testing::Values(&MakeIntRecordBatch, &MakeListRecordBatch, &MakeNonNullRecordBatch, \ - &MakeZeroLengthRecordBatch, &MakeDeeplyNestedList, &MakeStringTypesRecordBatch, \ - &MakeStruct, &MakeUnion, &MakeDictionary, &MakeDates, &MakeTimestamps, &MakeTimes, \ - &MakeFWBinary, &MakeBooleanBatch); +#define BATCH_CASES() 
\ + ::testing::Values(&MakeIntRecordBatch, &MakeListRecordBatch, &MakeNonNullRecordBatch, \ + &MakeZeroLengthRecordBatch, &MakeDeeplyNestedList, \ + &MakeStringTypesRecordBatch, &MakeStruct, &MakeUnion, \ + &MakeDictionary, &MakeDates, &MakeTimestamps, &MakeTimes, \ + &MakeFWBinary, &MakeBooleanBatch); static int g_file_number = 0; class IpcTestFixture : public io::MemoryMapFixture { public: Status DoStandardRoundTrip(const RecordBatch& batch, bool zero_data, - std::shared_ptr* batch_result) { + std::shared_ptr* batch_result) { int32_t metadata_length; int64_t body_length; const int64_t buffer_offset = 0; - if (zero_data) { RETURN_NOT_OK(ZeroMemoryMap(mmap_.get())); } + if (zero_data) { + RETURN_NOT_OK(ZeroMemoryMap(mmap_.get())); + } RETURN_NOT_OK(mmap_->Seek(0)); - RETURN_NOT_OK(WriteRecordBatch( - batch, buffer_offset, mmap_.get(), &metadata_length, &body_length, pool_)); + RETURN_NOT_OK(WriteRecordBatch(batch, buffer_offset, mmap_.get(), &metadata_length, + &body_length, pool_)); std::unique_ptr message; RETURN_NOT_OK(ReadMessage(0, metadata_length, mmap_.get(), &message)); io::BufferReader buffer_reader(message->body()); - return ReadRecordBatch( - *message->metadata(), batch.schema(), &buffer_reader, batch_result); + return ReadRecordBatch(*message->metadata(), batch.schema(), &buffer_reader, + batch_result); } - Status DoLargeRoundTrip( - const RecordBatch& batch, bool zero_data, std::shared_ptr* result) { - if (zero_data) { RETURN_NOT_OK(ZeroMemoryMap(mmap_.get())); } + Status DoLargeRoundTrip(const RecordBatch& batch, bool zero_data, + std::shared_ptr* result) { + if (zero_data) { + RETURN_NOT_OK(ZeroMemoryMap(mmap_.get())); + } RETURN_NOT_OK(mmap_->Seek(0)); std::shared_ptr file_writer; @@ -244,8 +249,8 @@ TEST_F(TestIpcRoundTrip, MetadataVersion) { const int64_t buffer_offset = 0; - ASSERT_OK(WriteRecordBatch( - *batch, buffer_offset, mmap_.get(), &metadata_length, &body_length, pool_)); + ASSERT_OK(WriteRecordBatch(*batch, buffer_offset, mmap_.get(), &metadata_length, + &body_length, pool_)); std::unique_ptr message; ASSERT_OK(ReadMessage(0, metadata_length, mmap_.get(), &message)); @@ -258,7 +263,9 @@ TEST_P(TestIpcRoundTrip, SliceRoundTrip) { ASSERT_OK((*GetParam())(&batch)); // NOLINT clang-tidy gtest issue // Skip the zero-length case - if (batch->num_rows() < 2) { return; } + if (batch->num_rows() < 2) { + return; + } auto sliced_batch = batch->Slice(2, 10); CheckRoundtrip(*sliced_batch, 1 << 20); @@ -282,8 +289,9 @@ TEST_P(TestIpcRoundTrip, ZeroLengthArrays) { ASSERT_OK(AllocateBuffer(pool_, sizeof(int32_t), &value_offsets)); *reinterpret_cast(value_offsets->mutable_data()) = 0; - std::shared_ptr bin_array = std::make_shared(0, value_offsets, - std::make_shared(nullptr, 0), std::make_shared(nullptr, 0)); + std::shared_ptr bin_array = std::make_shared( + 0, value_offsets, std::make_shared(nullptr, 0), + std::make_shared(nullptr, 0)); // null value_offsets std::shared_ptr bin_array2 = std::make_shared(0, nullptr, nullptr); @@ -357,8 +365,8 @@ TEST_F(TestWriteRecordBatch, SliceTruncatesBuffers) { std::shared_ptr offsets_buffer; ASSERT_OK( test::CopyBufferFromVector(type_offsets, default_memory_pool(), &offsets_buffer)); - a1 = std::make_shared( - dense_union_type, a0->length(), struct_children, ids_buffer, offsets_buffer); + a1 = std::make_shared(dense_union_type, a0->length(), struct_children, + ids_buffer, offsets_buffer); CheckArray(a1); } @@ -367,8 +375,8 @@ void TestGetRecordBatchSize(std::shared_ptr batch) { int32_t mock_metadata_length = -1; int64_t mock_body_length = 
-1; int64_t size = -1; - ASSERT_OK(WriteRecordBatch( - *batch, 0, &mock, &mock_metadata_length, &mock_body_length, default_memory_pool())); + ASSERT_OK(WriteRecordBatch(*batch, 0, &mock, &mock_metadata_length, &mock_body_length, + default_memory_pool())); ASSERT_OK(GetRecordBatchSize(*batch, &size)); ASSERT_EQ(mock.GetExtentBytesWritten(), size); } @@ -398,8 +406,8 @@ class RecursionLimits : public ::testing::Test, public io::MemoryMapFixture { void TearDown() { io::MemoryMapFixture::TearDown(); } Status WriteToMmap(int recursion_level, bool override_level, int32_t* metadata_length, - int64_t* body_length, std::shared_ptr* batch, - std::shared_ptr* schema) { + int64_t* body_length, std::shared_ptr* batch, + std::shared_ptr* schema) { const int batch_length = 5; TypePtr type = int32(); std::shared_ptr array; @@ -425,10 +433,10 @@ class RecursionLimits : public ::testing::Test, public io::MemoryMapFixture { if (override_level) { return WriteRecordBatch(**batch, 0, mmap_.get(), metadata_length, body_length, - pool_, recursion_level + 1); + pool_, recursion_level + 1); } else { - return WriteRecordBatch( - **batch, 0, mmap_.get(), metadata_length, body_length, pool_); + return WriteRecordBatch(**batch, 0, mmap_.get(), metadata_length, body_length, + pool_); } } @@ -442,8 +450,8 @@ TEST_F(RecursionLimits, WriteLimit) { int64_t body_length = -1; std::shared_ptr schema; std::shared_ptr batch; - ASSERT_RAISES(Invalid, - WriteToMmap((1 << 8) + 1, false, &metadata_length, &body_length, &batch, &schema)); + ASSERT_RAISES(Invalid, WriteToMmap((1 << 8) + 1, false, &metadata_length, &body_length, + &batch, &schema)); } TEST_F(RecursionLimits, ReadLimit) { @@ -454,8 +462,8 @@ TEST_F(RecursionLimits, ReadLimit) { const int recursion_depth = 64; std::shared_ptr batch; - ASSERT_OK(WriteToMmap( - recursion_depth, true, &metadata_length, &body_length, &batch, &schema)); + ASSERT_OK(WriteToMmap(recursion_depth, true, &metadata_length, &body_length, &batch, + &schema)); std::unique_ptr message; ASSERT_OK(ReadMessage(0, metadata_length, mmap_.get(), &message)); @@ -472,16 +480,16 @@ TEST_F(RecursionLimits, StressLimit) { int64_t body_length = -1; std::shared_ptr schema; std::shared_ptr batch; - ASSERT_OK(WriteToMmap( - recursion_depth, true, &metadata_length, &body_length, &batch, &schema)); + ASSERT_OK(WriteToMmap(recursion_depth, true, &metadata_length, &body_length, &batch, + &schema)); std::unique_ptr message; ASSERT_OK(ReadMessage(0, metadata_length, mmap_.get(), &message)); io::BufferReader reader(message->body()); std::shared_ptr result; - ASSERT_OK(ReadRecordBatch( - *message->metadata(), schema, recursion_depth + 1, &reader, &result)); + ASSERT_OK(ReadRecordBatch(*message->metadata(), schema, recursion_depth + 1, &reader, + &result)); *it_works = result->Equals(*batch); }; @@ -568,8 +576,8 @@ class TestStreamFormat : public ::testing::TestWithParam { } void TearDown() {} - Status RoundTripHelper( - const RecordBatch& batch, std::vector>* out_batches) { + Status RoundTripHelper(const RecordBatch& batch, + std::vector>* out_batches) { // Write the file std::shared_ptr writer; RETURN_NOT_OK(RecordBatchStreamWriter::Open(sink_.get(), batch.schema(), &writer)); @@ -589,7 +597,9 @@ class TestStreamFormat : public ::testing::TestWithParam { std::shared_ptr chunk; while (true) { RETURN_NOT_OK(reader->ReadNextRecordBatch(&chunk)); - if (chunk == nullptr) { break; } + if (chunk == nullptr) { + break; + } out_batches->emplace_back(chunk); } return Status::OK(); @@ -747,8 +757,8 @@ TEST_F(TestTensorRoundTrip, 
NonContiguous) { int32_t metadata_length; int64_t body_length; ASSERT_OK(mmap_->Seek(0)); - ASSERT_RAISES( - Invalid, WriteTensor(tensor, mmap_.get(), &metadata_length, &body_length)); + ASSERT_RAISES(Invalid, + WriteTensor(tensor, mmap_.get(), &metadata_length, &body_length)); } } // namespace ipc diff --git a/cpp/src/arrow/ipc/json-integration-test.cc b/cpp/src/arrow/ipc/json-integration-test.cc index 18f5dfaf57098..035f7086e7e53 100644 --- a/cpp/src/arrow/ipc/json-integration-test.cc +++ b/cpp/src/arrow/ipc/json-integration-test.cc @@ -40,7 +40,8 @@ DEFINE_string(arrow, "", "Arrow file name"); DEFINE_string(json, "", "JSON file name"); -DEFINE_string(mode, "VALIDATE", +DEFINE_string( + mode, "VALIDATE", "Mode of integration testing tool (ARROW_TO_JSON, JSON_TO_ARROW, VALIDATE)"); DEFINE_bool(integration, false, "Run in integration test mode"); DEFINE_bool(verbose, true, "Verbose output"); @@ -55,8 +56,8 @@ bool file_exists(const char* path) { } // Convert JSON file to IPC binary format -static Status ConvertJsonToArrow( - const std::string& json_path, const std::string& arrow_path) { +static Status ConvertJsonToArrow(const std::string& json_path, + const std::string& arrow_path) { std::shared_ptr in_file; std::shared_ptr out_file; @@ -89,8 +90,8 @@ static Status ConvertJsonToArrow( } // Convert IPC binary format to JSON -static Status ConvertArrowToJson( - const std::string& arrow_path, const std::string& json_path) { +static Status ConvertArrowToJson(const std::string& arrow_path, + const std::string& json_path) { std::shared_ptr in_file; std::shared_ptr out_file; @@ -116,11 +117,11 @@ static Status ConvertArrowToJson( std::string result; RETURN_NOT_OK(writer->Finish(&result)); return out_file->Write(reinterpret_cast(result.c_str()), - static_cast(result.size())); + static_cast(result.size())); } -static Status ValidateArrowVsJson( - const std::string& arrow_path, const std::string& json_path) { +static Status ValidateArrowVsJson(const std::string& arrow_path, + const std::string& json_path) { // Construct JSON reader std::shared_ptr json_file; RETURN_NOT_OK(io::ReadableFile::Open(json_path, &json_file)); @@ -151,7 +152,9 @@ static Status ValidateArrowVsJson( << "Arrow schema: \n" << arrow_schema->ToString(); - if (FLAGS_verbose) { std::cout << ss.str() << std::endl; } + if (FLAGS_verbose) { + std::cout << ss.str() << std::endl; + } return Status::Invalid("Schemas did not match"); } @@ -188,10 +191,14 @@ static Status ValidateArrowVsJson( } Status RunCommand(const std::string& json_path, const std::string& arrow_path, - const std::string& command) { - if (json_path == "") { return Status::Invalid("Must specify json file name"); } + const std::string& command) { + if (json_path == "") { + return Status::Invalid("Must specify json file name"); + } - if (arrow_path == "") { return Status::Invalid("Must specify arrow file name"); } + if (arrow_path == "") { + return Status::Invalid("Must specify arrow file name"); + } if (command == "ARROW_TO_JSON") { if (!file_exists(arrow_path.c_str())) { @@ -240,8 +247,8 @@ class TestJSONIntegration : public ::testing::Test { do { std::shared_ptr out; RETURN_NOT_OK(io::FileOutputStream::Open(path, &out)); - RETURN_NOT_OK(out->Write( - reinterpret_cast(data), static_cast(strlen(data)))); + RETURN_NOT_OK(out->Write(reinterpret_cast(data), + static_cast(strlen(data)))); } while (0); return Status::OK(); } diff --git a/cpp/src/arrow/ipc/json-internal.cc b/cpp/src/arrow/ipc/json-internal.cc index 69e4ae8d14a04..175d75b7d1e97 100644 --- 
a/cpp/src/arrow/ipc/json-internal.cc +++ b/cpp/src/arrow/ipc/json-internal.cc @@ -199,7 +199,7 @@ class SchemaWriter { typename std::enable_if::value || std::is_base_of::value || std::is_base_of::value, - void>::type + void>::type WriteTypeMetadata(const T& type) {} void WriteTypeMetadata(const Integer& type) { @@ -508,7 +508,7 @@ class ArrayWriter { } Status WriteChildren(const std::vector>& fields, - const std::vector>& arrays) { + const std::vector>& arrays) { writer_->Key("children"); writer_->StartArray(); for (size_t i = 0; i < fields.size(); ++i) { @@ -602,16 +602,16 @@ static Status GetObjectBool(const RjObject& obj, const std::string& key, bool* o return Status::OK(); } -static Status GetObjectString( - const RjObject& obj, const std::string& key, std::string* out) { +static Status GetObjectString(const RjObject& obj, const std::string& key, + std::string* out) { const auto& it = obj.FindMember(key); RETURN_NOT_STRING(key, it, obj); *out = it->value.GetString(); return Status::OK(); } -static Status GetInteger( - const rj::Value::ConstObject& json_type, std::shared_ptr* type) { +static Status GetInteger(const rj::Value::ConstObject& json_type, + std::shared_ptr* type) { const auto& it_bit_width = json_type.FindMember("bitWidth"); RETURN_NOT_INT("bitWidth", it_bit_width, json_type); @@ -642,8 +642,8 @@ static Status GetInteger( return Status::OK(); } -static Status GetFloatingPoint( - const RjObject& json_type, std::shared_ptr* type) { +static Status GetFloatingPoint(const RjObject& json_type, + std::shared_ptr* type) { const auto& it_precision = json_type.FindMember("precision"); RETURN_NOT_STRING("precision", it_precision, json_type); @@ -663,8 +663,8 @@ static Status GetFloatingPoint( return Status::OK(); } -static Status GetFixedSizeBinary( - const RjObject& json_type, std::shared_ptr* type) { +static Status GetFixedSizeBinary(const RjObject& json_type, + std::shared_ptr* type) { const auto& it_byte_width = json_type.FindMember("byteWidth"); RETURN_NOT_INT("byteWidth", it_byte_width, json_type); @@ -756,8 +756,8 @@ static Status GetTimestamp(const RjObject& json_type, std::shared_ptr* } static Status GetUnion(const RjObject& json_type, - const std::vector>& children, - std::shared_ptr* type) { + const std::vector>& children, + std::shared_ptr* type) { const auto& it_mode = json_type.FindMember("mode"); RETURN_NOT_STRING("mode", it_mode, json_type); @@ -790,8 +790,8 @@ static Status GetUnion(const RjObject& json_type, } static Status GetType(const RjObject& json_type, - const std::vector>& children, - std::shared_ptr* type) { + const std::vector>& children, + std::shared_ptr* type) { const auto& it_type_name = json_type.FindMember("name"); RETURN_NOT_STRING("name", it_type_name, json_type); @@ -831,10 +831,11 @@ static Status GetType(const RjObject& json_type, } static Status GetField(const rj::Value& obj, const DictionaryMemo* dictionary_memo, - std::shared_ptr* field); + std::shared_ptr* field); static Status GetFieldsFromArray(const rj::Value& obj, - const DictionaryMemo* dictionary_memo, std::vector>* fields) { + const DictionaryMemo* dictionary_memo, + std::vector>* fields) { const auto& values = obj.GetArray(); fields->resize(values.Size()); @@ -845,7 +846,7 @@ static Status GetFieldsFromArray(const rj::Value& obj, } static Status ParseDictionary(const RjObject& obj, int64_t* id, bool* is_ordered, - std::shared_ptr* index_type) { + std::shared_ptr* index_type) { int32_t int32_id; RETURN_NOT_OK(GetObjectInt(obj, "id", &int32_id)); *id = int32_id; @@ -866,8 +867,10 @@ static 
Status ParseDictionary(const RjObject& obj, int64_t* id, bool* is_ordered } static Status GetField(const rj::Value& obj, const DictionaryMemo* dictionary_memo, - std::shared_ptr* field) { - if (!obj.IsObject()) { return Status::Invalid("Field was not a JSON object"); } + std::shared_ptr* field) { + if (!obj.IsObject()) { + return Status::Invalid("Field was not a JSON object"); + } const auto& json_field = obj.GetObject(); std::string name; @@ -884,8 +887,8 @@ static Status GetField(const rj::Value& obj, const DictionaryMemo* dictionary_me int64_t dictionary_id; bool is_ordered; std::shared_ptr index_type; - RETURN_NOT_OK(ParseDictionary( - it_dictionary->value.GetObject(), &dictionary_id, &is_ordered, &index_type)); + RETURN_NOT_OK(ParseDictionary(it_dictionary->value.GetObject(), &dictionary_id, + &is_ordered, &index_type)); std::shared_ptr dictionary; RETURN_NOT_OK(dictionary_memo->GetDictionary(dictionary_id, &dictionary)); @@ -941,13 +944,13 @@ UnboxValue(const rj::Value& val) { class ArrayReader { public: explicit ArrayReader(const rj::Value& json_array, const std::shared_ptr& type, - MemoryPool* pool) + MemoryPool* pool) : json_array_(json_array), type_(type), pool_(pool) {} Status ParseTypeValues(const DataType& type); Status GetValidityBuffer(const std::vector& is_valid, int32_t* null_count, - std::shared_ptr* validity_buffer) { + std::shared_ptr* validity_buffer) { int length = static_cast(is_valid.size()); std::shared_ptr out_buffer; @@ -1024,7 +1027,9 @@ class ArrayReader { DCHECK(hex_string.size() % 2 == 0) << "Expected base16 hex string"; int32_t length = static_cast(hex_string.size()) / 2; - if (byte_buffer->size() < length) { RETURN_NOT_OK(byte_buffer->Resize(length)); } + if (byte_buffer->size() < length) { + RETURN_NOT_OK(byte_buffer->Resize(length)); + } const char* hex_data = hex_string.c_str(); uint8_t* byte_buffer_data = byte_buffer->mutable_data(); @@ -1078,8 +1083,8 @@ class ArrayReader { } template - Status GetIntArray( - const RjArray& json_array, const int32_t length, std::shared_ptr* out) { + Status GetIntArray(const RjArray& json_array, const int32_t length, + std::shared_ptr* out) { std::shared_ptr buffer; RETURN_NOT_OK(AllocateBuffer(pool_, length * sizeof(T), &buffer)); @@ -1102,15 +1107,15 @@ class ArrayReader { const auto& json_offsets = obj_->FindMember("OFFSET"); RETURN_NOT_ARRAY("OFFSET", json_offsets, *obj_); std::shared_ptr offsets_buffer; - RETURN_NOT_OK(GetIntArray( - json_offsets->value.GetArray(), length_ + 1, &offsets_buffer)); + RETURN_NOT_OK(GetIntArray(json_offsets->value.GetArray(), length_ + 1, + &offsets_buffer)); std::vector> children; RETURN_NOT_OK(GetChildren(*obj_, type, &children)); DCHECK_EQ(children.size(), 1); - result_ = std::make_shared( - type_, length_, offsets_buffer, children[0], validity_buffer, null_count); + result_ = std::make_shared(type_, length_, offsets_buffer, children[0], + validity_buffer, null_count); return Status::OK(); } @@ -1123,8 +1128,8 @@ class ArrayReader { std::vector> fields; RETURN_NOT_OK(GetChildren(*obj_, type, &fields)); - result_ = std::make_shared( - type_, length_, fields, validity_buffer, null_count); + result_ = std::make_shared(type_, length_, fields, validity_buffer, + null_count); return Status::OK(); } @@ -1154,7 +1159,7 @@ class ArrayReader { RETURN_NOT_OK(GetChildren(*obj_, type, &children)); result_ = std::make_shared(type_, length_, children, type_id_buffer, - offsets_buffer, validity_buffer, null_count); + offsets_buffer, validity_buffer, null_count); return Status::OK(); } @@ -1177,7 
+1182,7 @@ class ArrayReader { } Status GetChildren(const RjObject& obj, const DataType& type, - std::vector>* array) { + std::vector>* array) { const auto& json_children = obj.FindMember("children"); RETURN_NOT_ARRAY("children", json_children, obj); const auto& json_children_arr = json_children->value.GetArray(); @@ -1280,7 +1285,8 @@ static Status GetDictionaryTypes(const RjArray& fields, DictionaryTypeMap* id_to } static Status ReadDictionary(const RjObject& obj, const DictionaryTypeMap& id_to_field, - MemoryPool* pool, int64_t* dictionary_id, std::shared_ptr* out) { + MemoryPool* pool, int64_t* dictionary_id, + std::shared_ptr* out) { int id; RETURN_NOT_OK(GetObjectInt(obj, "id", &id)); @@ -1312,7 +1318,7 @@ static Status ReadDictionary(const RjObject& obj, const DictionaryTypeMap& id_to } static Status ReadDictionaries(const rj::Value& doc, const DictionaryTypeMap& id_to_field, - MemoryPool* pool, DictionaryMemo* dictionary_memo) { + MemoryPool* pool, DictionaryMemo* dictionary_memo) { auto it = doc.FindMember("dictionaries"); if (it == doc.MemberEnd()) { // No dictionaries @@ -1334,8 +1340,8 @@ static Status ReadDictionaries(const rj::Value& doc, const DictionaryTypeMap& id return Status::OK(); } -Status ReadSchema( - const rj::Value& json_schema, MemoryPool* pool, std::shared_ptr* schema) { +Status ReadSchema(const rj::Value& json_schema, MemoryPool* pool, + std::shared_ptr* schema) { auto it = json_schema.FindMember("schema"); RETURN_NOT_OBJECT("schema", it, json_schema); const auto& obj_schema = it->value.GetObject(); @@ -1359,7 +1365,7 @@ Status ReadSchema( } Status ReadRecordBatch(const rj::Value& json_obj, const std::shared_ptr& schema, - MemoryPool* pool, std::shared_ptr* batch) { + MemoryPool* pool, std::shared_ptr* batch) { DCHECK(json_obj.IsObject()); const auto& batch_obj = json_obj.GetObject(); @@ -1409,14 +1415,16 @@ Status WriteArray(const std::string& name, const Array& array, RjWriter* json_wr } Status ReadArray(MemoryPool* pool, const rj::Value& json_array, - const std::shared_ptr& type, std::shared_ptr* array) { + const std::shared_ptr& type, std::shared_ptr* array) { ArrayReader converter(json_array, type, pool); return converter.GetArray(array); } Status ReadArray(MemoryPool* pool, const rj::Value& json_array, const Schema& schema, - std::shared_ptr* array) { - if (!json_array.IsObject()) { return Status::Invalid("Element was not a JSON object"); } + std::shared_ptr* array) { + if (!json_array.IsObject()) { + return Status::Invalid("Element was not a JSON object"); + } const auto& json_obj = json_array.GetObject(); diff --git a/cpp/src/arrow/ipc/json-internal.h b/cpp/src/arrow/ipc/json-internal.h index 5571d9233969c..9b641cd53329b 100644 --- a/cpp/src/arrow/ipc/json-internal.h +++ b/cpp/src/arrow/ipc/json-internal.h @@ -99,17 +99,17 @@ Status WriteSchema(const Schema& schema, RjWriter* writer); Status WriteRecordBatch(const RecordBatch& batch, RjWriter* writer); Status WriteArray(const std::string& name, const Array& array, RjWriter* writer); -Status ReadSchema( - const rj::Value& json_obj, MemoryPool* pool, std::shared_ptr* schema); +Status ReadSchema(const rj::Value& json_obj, MemoryPool* pool, + std::shared_ptr* schema); Status ReadRecordBatch(const rj::Value& json_obj, const std::shared_ptr& schema, - MemoryPool* pool, std::shared_ptr* batch); + MemoryPool* pool, std::shared_ptr* batch); Status ReadArray(MemoryPool* pool, const rj::Value& json_obj, - const std::shared_ptr& type, std::shared_ptr* array); + const std::shared_ptr& type, std::shared_ptr* array); 
Status ReadArray(MemoryPool* pool, const rj::Value& json_obj, const Schema& schema, - std::shared_ptr* array); + std::shared_ptr* array); } // namespace internal } // namespace json diff --git a/cpp/src/arrow/ipc/json.cc b/cpp/src/arrow/ipc/json.cc index 36e343e5fb5bc..f57101a31a97d 100644 --- a/cpp/src/arrow/ipc/json.cc +++ b/cpp/src/arrow/ipc/json.cc @@ -79,15 +79,13 @@ JsonWriter::JsonWriter(const std::shared_ptr& schema) { JsonWriter::~JsonWriter() {} -Status JsonWriter::Open( - const std::shared_ptr& schema, std::unique_ptr* writer) { +Status JsonWriter::Open(const std::shared_ptr& schema, + std::unique_ptr* writer) { *writer = std::unique_ptr(new JsonWriter(schema)); return (*writer)->impl_->Start(); } -Status JsonWriter::Finish(std::string* result) { - return impl_->Finish(result); -} +Status JsonWriter::Finish(std::string* result) { return impl_->Finish(result); } Status JsonWriter::WriteRecordBatch(const RecordBatch& batch) { return impl_->WriteRecordBatch(batch); @@ -103,8 +101,10 @@ class JsonReader::JsonReaderImpl { Status ParseAndReadSchema() { doc_.Parse(reinterpret_cast(data_->data()), - static_cast(data_->size())); - if (doc_.HasParseError()) { return Status::IOError("JSON parsing failed"); } + static_cast(data_->size())); + if (doc_.HasParseError()) { + return Status::IOError("JSON parsing failed"); + } RETURN_NOT_OK(json::internal::ReadSchema(doc_, pool_, &schema_)); @@ -120,8 +120,8 @@ class JsonReader::JsonReaderImpl { DCHECK_LT(i, static_cast(record_batches_->GetArray().Size())) << "i out of bounds"; - return json::internal::ReadRecordBatch( - record_batches_->GetArray()[i], schema_, pool_, batch); + return json::internal::ReadRecordBatch(record_batches_->GetArray()[i], schema_, pool_, + batch); } std::shared_ptr schema() const { return schema_; } @@ -145,24 +145,20 @@ JsonReader::JsonReader(MemoryPool* pool, const std::shared_ptr& data) { JsonReader::~JsonReader() {} -Status JsonReader::Open( - const std::shared_ptr& data, std::unique_ptr* reader) { +Status JsonReader::Open(const std::shared_ptr& data, + std::unique_ptr* reader) { return Open(default_memory_pool(), data, reader); } Status JsonReader::Open(MemoryPool* pool, const std::shared_ptr& data, - std::unique_ptr* reader) { + std::unique_ptr* reader) { *reader = std::unique_ptr(new JsonReader(pool, data)); return (*reader)->impl_->ParseAndReadSchema(); } -std::shared_ptr JsonReader::schema() const { - return impl_->schema(); -} +std::shared_ptr JsonReader::schema() const { return impl_->schema(); } -int JsonReader::num_record_batches() const { - return impl_->num_record_batches(); -} +int JsonReader::num_record_batches() const { return impl_->num_record_batches(); } Status JsonReader::ReadRecordBatch(int i, std::shared_ptr* batch) const { return impl_->ReadRecordBatch(i, batch); diff --git a/cpp/src/arrow/ipc/json.h b/cpp/src/arrow/ipc/json.h index 2ba27c7f2c37d..be26f0233ebeb 100644 --- a/cpp/src/arrow/ipc/json.h +++ b/cpp/src/arrow/ipc/json.h @@ -41,8 +41,8 @@ class ARROW_EXPORT JsonWriter { public: ~JsonWriter(); - static Status Open( - const std::shared_ptr& schema, std::unique_ptr* out); + static Status Open(const std::shared_ptr& schema, + std::unique_ptr* out); Status WriteRecordBatch(const RecordBatch& batch); Status Finish(std::string* result); @@ -61,11 +61,11 @@ class ARROW_EXPORT JsonReader { ~JsonReader(); static Status Open(MemoryPool* pool, const std::shared_ptr& data, - std::unique_ptr* reader); + std::unique_ptr* reader); // Use the default memory pool - static Status Open( - const 
std::shared_ptr& data, std::unique_ptr* reader); + static Status Open(const std::shared_ptr& data, + std::unique_ptr* reader); std::shared_ptr schema() const; diff --git a/cpp/src/arrow/ipc/metadata.cc b/cpp/src/arrow/ipc/metadata.cc index 49c24c72727b7..20fd280db6de6 100644 --- a/cpp/src/arrow/ipc/metadata.cc +++ b/cpp/src/arrow/ipc/metadata.cc @@ -58,8 +58,8 @@ static constexpr flatbuf::MetadataVersion kCurrentMetadataVersion = static constexpr flatbuf::MetadataVersion kMinMetadataVersion = flatbuf::MetadataVersion_V3; -static Status IntFromFlatbuffer( - const flatbuf::Int* int_data, std::shared_ptr* out) { +static Status IntFromFlatbuffer(const flatbuf::Int* int_data, + std::shared_ptr* out) { if (int_data->bitWidth() > 64) { return Status::NotImplemented("Integers with more than 64 bits not implemented"); } @@ -86,8 +86,8 @@ static Status IntFromFlatbuffer( return Status::OK(); } -static Status FloatFromFlatuffer( - const flatbuf::FloatingPoint* float_data, std::shared_ptr* out) { +static Status FloatFromFlatuffer(const flatbuf::FloatingPoint* float_data, + std::shared_ptr* out) { if (float_data->precision() == flatbuf::Precision_HALF) { *out = float16(); } else if (float_data->precision() == flatbuf::Precision_SINGLE) { @@ -100,7 +100,7 @@ static Status FloatFromFlatuffer( // Forward declaration static Status FieldToFlatbuffer(FBB& fbb, const std::shared_ptr& field, - DictionaryMemo* dictionary_memo, FieldOffset* offset); + DictionaryMemo* dictionary_memo, FieldOffset* offset); static Offset IntToFlatbuffer(FBB& fbb, int bitWidth, bool is_signed) { return flatbuf::CreateInt(fbb, bitWidth, is_signed).Union(); @@ -111,7 +111,8 @@ static Offset FloatToFlatbuffer(FBB& fbb, flatbuf::Precision precision) { } static Status AppendChildFields(FBB& fbb, const std::shared_ptr& type, - std::vector* out_children, DictionaryMemo* dictionary_memo) { + std::vector* out_children, + DictionaryMemo* dictionary_memo) { FieldOffset field; for (int i = 0; i < type->num_children(); ++i) { RETURN_NOT_OK(FieldToFlatbuffer(fbb, type->child(i), dictionary_memo, &field)); @@ -121,16 +122,16 @@ static Status AppendChildFields(FBB& fbb, const std::shared_ptr& type, } static Status ListToFlatbuffer(FBB& fbb, const std::shared_ptr& type, - std::vector* out_children, DictionaryMemo* dictionary_memo, - Offset* offset) { + std::vector* out_children, + DictionaryMemo* dictionary_memo, Offset* offset) { RETURN_NOT_OK(AppendChildFields(fbb, type, out_children, dictionary_memo)); *offset = flatbuf::CreateList(fbb).Union(); return Status::OK(); } static Status StructToFlatbuffer(FBB& fbb, const std::shared_ptr& type, - std::vector* out_children, DictionaryMemo* dictionary_memo, - Offset* offset) { + std::vector* out_children, + DictionaryMemo* dictionary_memo, Offset* offset) { RETURN_NOT_OK(AppendChildFields(fbb, type, out_children, dictionary_memo)); *offset = flatbuf::CreateStruct_(fbb).Union(); return Status::OK(); @@ -140,7 +141,8 @@ static Status StructToFlatbuffer(FBB& fbb, const std::shared_ptr& type // Union implementation static Status UnionFromFlatbuffer(const flatbuf::Union* union_data, - const std::vector>& children, std::shared_ptr* out) { + const std::vector>& children, + std::shared_ptr* out) { UnionMode mode = union_data->mode() == flatbuf::UnionMode_Sparse ? 
UnionMode::SPARSE : UnionMode::DENSE; @@ -163,8 +165,8 @@ static Status UnionFromFlatbuffer(const flatbuf::Union* union_data, } static Status UnionToFlatBuffer(FBB& fbb, const std::shared_ptr& type, - std::vector* out_children, DictionaryMemo* dictionary_memo, - Offset* offset) { + std::vector* out_children, + DictionaryMemo* dictionary_memo, Offset* offset) { RETURN_NOT_OK(AppendChildFields(fbb, type, out_children, dictionary_memo)); const auto& union_type = static_cast(*type); @@ -224,15 +226,16 @@ static inline TimeUnit::type FromFlatbufferUnit(flatbuf::TimeUnit unit) { } static Status TypeFromFlatbuffer(flatbuf::Type type, const void* type_data, - const std::vector>& children, std::shared_ptr* out) { + const std::vector>& children, + std::shared_ptr* out) { switch (type) { case flatbuf::Type_NONE: return Status::Invalid("Type metadata cannot be none"); case flatbuf::Type_Int: return IntFromFlatbuffer(static_cast(type_data), out); case flatbuf::Type_FloatingPoint: - return FloatFromFlatuffer( - static_cast(type_data), out); + return FloatFromFlatuffer(static_cast(type_data), + out); case flatbuf::Type_Binary: *out = binary(); return Status::OK(); @@ -301,8 +304,8 @@ static Status TypeFromFlatbuffer(flatbuf::Type type, const void* type_data, *out = std::make_shared(children); return Status::OK(); case flatbuf::Type_Union: - return UnionFromFlatbuffer( - static_cast(type_data), children, out); + return UnionFromFlatbuffer(static_cast(type_data), children, + out); default: return Status::Invalid("Unrecognized type"); } @@ -310,15 +313,17 @@ static Status TypeFromFlatbuffer(flatbuf::Type type, const void* type_data, // TODO(wesm): Convert this to visitor pattern static Status TypeToFlatbuffer(FBB& fbb, const std::shared_ptr& type, - std::vector* children, std::vector* layout, - flatbuf::Type* out_type, DictionaryMemo* dictionary_memo, Offset* offset) { + std::vector* children, + std::vector* layout, + flatbuf::Type* out_type, DictionaryMemo* dictionary_memo, + Offset* offset) { if (type->id() == Type::DICTIONARY) { // In this library, the dictionary "type" is a logical construct. 
Here we // pass through to the value type, as we've already captured the index // type in the DictionaryEncoding metadata in the parent field const auto& dict_type = static_cast(*type); return TypeToFlatbuffer(fbb, dict_type.dictionary()->type(), children, layout, - out_type, dictionary_memo, offset); + out_type, dictionary_memo, offset); } std::vector buffer_layout = type->GetBufferLayout(); @@ -436,7 +441,7 @@ static Status TypeToFlatbuffer(FBB& fbb, const std::shared_ptr& type, } static Status TensorTypeToFlatbuffer(FBB& fbb, const std::shared_ptr& type, - flatbuf::Type* out_type, Offset* offset) { + flatbuf::Type* out_type, Offset* offset) { switch (type->id()) { case Type::UINT8: INT_TO_FB_CASE(8, false); @@ -475,8 +480,8 @@ static Status TensorTypeToFlatbuffer(FBB& fbb, const std::shared_ptr& return Status::OK(); } -static DictionaryOffset GetDictionaryEncoding( - FBB& fbb, const DictionaryType& type, DictionaryMemo* memo) { +static DictionaryOffset GetDictionaryEncoding(FBB& fbb, const DictionaryType& type, + DictionaryMemo* memo) { int64_t dictionary_id = memo->GetId(type.dictionary()); // We assume that the dictionary index type (as an integer) has already been @@ -491,7 +496,7 @@ static DictionaryOffset GetDictionaryEncoding( } static Status FieldToFlatbuffer(FBB& fbb, const std::shared_ptr& field, - DictionaryMemo* dictionary_memo, FieldOffset* offset) { + DictionaryMemo* dictionary_memo, FieldOffset* offset) { auto fb_name = fbb.CreateString(field->name()); flatbuf::Type type_enum; @@ -500,8 +505,8 @@ static Status FieldToFlatbuffer(FBB& fbb, const std::shared_ptr& field, std::vector children; std::vector layout; - RETURN_NOT_OK(TypeToFlatbuffer( - fbb, field->type(), &children, &layout, &type_enum, dictionary_memo, &type_offset)); + RETURN_NOT_OK(TypeToFlatbuffer(fbb, field->type(), &children, &layout, &type_enum, + dictionary_memo, &type_offset)); auto fb_children = fbb.CreateVector(children); auto fb_layout = fbb.CreateVector(layout); @@ -513,13 +518,14 @@ static Status FieldToFlatbuffer(FBB& fbb, const std::shared_ptr& field, // TODO: produce the list of VectorTypes *offset = flatbuf::CreateField(fbb, fb_name, field->nullable(), type_enum, type_offset, - dictionary, fb_children, fb_layout); + dictionary, fb_children, fb_layout); return Status::OK(); } static Status FieldFromFlatbuffer(const flatbuf::Field* field, - const DictionaryMemo& dictionary_memo, std::shared_ptr* out) { + const DictionaryMemo& dictionary_memo, + std::shared_ptr* out) { std::shared_ptr type; const flatbuf::DictionaryEncoding* encoding = field->dictionary(); @@ -551,8 +557,8 @@ static Status FieldFromFlatbuffer(const flatbuf::Field* field, return Status::OK(); } -static Status FieldFromFlatbufferDictionary( - const flatbuf::Field* field, std::shared_ptr* out) { +static Status FieldFromFlatbufferDictionary(const flatbuf::Field* field, + std::shared_ptr* out) { // Need an empty memo to pass down for constructing children DictionaryMemo dummy_memo; @@ -584,7 +590,8 @@ flatbuf::Endianness endianness() { } static Status SchemaToFlatbuffer(FBB& fbb, const Schema& schema, - DictionaryMemo* dictionary_memo, flatbuffers::Offset* out) { + DictionaryMemo* dictionary_memo, + flatbuffers::Offset* out) { /// Fields std::vector field_offsets; for (int i = 0; i < schema.num_fields(); ++i) { @@ -609,8 +616,8 @@ static Status SchemaToFlatbuffer(FBB& fbb, const Schema& schema, key_value_offsets.push_back( flatbuf::CreateKeyValue(fbb, fbb.CreateString(key), fbb.CreateString(value))); } - *out = flatbuf::CreateSchema( - 
fbb, endianness(), fb_offsets, fbb.CreateVector(key_value_offsets)); + *out = flatbuf::CreateSchema(fbb, endianness(), fb_offsets, + fbb.CreateVector(key_value_offsets)); } else { *out = flatbuf::CreateSchema(fbb, endianness(), fb_offsets); } @@ -631,15 +638,16 @@ static Status WriteFlatbufferBuilder(FBB& fbb, std::shared_ptr* out) { } static Status WriteFBMessage(FBB& fbb, flatbuf::MessageHeader header_type, - flatbuffers::Offset header, int64_t body_length, std::shared_ptr* out) { - auto message = flatbuf::CreateMessage( - fbb, kCurrentMetadataVersion, header_type, header, body_length); + flatbuffers::Offset header, int64_t body_length, + std::shared_ptr* out) { + auto message = flatbuf::CreateMessage(fbb, kCurrentMetadataVersion, header_type, header, + body_length); fbb.Finish(message); return WriteFlatbufferBuilder(fbb, out); } -Status WriteSchemaMessage( - const Schema& schema, DictionaryMemo* dictionary_memo, std::shared_ptr* out) { +Status WriteSchemaMessage(const Schema& schema, DictionaryMemo* dictionary_memo, + std::shared_ptr* out) { FBB fbb; flatbuffers::Offset fb_schema; RETURN_NOT_OK(SchemaToFlatbuffer(fbb, schema, dictionary_memo, &fb_schema)); @@ -650,8 +658,8 @@ using FieldNodeVector = flatbuffers::Offset>; using BufferVector = flatbuffers::Offset>; -static Status WriteFieldNodes( - FBB& fbb, const std::vector& nodes, FieldNodeVector* out) { +static Status WriteFieldNodes(FBB& fbb, const std::vector& nodes, + FieldNodeVector* out) { std::vector fb_nodes; fb_nodes.reserve(nodes.size()); @@ -666,8 +674,8 @@ static Status WriteFieldNodes( return Status::OK(); } -static Status WriteBuffers( - FBB& fbb, const std::vector& buffers, BufferVector* out) { +static Status WriteBuffers(FBB& fbb, const std::vector& buffers, + BufferVector* out) { std::vector fb_buffers; fb_buffers.reserve(buffers.size()); @@ -680,8 +688,9 @@ static Status WriteBuffers( } static Status MakeRecordBatch(FBB& fbb, int64_t length, int64_t body_length, - const std::vector& nodes, const std::vector& buffers, - RecordBatchOffset* offset) { + const std::vector& nodes, + const std::vector& buffers, + RecordBatchOffset* offset) { FieldNodeVector fb_nodes; BufferVector fb_buffers; @@ -693,17 +702,18 @@ static Status MakeRecordBatch(FBB& fbb, int64_t length, int64_t body_length, } Status WriteRecordBatchMessage(int64_t length, int64_t body_length, - const std::vector& nodes, const std::vector& buffers, - std::shared_ptr* out) { + const std::vector& nodes, + const std::vector& buffers, + std::shared_ptr* out) { FBB fbb; RecordBatchOffset record_batch; RETURN_NOT_OK(MakeRecordBatch(fbb, length, body_length, nodes, buffers, &record_batch)); - return WriteFBMessage( - fbb, flatbuf::MessageHeader_RecordBatch, record_batch.Union(), body_length, out); + return WriteFBMessage(fbb, flatbuf::MessageHeader_RecordBatch, record_batch.Union(), + body_length, out); } -Status WriteTensorMessage( - const Tensor& tensor, int64_t buffer_start_offset, std::shared_ptr* out) { +Status WriteTensorMessage(const Tensor& tensor, int64_t buffer_start_offset, + std::shared_ptr* out) { using TensorDimOffset = flatbuffers::Offset; using TensorOffset = flatbuffers::Offset; @@ -727,19 +737,20 @@ Status WriteTensorMessage( TensorOffset fb_tensor = flatbuf::CreateTensor(fbb, fb_type_type, fb_type, fb_shape, fb_strides, &buffer); - return WriteFBMessage( - fbb, flatbuf::MessageHeader_Tensor, fb_tensor.Union(), body_length, out); + return WriteFBMessage(fbb, flatbuf::MessageHeader_Tensor, fb_tensor.Union(), + body_length, out); } Status 
WriteDictionaryMessage(int64_t id, int64_t length, int64_t body_length, - const std::vector& nodes, const std::vector& buffers, - std::shared_ptr* out) { + const std::vector& nodes, + const std::vector& buffers, + std::shared_ptr* out) { FBB fbb; RecordBatchOffset record_batch; RETURN_NOT_OK(MakeRecordBatch(fbb, length, body_length, nodes, buffers, &record_batch)); auto dictionary_batch = flatbuf::CreateDictionaryBatch(fbb, id, record_batch).Union(); - return WriteFBMessage( - fbb, flatbuf::MessageHeader_DictionaryBatch, dictionary_batch, body_length, out); + return WriteFBMessage(fbb, flatbuf::MessageHeader_DictionaryBatch, dictionary_batch, + body_length, out); } static flatbuffers::Offset> @@ -754,8 +765,8 @@ FileBlocksToFlatbuffer(FBB& fbb, const std::vector& blocks) { } Status WriteFileFooter(const Schema& schema, const std::vector& dictionaries, - const std::vector& record_batches, DictionaryMemo* dictionary_memo, - io::OutputStream* out) { + const std::vector& record_batches, + DictionaryMemo* dictionary_memo, io::OutputStream* out) { FBB fbb; flatbuffers::Offset fb_schema; @@ -764,8 +775,8 @@ Status WriteFileFooter(const Schema& schema, const std::vector& dicti auto fb_dictionaries = FileBlocksToFlatbuffer(fbb, dictionaries); auto fb_record_batches = FileBlocksToFlatbuffer(fbb, record_batches); - auto footer = flatbuf::CreateFooter( - fbb, kCurrentMetadataVersion, fb_schema, fb_dictionaries, fb_record_batches); + auto footer = flatbuf::CreateFooter(fbb, kCurrentMetadataVersion, fb_schema, + fb_dictionaries, fb_record_batches); fbb.Finish(footer); @@ -780,8 +791,8 @@ Status WriteFileFooter(const Schema& schema, const std::vector& dicti DictionaryMemo::DictionaryMemo() {} // Returns KeyError if dictionary not found -Status DictionaryMemo::GetDictionary( - int64_t id, std::shared_ptr* dictionary) const { +Status DictionaryMemo::GetDictionary(int64_t id, + std::shared_ptr* dictionary) const { auto it = id_to_dictionary_.find(id); if (it == id_to_dictionary_.end()) { std::stringstream ss; @@ -817,8 +828,8 @@ bool DictionaryMemo::HasDictionaryId(int64_t id) const { return it != id_to_dictionary_.end(); } -Status DictionaryMemo::AddDictionary( - int64_t id, const std::shared_ptr& dictionary) { +Status DictionaryMemo::AddDictionary(int64_t id, + const std::shared_ptr& dictionary) { if (HasDictionaryId(id)) { std::stringstream ss; ss << "Dictionary with id " << id << " already exists"; @@ -835,8 +846,8 @@ Status DictionaryMemo::AddDictionary( class Message::MessageImpl { public: - explicit MessageImpl( - const std::shared_ptr& metadata, const std::shared_ptr& body) + explicit MessageImpl(const std::shared_ptr& metadata, + const std::shared_ptr& body) : metadata_(metadata), message_(nullptr), body_(body) {} Status Open() { @@ -897,43 +908,35 @@ class Message::MessageImpl { std::shared_ptr body_; }; -Message::Message( - const std::shared_ptr& metadata, const std::shared_ptr& body) { +Message::Message(const std::shared_ptr& metadata, + const std::shared_ptr& body) { impl_.reset(new MessageImpl(metadata, body)); } Status Message::Open(const std::shared_ptr& metadata, - const std::shared_ptr& body, std::unique_ptr* out) { + const std::shared_ptr& body, std::unique_ptr* out) { out->reset(new Message(metadata, body)); return (*out)->impl_->Open(); } Message::~Message() {} -std::shared_ptr Message::body() const { - return impl_->body(); -} +std::shared_ptr Message::body() const { return impl_->body(); } -std::shared_ptr Message::metadata() const { - return impl_->metadata(); -} +std::shared_ptr 
Message::metadata() const { return impl_->metadata(); } -Message::Type Message::type() const { - return impl_->type(); -} +Message::Type Message::type() const { return impl_->type(); } -MetadataVersion Message::metadata_version() const { - return impl_->version(); -} +MetadataVersion Message::metadata_version() const { return impl_->version(); } -const void* Message::header() const { - return impl_->header(); -} +const void* Message::header() const { return impl_->header(); } bool Message::Equals(const Message& other) const { int64_t metadata_bytes = std::min(metadata()->size(), other.metadata()->size()); - if (!metadata()->Equals(*other.metadata(), metadata_bytes)) { return false; } + if (!metadata()->Equals(*other.metadata(), metadata_bytes)) { + return false; + } // Compare bodies, if they have them auto this_body = body(); @@ -1012,7 +1015,7 @@ Status GetDictionaryTypes(const void* opaque_schema, DictionaryTypeMap* id_to_fi } Status GetSchema(const void* opaque_schema, const DictionaryMemo& dictionary_memo, - std::shared_ptr* out) { + std::shared_ptr* out) { auto schema = static_cast(opaque_schema); int num_fields = static_cast(schema->fields()->size()); @@ -1036,8 +1039,8 @@ Status GetSchema(const void* opaque_schema, const DictionaryMemo& dictionary_mem } Status GetTensorMetadata(const Buffer& metadata, std::shared_ptr* type, - std::vector* shape, std::vector* strides, - std::vector* dim_names) { + std::vector* shape, std::vector* strides, + std::vector* dim_names) { auto message = flatbuf::GetMessage(metadata.data()); auto tensor = reinterpret_cast(message->header()); @@ -1068,7 +1071,8 @@ Status GetTensorMetadata(const Buffer& metadata, std::shared_ptr* type // Read and write messages static Status ReadFullMessage(const std::shared_ptr& metadata, - io::InputStream* stream, std::unique_ptr* message) { + io::InputStream* stream, + std::unique_ptr* message) { auto fb_message = flatbuf::GetMessage(metadata->data()); int64_t body_length = fb_message->bodyLength(); @@ -1087,7 +1091,7 @@ static Status ReadFullMessage(const std::shared_ptr& metadata, } Status ReadMessage(int64_t offset, int32_t metadata_length, io::RandomAccessFile* file, - std::unique_ptr* message) { + std::unique_ptr* message) { std::shared_ptr buffer; RETURN_NOT_OK(file->ReadAt(offset, metadata_length, &buffer)); @@ -1141,8 +1145,8 @@ InputStreamMessageReader::~InputStreamMessageReader() {} // ---------------------------------------------------------------------- // Implement message writing -Status WriteMessage( - const Buffer& message, io::OutputStream* file, int32_t* message_length) { +Status WriteMessage(const Buffer& message, io::OutputStream* file, + int32_t* message_length) { // Need to write 4 bytes (message size), the message, plus padding to // end on an 8-byte offset int64_t start_offset; @@ -1151,7 +1155,9 @@ Status WriteMessage( int32_t padded_message_length = static_cast(message.size()) + 4; const int32_t remainder = (padded_message_length + static_cast(start_offset)) % 8; - if (remainder != 0) { padded_message_length += 8 - remainder; } + if (remainder != 0) { + padded_message_length += 8 - remainder; + } // The returned message size includes the length prefix, the flatbuffer, // plus padding @@ -1167,7 +1173,9 @@ Status WriteMessage( // Write any padding int32_t padding = padded_message_length - static_cast(message.size()) - 4; - if (padding > 0) { RETURN_NOT_OK(file->Write(kPaddingBytes, padding)); } + if (padding > 0) { + RETURN_NOT_OK(file->Write(kPaddingBytes, padding)); + } return Status::OK(); } 
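For readers skimming the `WriteMessage` hunk in metadata.cc just above: the padding arithmetic there rounds the 4-byte length prefix plus the flatbuffer metadata up to the next 8-byte boundary relative to the stream's current offset, so the message body that follows stays aligned. Below is a minimal standalone sketch of that computation only; the helper name and the `main` driver are illustrative and not part of the patch.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative helper (not from the patch): given the serialized flatbuffer
// size and the stream's current offset, compute the length-prefixed, padded
// message length so that whatever follows starts on an 8-byte boundary.
// This mirrors the arithmetic in the WriteMessage hunk above.
int32_t PaddedMessageLength(int64_t flatbuffer_size, int64_t start_offset) {
  int32_t padded = static_cast<int32_t>(flatbuffer_size) + 4;  // 4-byte size prefix
  const int32_t remainder =
      (padded + static_cast<int32_t>(start_offset)) % 8;
  if (remainder != 0) {
    padded += 8 - remainder;  // zero-padding up to the next 8-byte offset
  }
  return padded;
}

int main() {
  // Example: a 121-byte flatbuffer written at stream offset 0 is padded to
  // 128 bytes (4-byte prefix + 121 bytes of metadata + 3 bytes of padding).
  std::cout << PaddedMessageLength(121, 0) << std::endl;  // prints 128
  return 0;
}
```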
diff --git a/cpp/src/arrow/ipc/metadata.h b/cpp/src/arrow/ipc/metadata.h index 614f7a6a922cc..90e4defd6a300 100644 --- a/cpp/src/arrow/ipc/metadata.h +++ b/cpp/src/arrow/ipc/metadata.h @@ -133,11 +133,14 @@ Status GetDictionaryTypes(const void* opaque_schema, DictionaryTypeMap* id_to_fi // Construct a complete Schema from the message. May be expensive for very // large schemas if you are only interested in a few fields Status ARROW_EXPORT GetSchema(const void* opaque_schema, - const DictionaryMemo& dictionary_memo, std::shared_ptr* out); + const DictionaryMemo& dictionary_memo, + std::shared_ptr* out); Status ARROW_EXPORT GetTensorMetadata(const Buffer& metadata, - std::shared_ptr* type, std::vector* shape, - std::vector* strides, std::vector* dim_names); + std::shared_ptr* type, + std::vector* shape, + std::vector* strides, + std::vector* dim_names); /// \brief An IPC message including metadata and body class ARROW_EXPORT Message { @@ -157,7 +160,7 @@ class ARROW_EXPORT Message { /// \param[in] body a buffer containing the message body, which may be nullptr /// \param[out] out the created message static Status Open(const std::shared_ptr& metadata, - const std::shared_ptr& body, std::unique_ptr* out); + const std::shared_ptr& body, std::unique_ptr* out); /// \brief Write length-prefixed metadata and body to output stream /// @@ -242,22 +245,23 @@ class ARROW_EXPORT InputStreamMessageReader : public MessageReader { /// \param[out] message the message read /// \return Status success or failure Status ARROW_EXPORT ReadMessage(int64_t offset, int32_t metadata_length, - io::RandomAccessFile* file, std::unique_ptr* message); + io::RandomAccessFile* file, + std::unique_ptr* message); /// \brief Read encapulated RPC message (metadata and body) from InputStream /// /// Read length-prefixed message with as-yet unknown length. Returns nullptr if /// there are not enough bytes available or the message length is 0 (e.g. 
EOS /// in a stream) -Status ARROW_EXPORT ReadMessage( - io::InputStream* stream, std::unique_ptr* message); +Status ARROW_EXPORT ReadMessage(io::InputStream* stream, + std::unique_ptr* message); /// Write a serialized message metadata with a length-prefix and padding to an /// 8-byte offset /// /// -Status ARROW_EXPORT WriteMessage( - const Buffer& message, io::OutputStream* file, int32_t* message_length); +Status ARROW_EXPORT WriteMessage(const Buffer& message, io::OutputStream* file, + int32_t* message_length); // Serialize arrow::Schema as a Flatbuffer // @@ -266,23 +270,26 @@ Status ARROW_EXPORT WriteMessage( // dictionary ids // \param[out] out the serialized arrow::Buffer // \return Status outcome -Status ARROW_EXPORT WriteSchemaMessage( - const Schema& schema, DictionaryMemo* dictionary_memo, std::shared_ptr* out); +Status ARROW_EXPORT WriteSchemaMessage(const Schema& schema, + DictionaryMemo* dictionary_memo, + std::shared_ptr* out); Status ARROW_EXPORT WriteRecordBatchMessage(int64_t length, int64_t body_length, - const std::vector& nodes, const std::vector& buffers, - std::shared_ptr* out); + const std::vector& nodes, + const std::vector& buffers, + std::shared_ptr* out); -Status ARROW_EXPORT WriteTensorMessage( - const Tensor& tensor, int64_t buffer_start_offset, std::shared_ptr* out); +Status ARROW_EXPORT WriteTensorMessage(const Tensor& tensor, int64_t buffer_start_offset, + std::shared_ptr* out); Status WriteDictionaryMessage(int64_t id, int64_t length, int64_t body_length, - const std::vector& nodes, const std::vector& buffers, - std::shared_ptr* out); + const std::vector& nodes, + const std::vector& buffers, + std::shared_ptr* out); Status WriteFileFooter(const Schema& schema, const std::vector& dictionaries, - const std::vector& record_batches, DictionaryMemo* dictionary_memo, - io::OutputStream* out); + const std::vector& record_batches, + DictionaryMemo* dictionary_memo, io::OutputStream* out); } // namespace ipc } // namespace arrow diff --git a/cpp/src/arrow/ipc/reader.cc b/cpp/src/arrow/ipc/reader.cc index 88ab33087b637..8ae82804c3164 100644 --- a/cpp/src/arrow/ipc/reader.cc +++ b/cpp/src/arrow/ipc/reader.cc @@ -95,12 +95,12 @@ struct ArrayLoaderContext { }; static Status LoadArray(const std::shared_ptr& type, - ArrayLoaderContext* context, internal::ArrayData* out); + ArrayLoaderContext* context, internal::ArrayData* out); class ArrayLoader { public: ArrayLoader(const std::shared_ptr& type, internal::ArrayData* out, - ArrayLoaderContext* context) + ArrayLoaderContext* context) : type_(type), context_(context), out_(out) {} Status Load() { @@ -184,7 +184,7 @@ class ArrayLoader { typename std::enable_if::value && !std::is_base_of::value && !std::is_base_of::value, - Status>::type + Status>::type Visit(const T& type) { return LoadPrimitive(); } @@ -252,18 +252,18 @@ class ArrayLoader { }; static Status LoadArray(const std::shared_ptr& type, - ArrayLoaderContext* context, internal::ArrayData* out) { + ArrayLoaderContext* context, internal::ArrayData* out) { ArrayLoader loader(type, out, context); return loader.Load(); } Status ReadRecordBatch(const Buffer& metadata, const std::shared_ptr& schema, - io::RandomAccessFile* file, std::shared_ptr* out) { + io::RandomAccessFile* file, std::shared_ptr* out) { return ReadRecordBatch(metadata, schema, kMaxNestingDepth, file, out); } Status ReadRecordBatch(const Message& message, const std::shared_ptr& schema, - std::shared_ptr* out) { + std::shared_ptr* out) { io::BufferReader reader(message.body()); DCHECK_EQ(message.type(), 
Message::RECORD_BATCH); return ReadRecordBatch(*message.metadata(), schema, kMaxNestingDepth, &reader, out); @@ -273,8 +273,9 @@ Status ReadRecordBatch(const Message& message, const std::shared_ptr& sc // Array loading static Status LoadRecordBatchFromSource(const std::shared_ptr& schema, - int64_t num_rows, int max_recursion_depth, IpcComponentSource* source, - std::shared_ptr* out) { + int64_t num_rows, int max_recursion_depth, + IpcComponentSource* source, + std::shared_ptr* out) { ArrayLoaderContext context; context.source = source; context.field_index = 0; @@ -294,16 +295,17 @@ static Status LoadRecordBatchFromSource(const std::shared_ptr& schema, } static inline Status ReadRecordBatch(const flatbuf::RecordBatch* metadata, - const std::shared_ptr& schema, int max_recursion_depth, - io::RandomAccessFile* file, std::shared_ptr* out) { + const std::shared_ptr& schema, + int max_recursion_depth, io::RandomAccessFile* file, + std::shared_ptr* out) { IpcComponentSource source(metadata, file); - return LoadRecordBatchFromSource( - schema, metadata->length(), max_recursion_depth, &source, out); + return LoadRecordBatchFromSource(schema, metadata->length(), max_recursion_depth, + &source, out); } Status ReadRecordBatch(const Buffer& metadata, const std::shared_ptr& schema, - int max_recursion_depth, io::RandomAccessFile* file, - std::shared_ptr* out) { + int max_recursion_depth, io::RandomAccessFile* file, + std::shared_ptr* out) { auto message = flatbuf::GetMessage(metadata.data()); if (message->header_type() != flatbuf::MessageHeader_RecordBatch) { DCHECK_EQ(message->header_type(), flatbuf::MessageHeader_RecordBatch); @@ -313,7 +315,8 @@ Status ReadRecordBatch(const Buffer& metadata, const std::shared_ptr& sc } Status ReadDictionary(const Buffer& metadata, const DictionaryTypeMap& dictionary_types, - io::RandomAccessFile* file, int64_t* dictionary_id, std::shared_ptr* out) { + io::RandomAccessFile* file, int64_t* dictionary_id, + std::shared_ptr* out) { auto message = flatbuf::GetMessage(metadata.data()); auto dictionary_batch = reinterpret_cast(message->header()); @@ -347,7 +350,7 @@ Status ReadDictionary(const Buffer& metadata, const DictionaryTypeMap& dictionar } static Status ReadMessageAndValidate(MessageReader* reader, Message::Type expected_type, - bool allow_null, std::unique_ptr* message) { + bool allow_null, std::unique_ptr* message) { RETURN_NOT_OK(reader->ReadNextMessage(message)); if (!(*message) && !allow_null) { @@ -357,7 +360,9 @@ static Status ReadMessageAndValidate(MessageReader* reader, Message::Type expect return Status::Invalid(ss.str()); } - if ((*message) == nullptr) { return Status::OK(); } + if ((*message) == nullptr) { + return Status::OK(); + } if ((*message)->type() != expected_type) { std::stringstream ss; @@ -389,15 +394,15 @@ class RecordBatchStreamReader::RecordBatchStreamReaderImpl { Status ReadNextDictionary() { std::unique_ptr message; - RETURN_NOT_OK(ReadMessageAndValidate( - message_reader_.get(), Message::DICTIONARY_BATCH, false, &message)); + RETURN_NOT_OK(ReadMessageAndValidate(message_reader_.get(), Message::DICTIONARY_BATCH, + false, &message)); io::BufferReader reader(message->body()); std::shared_ptr dictionary; int64_t id; - RETURN_NOT_OK(ReadDictionary( - *message->metadata(), dictionary_types_, &reader, &id, &dictionary)); + RETURN_NOT_OK(ReadDictionary(*message->metadata(), dictionary_types_, &reader, &id, + &dictionary)); return dictionary_memo_.AddDictionary(id, dictionary); } @@ -420,8 +425,8 @@ class 
RecordBatchStreamReader::RecordBatchStreamReaderImpl { Status ReadNextRecordBatch(std::shared_ptr* batch) { std::unique_ptr message; - RETURN_NOT_OK(ReadMessageAndValidate( - message_reader_.get(), Message::RECORD_BATCH, true, &message)); + RETURN_NOT_OK(ReadMessageAndValidate(message_reader_.get(), Message::RECORD_BATCH, + true, &message)); if (message == nullptr) { // End of stream @@ -451,14 +456,14 @@ RecordBatchStreamReader::RecordBatchStreamReader() { RecordBatchStreamReader::~RecordBatchStreamReader() {} Status RecordBatchStreamReader::Open(std::unique_ptr message_reader, - std::shared_ptr* reader) { + std::shared_ptr* reader) { // Private ctor *reader = std::shared_ptr(new RecordBatchStreamReader()); return (*reader)->impl_->Open(std::move(message_reader)); } Status RecordBatchStreamReader::Open(const std::shared_ptr& stream, - std::shared_ptr* out) { + std::shared_ptr* out) { std::unique_ptr message_reader(new InputStreamMessageReader(stream)); return Open(std::move(message_reader), out); } @@ -502,8 +507,8 @@ class RecordBatchFileReader::RecordBatchFileReaderImpl { } // Now read the footer - RETURN_NOT_OK(file_->ReadAt( - footer_offset_ - footer_length - file_end_size, footer_length, &footer_buffer_)); + RETURN_NOT_OK(file_->ReadAt(footer_offset_ - footer_length - file_end_size, + footer_length, &footer_buffer_)); // TODO(wesm): Verify the footer footer_ = flatbuf::GetFooter(footer_buffer_->data()); @@ -568,7 +573,7 @@ class RecordBatchFileReader::RecordBatchFileReaderImpl { std::shared_ptr dictionary; int64_t dictionary_id; RETURN_NOT_OK(ReadDictionary(*message->metadata(), dictionary_fields_, &reader, - &dictionary_id, &dictionary)); + &dictionary_id, &dictionary)); RETURN_NOT_OK(dictionary_memo_->AddDictionary(dictionary_id, dictionary)); } @@ -610,37 +615,34 @@ RecordBatchFileReader::RecordBatchFileReader() { RecordBatchFileReader::~RecordBatchFileReader() {} Status RecordBatchFileReader::Open(const std::shared_ptr& file, - std::shared_ptr* reader) { + std::shared_ptr* reader) { int64_t footer_offset; RETURN_NOT_OK(file->GetSize(&footer_offset)); return Open(file, footer_offset, reader); } Status RecordBatchFileReader::Open(const std::shared_ptr& file, - int64_t footer_offset, std::shared_ptr* reader) { + int64_t footer_offset, + std::shared_ptr* reader) { *reader = std::shared_ptr(new RecordBatchFileReader()); return (*reader)->impl_->Open(file, footer_offset); } -std::shared_ptr RecordBatchFileReader::schema() const { - return impl_->schema(); -} +std::shared_ptr RecordBatchFileReader::schema() const { return impl_->schema(); } int RecordBatchFileReader::num_record_batches() const { return impl_->num_record_batches(); } -MetadataVersion RecordBatchFileReader::version() const { - return impl_->version(); -} +MetadataVersion RecordBatchFileReader::version() const { return impl_->version(); } -Status RecordBatchFileReader::ReadRecordBatch( - int i, std::shared_ptr* batch) { +Status RecordBatchFileReader::ReadRecordBatch(int i, + std::shared_ptr* batch) { return impl_->ReadRecordBatch(i, batch); } -static Status ReadContiguousPayload( - int64_t offset, io::RandomAccessFile* file, std::unique_ptr* message) { +static Status ReadContiguousPayload(int64_t offset, io::RandomAccessFile* file, + std::unique_ptr* message) { std::shared_ptr buffer; RETURN_NOT_OK(file->Seek(offset)); RETURN_NOT_OK(ReadMessage(file, message)); @@ -652,16 +654,16 @@ static Status ReadContiguousPayload( } Status ReadRecordBatch(const std::shared_ptr& schema, int64_t offset, - io::RandomAccessFile* file, 
std::shared_ptr* out) { + io::RandomAccessFile* file, std::shared_ptr* out) { std::unique_ptr message; RETURN_NOT_OK(ReadContiguousPayload(offset, file, &message)); io::BufferReader buffer_reader(message->body()); - return ReadRecordBatch( - *message->metadata(), schema, kMaxNestingDepth, &buffer_reader, out); + return ReadRecordBatch(*message->metadata(), schema, kMaxNestingDepth, &buffer_reader, + out); } -Status ReadTensor( - int64_t offset, io::RandomAccessFile* file, std::shared_ptr* out) { +Status ReadTensor(int64_t offset, io::RandomAccessFile* file, + std::shared_ptr* out) { // Respect alignment of Tensor messages (see WriteTensor) offset = PaddedLength(offset); std::unique_ptr message; diff --git a/cpp/src/arrow/ipc/reader.h b/cpp/src/arrow/ipc/reader.h index d6c261475014c..c0d3fb1f185f9 100644 --- a/cpp/src/arrow/ipc/reader.h +++ b/cpp/src/arrow/ipc/reader.h @@ -72,7 +72,7 @@ class ARROW_EXPORT RecordBatchStreamReader : public RecordBatchReader { /// \param(out) out the created RecordBatchStreamReader object /// \return Status static Status Open(std::unique_ptr message_reader, - std::shared_ptr* out); + std::shared_ptr* out); /// \Create Record batch stream reader from InputStream /// @@ -80,7 +80,7 @@ class ARROW_EXPORT RecordBatchStreamReader : public RecordBatchReader { /// \param(out) out the created RecordBatchStreamReader object /// \return Status static Status Open(const std::shared_ptr& stream, - std::shared_ptr* out); + std::shared_ptr* out); std::shared_ptr schema() const override; Status ReadNextRecordBatch(std::shared_ptr* batch) override; @@ -103,7 +103,7 @@ class ARROW_EXPORT RecordBatchFileReader { // need only locate the end of the Arrow file stream to discover the metadata // and then proceed to read the data into memory. 
static Status Open(const std::shared_ptr& file, - std::shared_ptr* reader); + std::shared_ptr* reader); // If the file is embedded within some larger file or memory region, you can // pass the absolute memory offset to the end of the file (which contains the @@ -113,7 +113,8 @@ class ARROW_EXPORT RecordBatchFileReader { // @param file: the data source // @param footer_offset: the position of the end of the Arrow "file" static Status Open(const std::shared_ptr& file, - int64_t footer_offset, std::shared_ptr* reader); + int64_t footer_offset, + std::shared_ptr* reader); /// The schema includes any dictionaries std::shared_ptr schema() const; @@ -148,8 +149,9 @@ class ARROW_EXPORT RecordBatchFileReader { /// \param(in) file a random access file /// \param(out) out the read record batch Status ARROW_EXPORT ReadRecordBatch(const Buffer& metadata, - const std::shared_ptr& schema, io::RandomAccessFile* file, - std::shared_ptr* out); + const std::shared_ptr& schema, + io::RandomAccessFile* file, + std::shared_ptr* out); /// \brief Read record batch from fully encapulated Message /// @@ -158,7 +160,8 @@ Status ARROW_EXPORT ReadRecordBatch(const Buffer& metadata, /// \param[out] out the resulting RecordBatch /// \return Status Status ARROW_EXPORT ReadRecordBatch(const Message& message, - const std::shared_ptr& schema, std::shared_ptr* out); + const std::shared_ptr& schema, + std::shared_ptr* out); /// Read record batch from file given metadata and schema /// @@ -168,8 +171,9 @@ Status ARROW_EXPORT ReadRecordBatch(const Message& message, /// \param(in) max_recursion_depth the maximum permitted nesting depth /// \param(out) out the read record batch Status ARROW_EXPORT ReadRecordBatch(const Buffer& metadata, - const std::shared_ptr& schema, int max_recursion_depth, - io::RandomAccessFile* file, std::shared_ptr* out); + const std::shared_ptr& schema, + int max_recursion_depth, io::RandomAccessFile* file, + std::shared_ptr* out); /// Read record batch as encapsulated IPC message with metadata size prefix and /// header @@ -179,15 +183,16 @@ Status ARROW_EXPORT ReadRecordBatch(const Buffer& metadata, /// \param(in) file the file where the batch is located /// \param(out) out the read record batch Status ARROW_EXPORT ReadRecordBatch(const std::shared_ptr& schema, int64_t offset, - io::RandomAccessFile* file, std::shared_ptr* out); + io::RandomAccessFile* file, + std::shared_ptr* out); /// EXPERIMENTAL: Read arrow::Tensor as encapsulated IPC message in file /// /// \param(in) offset the file location of the start of the message /// \param(in) file the file where the batch is located /// \param(out) out the read tensor -Status ARROW_EXPORT ReadTensor( - int64_t offset, io::RandomAccessFile* file, std::shared_ptr* out); +Status ARROW_EXPORT ReadTensor(int64_t offset, io::RandomAccessFile* file, + std::shared_ptr* out); /// Backwards-compatibility for Arrow < 0.4.0 /// diff --git a/cpp/src/arrow/ipc/stream-to-file.cc b/cpp/src/arrow/ipc/stream-to-file.cc index de65883910120..33719b3c89c9e 100644 --- a/cpp/src/arrow/ipc/stream-to-file.cc +++ b/cpp/src/arrow/ipc/stream-to-file.cc @@ -15,11 +15,11 @@ // specific language governing permissions and limitations // under the License. 
+#include #include "arrow/io/file.h" #include "arrow/ipc/reader.h" #include "arrow/ipc/writer.h" #include "arrow/status.h" -#include #include "arrow/util/io-util.h" diff --git a/cpp/src/arrow/ipc/test-common.h b/cpp/src/arrow/ipc/test-common.h index 67a41ba086b75..a8767926b2a07 100644 --- a/cpp/src/arrow/ipc/test-common.h +++ b/cpp/src/arrow/ipc/test-common.h @@ -69,8 +69,8 @@ static inline void CompareBatch(const RecordBatch& left, const RecordBatch& righ } } -static inline void CompareArraysDetailed( - int index, const Array& result, const Array& expected) { +static inline void CompareArraysDetailed(int index, const Array& result, + const Array& expected) { if (!expected.Equals(result)) { std::stringstream pp_result; std::stringstream pp_expected; @@ -83,8 +83,8 @@ static inline void CompareArraysDetailed( } } -static inline void CompareBatchColumnsDetailed( - const RecordBatch& result, const RecordBatch& expected) { +static inline void CompareBatchColumnsDetailed(const RecordBatch& result, + const RecordBatch& expected) { for (int i = 0; i < expected.num_columns(); ++i) { auto left = result.column(i); auto right = expected.column(i); @@ -95,16 +95,16 @@ static inline void CompareBatchColumnsDetailed( const auto kListInt32 = list(int32()); const auto kListListInt32 = list(kListInt32); -Status MakeRandomInt32Array( - int64_t length, bool include_nulls, MemoryPool* pool, std::shared_ptr* out) { +Status MakeRandomInt32Array(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr* out) { std::shared_ptr data; RETURN_NOT_OK(test::MakeRandomInt32PoolBuffer(length, pool, &data)); Int32Builder builder(pool, int32()); if (include_nulls) { std::shared_ptr valid_bytes; RETURN_NOT_OK(test::MakeRandomBytePoolBuffer(length, pool, &valid_bytes)); - RETURN_NOT_OK(builder.Append( - reinterpret_cast(data->data()), length, valid_bytes->data())); + RETURN_NOT_OK(builder.Append(reinterpret_cast(data->data()), length, + valid_bytes->data())); return builder.Finish(out); } RETURN_NOT_OK(builder.Append(reinterpret_cast(data->data()), length)); @@ -112,7 +112,8 @@ Status MakeRandomInt32Array( } Status MakeRandomListArray(const std::shared_ptr& child_array, int num_lists, - bool include_nulls, MemoryPool* pool, std::shared_ptr* out) { + bool include_nulls, MemoryPool* pool, + std::shared_ptr* out) { // Create the null list values std::vector valid_lists(num_lists); const double null_percent = include_nulls ? 0.1 : 0; @@ -129,15 +130,16 @@ Status MakeRandomListArray(const std::shared_ptr& child_array, int num_li test::rand_uniform_int(num_lists, seed, 0, max_list_size, list_sizes.data()); // make sure sizes are consistent with null std::transform(list_sizes.begin(), list_sizes.end(), valid_lists.begin(), - list_sizes.begin(), - [](int32_t size, int32_t valid) { return valid == 0 ? 0 : size; }); + list_sizes.begin(), + [](int32_t size, int32_t valid) { return valid == 0 ? 
0 : size; }); std::partial_sum(list_sizes.begin(), list_sizes.end(), ++offsets.begin()); // Force invariants const int32_t child_length = static_cast(child_array->length()); offsets[0] = 0; std::replace_if(offsets.begin(), offsets.end(), - [child_length](int32_t offset) { return offset > child_length; }, child_length); + [child_length](int32_t offset) { return offset > child_length; }, + child_length); } offsets[num_lists] = static_cast(child_array->length()); @@ -148,14 +150,14 @@ Status MakeRandomListArray(const std::shared_ptr& child_array, int num_li RETURN_NOT_OK(test::CopyBufferFromVector(offsets, pool, &offsets_buffer)); *out = std::make_shared(list(child_array->type()), num_lists, offsets_buffer, - child_array, null_bitmap, kUnknownNullCount); + child_array, null_bitmap, kUnknownNullCount); return ValidateArray(**out); } typedef Status MakeRecordBatch(std::shared_ptr* out); -Status MakeRandomBooleanArray( - const int length, bool include_nulls, std::shared_ptr* out) { +Status MakeRandomBooleanArray(const int length, bool include_nulls, + std::shared_ptr* out) { std::vector values(length); test::random_null_bytes(length, 0.5, values.data()); std::shared_ptr data; @@ -210,10 +212,10 @@ Status MakeIntRecordBatch(std::shared_ptr* out) { } template -Status MakeRandomBinaryArray( - int64_t length, bool include_nulls, MemoryPool* pool, std::shared_ptr* out) { - const std::vector values = { - "", "", "abc", "123", "efg", "456!@#!@#", "12312"}; +Status MakeRandomBinaryArray(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr* out) { + const std::vector values = {"", "", "abc", "123", + "efg", "456!@#!@#", "12312"}; Builder builder(pool); const size_t values_len = values.size(); for (int64_t i = 0; i < length; ++i) { @@ -223,7 +225,7 @@ Status MakeRandomBinaryArray( } else { const std::string& value = values[values_index]; RETURN_NOT_OK(builder.Append(reinterpret_cast(value.data()), - static_cast(value.size()))); + static_cast(value.size()))); } } return builder.Finish(out); @@ -434,11 +436,12 @@ Status MakeUnion(std::shared_ptr* out) { // construct individual nullable/non-nullable struct arrays auto sparse_no_nulls = std::make_shared(sparse_type, length, sparse_children, type_ids_buffer); - auto sparse = std::make_shared( - sparse_type, length, sparse_children, type_ids_buffer, nullptr, null_bitmask, 1); + auto sparse = std::make_shared(sparse_type, length, sparse_children, + type_ids_buffer, nullptr, null_bitmask, 1); - auto dense = std::make_shared(dense_type, length, dense_children, - type_ids_buffer, offsets_buffer, null_bitmask, 1); + auto dense = + std::make_shared(dense_type, length, dense_children, type_ids_buffer, + offsets_buffer, null_bitmask, 1); // construct batch std::vector> arrays = {sparse_no_nulls, sparse, dense}; @@ -480,8 +483,8 @@ Status MakeDictionary(std::shared_ptr* out) { std::vector list_offsets = {0, 0, 2, 2, 5, 6, 9}; std::shared_ptr offsets, indices3; - ArrayFromVector( - std::vector(list_offsets.size(), true), list_offsets, &offsets); + ArrayFromVector(std::vector(list_offsets.size(), true), + list_offsets, &offsets); std::vector indices3_values = {0, 1, 2, 0, 1, 2, 0, 1, 2}; std::vector is_valid3(9, true); @@ -490,8 +493,8 @@ Status MakeDictionary(std::shared_ptr* out) { std::shared_ptr null_bitmap; RETURN_NOT_OK(test::GetBitmapFromVector(is_valid, &null_bitmap)); - std::shared_ptr a3 = std::make_shared(f3_type, length, - std::static_pointer_cast(offsets)->values(), + std::shared_ptr a3 = std::make_shared( + f3_type, length, 
std::static_pointer_cast(offsets)->values(), std::make_shared(f1_type, indices3), null_bitmap, 1); // Dictionary-encoded list of integer @@ -500,14 +503,15 @@ Status MakeDictionary(std::shared_ptr* out) { std::shared_ptr offsets4, values4, indices4; std::vector list_offsets4 = {0, 2, 2, 3}; - ArrayFromVector( - std::vector(4, true), list_offsets4, &offsets4); + ArrayFromVector(std::vector(4, true), list_offsets4, + &offsets4); std::vector list_values4 = {0, 1, 2}; ArrayFromVector(std::vector(3, true), list_values4, &values4); - auto dict3 = std::make_shared(f4_value_type, 3, - std::static_pointer_cast(offsets4)->values(), values4); + auto dict3 = std::make_shared( + f4_value_type, 3, std::static_pointer_cast(offsets4)->values(), + values4); std::vector indices4_values = {0, 1, 2, 0, 1, 2}; ArrayFromVector(is_valid, indices4_values, &indices4); @@ -516,9 +520,9 @@ Status MakeDictionary(std::shared_ptr* out) { auto a4 = std::make_shared(f4_type, indices4); // construct batch - std::shared_ptr schema(new Schema({field("dict1", f0_type), - field("sparse", f1_type), field("dense", f2_type), - field("list of encoded string", f3_type), field("encoded list", f4_type)})); + std::shared_ptr schema(new Schema( + {field("dict1", f0_type), field("sparse", f1_type), field("dense", f2_type), + field("list of encoded string", f3_type), field("encoded list", f4_type)})); std::vector> arrays = {a0, a1, a2, a3, a4}; @@ -575,7 +579,8 @@ Status MakeDates(std::shared_ptr* out) { ArrayFromVector(is_valid, date32_values, &date32_array); std::vector date64_values = {1489269000000, 1489270000000, 1489271000000, - 1489272000000, 1489272000000, 1489273000000, 1489274000000}; + 1489272000000, 1489272000000, 1489273000000, + 1489274000000}; std::shared_ptr date64_array; ArrayFromVector(is_valid, date64_values, &date64_array); @@ -592,7 +597,7 @@ Status MakeTimestamps(std::shared_ptr* out) { std::shared_ptr schema(new Schema({f0, f1, f2})); std::vector ts_values = {1489269000000, 1489270000000, 1489271000000, - 1489272000000, 1489272000000, 1489273000000}; + 1489272000000, 1489272000000, 1489273000000}; std::shared_ptr a0, a1, a2; ArrayFromVector(f0->type(), is_valid, ts_values, &a0); @@ -612,10 +617,10 @@ Status MakeTimes(std::shared_ptr* out) { auto f3 = field("f3", time64(TimeUnit::NANO)); std::shared_ptr schema(new Schema({f0, f1, f2, f3})); - std::vector t32_values = { - 1489269000, 1489270000, 1489271000, 1489272000, 1489272000, 1489273000}; + std::vector t32_values = {1489269000, 1489270000, 1489271000, + 1489272000, 1489272000, 1489273000}; std::vector t64_values = {1489269000000, 1489270000000, 1489271000000, - 1489272000000, 1489272000000, 1489273000000}; + 1489272000000, 1489272000000, 1489273000000}; std::shared_ptr a0, a1, a2, a3; ArrayFromVector(f0->type(), is_valid, t32_values, &a0); @@ -630,7 +635,7 @@ Status MakeTimes(std::shared_ptr* out) { template void AppendValues(const std::vector& is_valid, const std::vector& values, - BuilderType* builder) { + BuilderType* builder) { for (size_t i = 0; i < values.size(); ++i) { if (is_valid[i]) { ASSERT_OK(builder->Append(values[i])); diff --git a/cpp/src/arrow/ipc/writer.cc b/cpp/src/arrow/ipc/writer.cc index 14708a1e7a032..163b27b443351 100644 --- a/cpp/src/arrow/ipc/writer.cc +++ b/cpp/src/arrow/ipc/writer.cc @@ -45,8 +45,9 @@ namespace ipc { // Record batch write path static inline Status GetTruncatedBitmap(int64_t offset, int64_t length, - const std::shared_ptr input, MemoryPool* pool, - std::shared_ptr* buffer) { + const std::shared_ptr input, + MemoryPool* 
pool, + std::shared_ptr* buffer) { if (!input) { *buffer = input; return Status::OK(); @@ -63,8 +64,8 @@ static inline Status GetTruncatedBitmap(int64_t offset, int64_t length, template inline Status GetTruncatedBuffer(int64_t offset, int64_t length, - const std::shared_ptr input, MemoryPool* pool, - std::shared_ptr* buffer) { + const std::shared_ptr input, MemoryPool* pool, + std::shared_ptr* buffer) { if (!input) { *buffer = input; return Status::OK(); @@ -80,17 +81,19 @@ inline Status GetTruncatedBuffer(int64_t offset, int64_t length, return Status::OK(); } -static inline bool NeedTruncate( - int64_t offset, const Buffer* buffer, int64_t min_length) { +static inline bool NeedTruncate(int64_t offset, const Buffer* buffer, + int64_t min_length) { // buffer can be NULL - if (buffer == nullptr) { return false; } + if (buffer == nullptr) { + return false; + } return offset != 0 || min_length < buffer->size(); } class RecordBatchSerializer : public ArrayVisitor { public: RecordBatchSerializer(MemoryPool* pool, int64_t buffer_start_offset, - int max_recursion_depth, bool allow_64bit) + int max_recursion_depth, bool allow_64bit) : pool_(pool), max_recursion_depth_(max_recursion_depth), buffer_start_offset_(buffer_start_offset), @@ -114,8 +117,8 @@ class RecordBatchSerializer : public ArrayVisitor { if (arr.null_count() > 0) { std::shared_ptr bitmap; - RETURN_NOT_OK(GetTruncatedBitmap( - arr.offset(), arr.length(), arr.null_bitmap(), pool_, &bitmap)); + RETURN_NOT_OK(GetTruncatedBitmap(arr.offset(), arr.length(), arr.null_bitmap(), + pool_, &bitmap)); buffers_.push_back(bitmap); } else { // Push a dummy zero-length buffer, not to be copied @@ -175,14 +178,14 @@ class RecordBatchSerializer : public ArrayVisitor { } // Override this for writing dictionary metadata - virtual Status WriteMetadataMessage( - int64_t num_rows, int64_t body_length, std::shared_ptr* out) { - return WriteRecordBatchMessage( - num_rows, body_length, field_nodes_, buffer_meta_, out); + virtual Status WriteMetadataMessage(int64_t num_rows, int64_t body_length, + std::shared_ptr* out) { + return WriteRecordBatchMessage(num_rows, body_length, field_nodes_, buffer_meta_, + out); } Status Write(const RecordBatch& batch, io::OutputStream* dst, int32_t* metadata_length, - int64_t* body_length) { + int64_t* body_length) { RETURN_NOT_OK(Assemble(batch, body_length)); #ifndef NDEBUG @@ -216,9 +219,13 @@ class RecordBatchSerializer : public ArrayVisitor { padding = BitUtil::RoundUpToMultipleOf64(size) - size; } - if (size > 0) { RETURN_NOT_OK(dst->Write(buffer->data(), size)); } + if (size > 0) { + RETURN_NOT_OK(dst->Write(buffer->data(), size)); + } - if (padding > 0) { RETURN_NOT_OK(dst->Write(kPaddingBytes, padding)); } + if (padding > 0) { + RETURN_NOT_OK(dst->Write(kPaddingBytes, padding)); + } } #ifndef NDEBUG @@ -245,7 +252,7 @@ class RecordBatchSerializer : public ArrayVisitor { // Send padding if it's available const int64_t buffer_length = std::min(BitUtil::RoundUpToMultipleOf64(array.length() * type_width), - data->size() - byte_offset); + data->size() - byte_offset); data = SliceBuffer(data, byte_offset, buffer_length); } buffers_.push_back(data); @@ -253,8 +260,8 @@ class RecordBatchSerializer : public ArrayVisitor { } template - Status GetZeroBasedValueOffsets( - const ArrayType& array, std::shared_ptr* value_offsets) { + Status GetZeroBasedValueOffsets(const ArrayType& array, + std::shared_ptr* value_offsets) { // Share slicing logic between ListArray and BinaryArray auto offsets = array.value_offsets(); @@ -265,8 
+272,8 @@ class RecordBatchSerializer : public ArrayVisitor { // b) slice the values array accordingly std::shared_ptr shifted_offsets; - RETURN_NOT_OK(AllocateBuffer( - pool_, sizeof(int32_t) * (array.length() + 1), &shifted_offsets)); + RETURN_NOT_OK(AllocateBuffer(pool_, sizeof(int32_t) * (array.length() + 1), + &shifted_offsets)); int32_t* dest_offsets = reinterpret_cast(shifted_offsets->mutable_data()); const int32_t start_offset = array.value_offset(0); @@ -392,13 +399,15 @@ class RecordBatchSerializer : public ArrayVisitor { const auto& type = static_cast(*array.type()); std::shared_ptr value_offsets; - RETURN_NOT_OK(GetTruncatedBuffer( - offset, length, array.value_offsets(), pool_, &value_offsets)); + RETURN_NOT_OK(GetTruncatedBuffer(offset, length, array.value_offsets(), + pool_, &value_offsets)); // The Union type codes are not necessary 0-indexed uint8_t max_code = 0; for (uint8_t code : type.type_codes()) { - if (code > max_code) { max_code = code; } + if (code > max_code) { + max_code = code; + } } // Allocate an array of child offsets. Set all to -1 to indicate that we @@ -424,7 +433,9 @@ class RecordBatchSerializer : public ArrayVisitor { for (int64_t i = 0; i < length; ++i) { const uint8_t code = type_ids[i]; int32_t shift = child_offsets[code]; - if (shift == -1) { child_offsets[code] = shift = unshifted_offsets[i]; } + if (shift == -1) { + child_offsets[code] = shift = unshifted_offsets[i]; + } shifted_offsets[i] = unshifted_offsets[i] - shift; // Update the child length to account for observed value @@ -486,14 +497,14 @@ class DictionaryWriter : public RecordBatchSerializer { public: using RecordBatchSerializer::RecordBatchSerializer; - Status WriteMetadataMessage( - int64_t num_rows, int64_t body_length, std::shared_ptr* out) override { - return WriteDictionaryMessage( - dictionary_id_, num_rows, body_length, field_nodes_, buffer_meta_, out); + Status WriteMetadataMessage(int64_t num_rows, int64_t body_length, + std::shared_ptr* out) override { + return WriteDictionaryMessage(dictionary_id_, num_rows, body_length, field_nodes_, + buffer_meta_, out); } Status Write(int64_t dictionary_id, const std::shared_ptr& dictionary, - io::OutputStream* dst, int32_t* metadata_length, int64_t* body_length) { + io::OutputStream* dst, int32_t* metadata_length, int64_t* body_length) { dictionary_id_ = dictionary_id; // Make a dummy record batch. 
A bit tedious as we have to make a schema @@ -516,27 +527,30 @@ Status AlignStreamPosition(io::OutputStream* stream) { int64_t position; RETURN_NOT_OK(stream->Tell(&position)); int64_t remainder = PaddedLength(position) - position; - if (remainder > 0) { return stream->Write(kPaddingBytes, remainder); } + if (remainder > 0) { + return stream->Write(kPaddingBytes, remainder); + } return Status::OK(); } Status WriteRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset, - io::OutputStream* dst, int32_t* metadata_length, int64_t* body_length, - MemoryPool* pool, int max_recursion_depth, bool allow_64bit) { - RecordBatchSerializer writer( - pool, buffer_start_offset, max_recursion_depth, allow_64bit); + io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length, MemoryPool* pool, int max_recursion_depth, + bool allow_64bit) { + RecordBatchSerializer writer(pool, buffer_start_offset, max_recursion_depth, + allow_64bit); return writer.Write(batch, dst, metadata_length, body_length); } Status WriteLargeRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset, - io::OutputStream* dst, int32_t* metadata_length, int64_t* body_length, - MemoryPool* pool) { + io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length, MemoryPool* pool) { return WriteRecordBatch(batch, buffer_start_offset, dst, metadata_length, body_length, - pool, kMaxNestingDepth, true); + pool, kMaxNestingDepth, true); } Status WriteTensor(const Tensor& tensor, io::OutputStream* dst, int32_t* metadata_length, - int64_t* body_length) { + int64_t* body_length) { if (!tensor.is_contiguous()) { return Status::Invalid("No support yet for writing non-contiguous tensors"); } @@ -556,8 +570,8 @@ Status WriteTensor(const Tensor& tensor, io::OutputStream* dst, int32_t* metadat } Status WriteDictionary(int64_t dictionary_id, const std::shared_ptr& dictionary, - int64_t buffer_start_offset, io::OutputStream* dst, int32_t* metadata_length, - int64_t* body_length, MemoryPool* pool) { + int64_t buffer_start_offset, io::OutputStream* dst, + int32_t* metadata_length, int64_t* body_length, MemoryPool* pool) { DictionaryWriter writer(pool, buffer_start_offset, kMaxNestingDepth, false); return writer.Write(dictionary_id, dictionary, dst, metadata_length, body_length); } @@ -568,7 +582,7 @@ Status GetRecordBatchSize(const RecordBatch& batch, int64_t* size) { int64_t body_length = 0; io::MockOutputStream dst; RETURN_NOT_OK(WriteRecordBatch(batch, 0, &dst, &metadata_length, &body_length, - default_memory_pool(), kMaxNestingDepth, true)); + default_memory_pool(), kMaxNestingDepth, true)); *size = dst.GetExtentBytesWritten(); return Status::OK(); } @@ -632,7 +646,9 @@ class RecordBatchStreamWriter::RecordBatchStreamWriterImpl { } Status CheckStarted() { - if (!started_) { return Start(); } + if (!started_) { + return Start(); + } return Status::OK(); } @@ -653,7 +669,7 @@ class RecordBatchStreamWriter::RecordBatchStreamWriterImpl { // Frame of reference in file format is 0, see ARROW-384 const int64_t buffer_start_offset = 0; RETURN_NOT_OK(WriteDictionary(entry.first, entry.second, buffer_start_offset, sink_, - &block->metadata_length, &block->body_length, pool_)); + &block->metadata_length, &block->body_length, pool_)); RETURN_NOT_OK(UpdatePosition()); DCHECK(position_ % 8 == 0) << "WriteDictionary did not perform aligned writes"; } @@ -668,9 +684,9 @@ class RecordBatchStreamWriter::RecordBatchStreamWriterImpl { // Frame of reference in file format is 0, see ARROW-384 const int64_t buffer_start_offset = 0; - 
RETURN_NOT_OK(arrow::ipc::WriteRecordBatch(batch, buffer_start_offset, sink_, - &block->metadata_length, &block->body_length, pool_, kMaxNestingDepth, - allow_64bit)); + RETURN_NOT_OK(arrow::ipc::WriteRecordBatch( + batch, buffer_start_offset, sink_, &block->metadata_length, &block->body_length, + pool_, kMaxNestingDepth, allow_64bit)); RETURN_NOT_OK(UpdatePosition()); DCHECK(position_ % 8 == 0) << "WriteRecordBatch did not perform aligned writes"; @@ -681,15 +697,17 @@ class RecordBatchStreamWriter::RecordBatchStreamWriterImpl { Status WriteRecordBatch(const RecordBatch& batch, bool allow_64bit) { // Push an empty FileBlock. Can be written in the footer later record_batches_.push_back({0, 0, 0}); - return WriteRecordBatch( - batch, allow_64bit, &record_batches_[record_batches_.size() - 1]); + return WriteRecordBatch(batch, allow_64bit, + &record_batches_[record_batches_.size() - 1]); } // Adds padding bytes if necessary to ensure all memory blocks are written on // 64-byte (or other alignment) boundaries. Status Align(int64_t alignment = kArrowAlignment) { int64_t remainder = PaddedLength(position_, alignment) - position_; - if (remainder > 0) { return Write(kPaddingBytes, remainder); } + if (remainder > 0) { + return Write(kPaddingBytes, remainder); + } return Status::OK(); } @@ -725,8 +743,8 @@ RecordBatchStreamWriter::RecordBatchStreamWriter() { RecordBatchStreamWriter::~RecordBatchStreamWriter() {} -Status RecordBatchStreamWriter::WriteRecordBatch( - const RecordBatch& batch, bool allow_64bit) { +Status RecordBatchStreamWriter::WriteRecordBatch(const RecordBatch& batch, + bool allow_64bit) { return impl_->WriteRecordBatch(batch, allow_64bit); } @@ -735,16 +753,14 @@ void RecordBatchStreamWriter::set_memory_pool(MemoryPool* pool) { } Status RecordBatchStreamWriter::Open(io::OutputStream* sink, - const std::shared_ptr& schema, - std::shared_ptr* out) { + const std::shared_ptr& schema, + std::shared_ptr* out) { // ctor is private *out = std::shared_ptr(new RecordBatchStreamWriter()); return (*out)->impl_->Open(sink, schema); } -Status RecordBatchStreamWriter::Close() { - return impl_->Close(); -} +Status RecordBatchStreamWriter::Close() { return impl_->Close(); } // ---------------------------------------------------------------------- // File writer implementation @@ -756,8 +772,8 @@ class RecordBatchFileWriter::RecordBatchFileWriterImpl Status Start() override { // It is only necessary to align to 8-byte boundary at the start of the file - RETURN_NOT_OK(Write( - reinterpret_cast(kArrowMagicBytes), strlen(kArrowMagicBytes))); + RETURN_NOT_OK(Write(reinterpret_cast(kArrowMagicBytes), + strlen(kArrowMagicBytes))); RETURN_NOT_OK(Align(8)); // We write the schema at the start of the file (and the end). 
This also @@ -768,21 +784,23 @@ class RecordBatchFileWriter::RecordBatchFileWriterImpl Status Close() override { // Write metadata int64_t initial_position = position_; - RETURN_NOT_OK(WriteFileFooter( - *schema_, dictionaries_, record_batches_, &dictionary_memo_, sink_)); + RETURN_NOT_OK(WriteFileFooter(*schema_, dictionaries_, record_batches_, + &dictionary_memo_, sink_)); RETURN_NOT_OK(UpdatePosition()); // Write footer length int32_t footer_length = static_cast(position_ - initial_position); - if (footer_length <= 0) { return Status::Invalid("Invalid file footer"); } + if (footer_length <= 0) { + return Status::Invalid("Invalid file footer"); + } RETURN_NOT_OK( Write(reinterpret_cast(&footer_length), sizeof(int32_t))); // Write magic bytes to end file - return Write( - reinterpret_cast(kArrowMagicBytes), strlen(kArrowMagicBytes)); + return Write(reinterpret_cast(kArrowMagicBytes), + strlen(kArrowMagicBytes)); } }; @@ -793,20 +811,19 @@ RecordBatchFileWriter::RecordBatchFileWriter() { RecordBatchFileWriter::~RecordBatchFileWriter() {} Status RecordBatchFileWriter::Open(io::OutputStream* sink, - const std::shared_ptr& schema, std::shared_ptr* out) { + const std::shared_ptr& schema, + std::shared_ptr* out) { *out = std::shared_ptr( new RecordBatchFileWriter()); // ctor is private return (*out)->impl_->Open(sink, schema); } -Status RecordBatchFileWriter::WriteRecordBatch( - const RecordBatch& batch, bool allow_64bit) { +Status RecordBatchFileWriter::WriteRecordBatch(const RecordBatch& batch, + bool allow_64bit) { return impl_->WriteRecordBatch(batch, allow_64bit); } -Status RecordBatchFileWriter::Close() { - return impl_->Close(); -} +Status RecordBatchFileWriter::Close() { return impl_->Close(); } } // namespace ipc } // namespace arrow diff --git a/cpp/src/arrow/ipc/writer.h b/cpp/src/arrow/ipc/writer.h index 899a1b2cc1e30..c28dfe0afbb11 100644 --- a/cpp/src/arrow/ipc/writer.h +++ b/cpp/src/arrow/ipc/writer.h @@ -85,7 +85,7 @@ class ARROW_EXPORT RecordBatchStreamWriter : public RecordBatchWriter { /// \param(out) out the created stream writer /// \return Status indicating success or failure static Status Open(io::OutputStream* sink, const std::shared_ptr& schema, - std::shared_ptr* out); + std::shared_ptr* out); Status WriteRecordBatch(const RecordBatch& batch, bool allow_64bit = false) override; Status Close() override; @@ -113,7 +113,7 @@ class ARROW_EXPORT RecordBatchFileWriter : public RecordBatchStreamWriter { /// \param(out) out the created stream writer /// \return Status indicating success or failure static Status Open(io::OutputStream* sink, const std::shared_ptr& schema, - std::shared_ptr* out); + std::shared_ptr* out); Status WriteRecordBatch(const RecordBatch& batch, bool allow_64bit = false) override; Status Close() override; @@ -145,14 +145,16 @@ class ARROW_EXPORT RecordBatchFileWriter : public RecordBatchStreamWriter { /// \param(out) body_length: the size of the contiguous buffer block plus /// padding bytes Status ARROW_EXPORT WriteRecordBatch(const RecordBatch& batch, - int64_t buffer_start_offset, io::OutputStream* dst, int32_t* metadata_length, - int64_t* body_length, MemoryPool* pool, int max_recursion_depth = kMaxNestingDepth, - bool allow_64bit = false); + int64_t buffer_start_offset, io::OutputStream* dst, + int32_t* metadata_length, int64_t* body_length, + MemoryPool* pool, + int max_recursion_depth = kMaxNestingDepth, + bool allow_64bit = false); // Write Array as a DictionaryBatch message Status WriteDictionary(int64_t dictionary_id, const std::shared_ptr& 
dictionary, - int64_t buffer_start_offset, io::OutputStream* dst, int32_t* metadata_length, - int64_t* body_length, MemoryPool* pool); + int64_t buffer_start_offset, io::OutputStream* dst, + int32_t* metadata_length, int64_t* body_length, MemoryPool* pool); // Compute the precise number of bytes needed in a contiguous memory segment to // write the record batch. This involves generating the complete serialized @@ -166,13 +168,14 @@ Status ARROW_EXPORT GetTensorSize(const Tensor& tensor, int64_t* size); /// EXPERIMENTAL: Write RecordBatch allowing lengths over INT32_MAX. This data /// may not be readable by all Arrow implementations Status ARROW_EXPORT WriteLargeRecordBatch(const RecordBatch& batch, - int64_t buffer_start_offset, io::OutputStream* dst, int32_t* metadata_length, - int64_t* body_length, MemoryPool* pool); + int64_t buffer_start_offset, + io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length, MemoryPool* pool); /// EXPERIMENTAL: Write arrow::Tensor as a contiguous message /// Status ARROW_EXPORT WriteTensor(const Tensor& tensor, io::OutputStream* dst, - int32_t* metadata_length, int64_t* body_length); + int32_t* metadata_length, int64_t* body_length); /// Backwards-compatibility for Arrow < 0.4.0 /// diff --git a/cpp/src/arrow/memory_pool-test.cc b/cpp/src/arrow/memory_pool-test.cc index 8a185abca71cc..52e48dbefab9e 100644 --- a/cpp/src/arrow/memory_pool-test.cc +++ b/cpp/src/arrow/memory_pool-test.cc @@ -27,9 +27,7 @@ class TestDefaultMemoryPool : public ::arrow::test::TestMemoryPoolBase { ::arrow::MemoryPool* memory_pool() override { return ::arrow::default_memory_pool(); } }; -TEST_F(TestDefaultMemoryPool, MemoryTracking) { - this->TestMemoryTracking(); -} +TEST_F(TestDefaultMemoryPool, MemoryTracking) { this->TestMemoryTracking(); } TEST_F(TestDefaultMemoryPool, OOM) { #ifndef ADDRESS_SANITIZER @@ -37,9 +35,7 @@ TEST_F(TestDefaultMemoryPool, OOM) { #endif } -TEST_F(TestDefaultMemoryPool, Reallocate) { - this->TestReallocate(); -} +TEST_F(TestDefaultMemoryPool, Reallocate) { this->TestReallocate(); } // Death tests and valgrind are known to not play well 100% of the time. 
See // googletest documentation @@ -53,7 +49,7 @@ TEST(DefaultMemoryPoolDeathTest, FreeLargeMemory) { #ifndef NDEBUG EXPECT_EXIT(pool->Free(data, 120), ::testing::ExitedWithCode(1), - ".*Check failed: \\(bytes_allocated_\\) >= \\(size\\)"); + ".*Check failed: \\(bytes_allocated_\\) >= \\(size\\)"); #endif pool->Free(data, 100); diff --git a/cpp/src/arrow/memory_pool.cc b/cpp/src/arrow/memory_pool.cc index e7de5c4fc589a..769fc1037ee80 100644 --- a/cpp/src/arrow/memory_pool.cc +++ b/cpp/src/arrow/memory_pool.cc @@ -17,12 +17,12 @@ #include "arrow/memory_pool.h" +#include #include #include #include #include #include -#include #include "arrow/status.h" #include "arrow/util/logging.h" @@ -60,8 +60,8 @@ Status AllocateAligned(int64_t size, uint8_t** out) { return Status::OutOfMemory(ss.str()); } #else - const int result = posix_memalign( - reinterpret_cast(out), kAlignment, static_cast(size)); + const int result = posix_memalign(reinterpret_cast(out), kAlignment, + static_cast(size)); if (result == ENOMEM) { std::stringstream ss; ss << "malloc of size " << size << " failed"; @@ -82,13 +82,9 @@ MemoryPool::MemoryPool() {} MemoryPool::~MemoryPool() {} -int64_t MemoryPool::max_memory() const { - return -1; -} +int64_t MemoryPool::max_memory() const { return -1; } -DefaultMemoryPool::DefaultMemoryPool() : bytes_allocated_(0) { - max_memory_ = 0; -} +DefaultMemoryPool::DefaultMemoryPool() : bytes_allocated_(0) { max_memory_ = 0; } Status DefaultMemoryPool::Allocate(int64_t size, uint8_t** out) { RETURN_NOT_OK(AllocateAligned(size, out)); @@ -96,7 +92,9 @@ Status DefaultMemoryPool::Allocate(int64_t size, uint8_t** out) { { std::lock_guard guard(lock_); - if (bytes_allocated_ > max_memory_) { max_memory_ = bytes_allocated_.load(); } + if (bytes_allocated_ > max_memory_) { + max_memory_ = bytes_allocated_.load(); + } } return Status::OK(); } @@ -128,15 +126,15 @@ Status DefaultMemoryPool::Reallocate(int64_t old_size, int64_t new_size, uint8_t bytes_allocated_ += new_size - old_size; { std::lock_guard guard(lock_); - if (bytes_allocated_ > max_memory_) { max_memory_ = bytes_allocated_.load(); } + if (bytes_allocated_ > max_memory_) { + max_memory_ = bytes_allocated_.load(); + } } return Status::OK(); } -int64_t DefaultMemoryPool::bytes_allocated() const { - return bytes_allocated_.load(); -} +int64_t DefaultMemoryPool::bytes_allocated() const { return bytes_allocated_.load(); } void DefaultMemoryPool::Free(uint8_t* buffer, int64_t size) { DCHECK_GE(bytes_allocated_, size); @@ -150,9 +148,7 @@ void DefaultMemoryPool::Free(uint8_t* buffer, int64_t size) { bytes_allocated_ -= size; } -int64_t DefaultMemoryPool::max_memory() const { - return max_memory_.load(); -} +int64_t DefaultMemoryPool::max_memory() const { return max_memory_.load(); } DefaultMemoryPool::~DefaultMemoryPool() {} diff --git a/cpp/src/arrow/pretty_print-test.cc b/cpp/src/arrow/pretty_print-test.cc index 10a91f5e4e461..049f5a58a6841 100644 --- a/cpp/src/arrow/pretty_print-test.cc +++ b/cpp/src/arrow/pretty_print-test.cc @@ -57,7 +57,7 @@ void CheckArray(const Array& arr, int indent, const char* expected) { template void CheckPrimitive(int indent, const std::vector& is_valid, - const std::vector& values, const char* expected) { + const std::vector& values, const char* expected) { std::shared_ptr array; ArrayFromVector(is_valid, values, &array); CheckArray(*array, indent, expected); diff --git a/cpp/src/arrow/pretty_print.cc b/cpp/src/arrow/pretty_print.cc index 93f6ff0f363f0..aedad1228dfb2 100644 --- a/cpp/src/arrow/pretty_print.cc +++ 
b/cpp/src/arrow/pretty_print.cc @@ -42,7 +42,9 @@ class ArrayPrinter { const T& array) { const auto data = array.raw_values(); for (int i = 0; i < array.length(); ++i) { - if (i > 0) { (*sink_) << ", "; } + if (i > 0) { + (*sink_) << ", "; + } if (array.IsNull(i)) { (*sink_) << "null"; } else { @@ -56,7 +58,9 @@ class ArrayPrinter { const T& array) { const auto data = array.raw_values(); for (int i = 0; i < array.length(); ++i) { - if (i > 0) { (*sink_) << ", "; } + if (i > 0) { + (*sink_) << ", "; + } if (array.IsNull(i)) { Write("null"); } else { @@ -71,7 +75,9 @@ class ArrayPrinter { WriteDataValues(const T& array) { int32_t length; for (int i = 0; i < array.length(); ++i) { - if (i > 0) { (*sink_) << ", "; } + if (i > 0) { + (*sink_) << ", "; + } if (array.IsNull(i)) { Write("null"); } else { @@ -87,7 +93,9 @@ class ArrayPrinter { WriteDataValues(const T& array) { int32_t length; for (int i = 0; i < array.length(); ++i) { - if (i > 0) { (*sink_) << ", "; } + if (i > 0) { + (*sink_) << ", "; + } if (array.IsNull(i)) { Write("null"); } else { @@ -102,7 +110,9 @@ class ArrayPrinter { WriteDataValues(const T& array) { int32_t width = array.byte_width(); for (int i = 0; i < array.length(); ++i) { - if (i > 0) { (*sink_) << ", "; } + if (i > 0) { + (*sink_) << ", "; + } if (array.IsNull(i)) { Write("null"); } else { @@ -116,7 +126,9 @@ class ArrayPrinter { inline typename std::enable_if::value, void>::type WriteDataValues(const T& array) { for (int i = 0; i < array.length(); ++i) { - if (i > 0) { (*sink_) << ", "; } + if (i > 0) { + (*sink_) << ", "; + } if (array.IsNull(i)) { Write("null"); } else { @@ -138,7 +150,7 @@ class ArrayPrinter { typename std::enable_if::value || std::is_base_of::value || std::is_base_of::value, - Status>::type + Status>::type Visit(const T& array) { OpenArray(); WriteDataValues(array); @@ -157,8 +169,8 @@ class ArrayPrinter { Newline(); Write("-- value_offsets: "); - Int32Array value_offsets( - array.length() + 1, array.value_offsets(), nullptr, 0, array.offset()); + Int32Array value_offsets(array.length() + 1, array.value_offsets(), nullptr, 0, + array.offset()); RETURN_NOT_OK(PrettyPrint(value_offsets, indent_ + 2, sink_)); Newline(); @@ -170,8 +182,8 @@ class ArrayPrinter { return Status::OK(); } - Status PrintChildren( - const std::vector>& fields, int64_t offset, int64_t length) { + Status PrintChildren(const std::vector>& fields, int64_t offset, + int64_t length) { for (size_t i = 0; i < fields.size(); ++i) { Newline(); std::stringstream ss; @@ -179,7 +191,9 @@ class ArrayPrinter { Write(ss.str()); std::shared_ptr field = fields[i]; - if (offset != 0) { field = field->Slice(offset, length); } + if (offset != 0) { + field = field->Slice(offset, length); + } RETURN_NOT_OK(PrettyPrint(*field, indent_ + 2, sink_)); } @@ -207,8 +221,8 @@ class ArrayPrinter { if (array.mode() == UnionMode::DENSE) { Newline(); Write("-- value_offsets: "); - Int32Array value_offsets( - array.length(), array.value_offsets(), nullptr, 0, array.offset()); + Int32Array value_offsets(array.length(), array.value_offsets(), nullptr, 0, + array.offset()); RETURN_NOT_OK(PrettyPrint(value_offsets, indent_ + 2, sink_)); } @@ -247,8 +261,8 @@ Status ArrayPrinter::WriteValidityBitmap(const Array& array) { Write("-- is_valid: "); if (array.null_count() > 0) { - BooleanArray is_valid( - array.length(), array.null_bitmap(), nullptr, 0, array.offset()); + BooleanArray is_valid(array.length(), array.null_bitmap(), nullptr, 0, + array.offset()); return PrettyPrint(is_valid, indent_ + 2, sink_); } 
else { Write("all not null"); @@ -256,20 +270,12 @@ Status ArrayPrinter::WriteValidityBitmap(const Array& array) { } } -void ArrayPrinter::OpenArray() { - (*sink_) << "["; -} -void ArrayPrinter::CloseArray() { - (*sink_) << "]"; -} +void ArrayPrinter::OpenArray() { (*sink_) << "["; } +void ArrayPrinter::CloseArray() { (*sink_) << "]"; } -void ArrayPrinter::Write(const char* data) { - (*sink_) << data; -} +void ArrayPrinter::Write(const char* data) { (*sink_) << data; } -void ArrayPrinter::Write(const std::string& data) { - (*sink_) << data; -} +void ArrayPrinter::Write(const std::string& data) { (*sink_) << data; } void ArrayPrinter::Newline() { (*sink_) << "\n"; diff --git a/cpp/src/arrow/python/arrow_to_pandas.cc b/cpp/src/arrow/python/arrow_to_pandas.cc index d40609fe3fad2..462bdb7b7d744 100644 --- a/cpp/src/arrow/python/arrow_to_pandas.cc +++ b/cpp/src/arrow/python/arrow_to_pandas.cc @@ -147,8 +147,8 @@ static inline PyArray_Descr* GetSafeNumPyDtype(int type) { return PyArray_DescrFromType(type); } } -static inline PyObject* NewArray1DFromType( - DataType* arrow_type, int type, int64_t length, void* data) { +static inline PyObject* NewArray1DFromType(DataType* arrow_type, int type, int64_t length, + void* data) { npy_intp dims[1] = {length}; PyArray_Descr* descr = GetSafeNumPyDtype(type); @@ -159,7 +159,8 @@ static inline PyObject* NewArray1DFromType( set_numpy_metadata(type, arrow_type, descr); return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, nullptr, data, - NPY_ARRAY_OWNDATA | NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEABLE, nullptr); + NPY_ARRAY_OWNDATA | NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEABLE, + nullptr); } class PandasBlock { @@ -188,7 +189,7 @@ class PandasBlock { virtual Status Allocate() = 0; virtual Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) = 0; + int64_t rel_placement) = 0; PyObject* block_arr() const { return block_arr_.obj(); } @@ -408,7 +409,9 @@ inline Status ConvertFixedSizeBinary(const ChunkedArray& data, PyObject** out_va inline Status ConvertStruct(const ChunkedArray& data, PyObject** out_values) { PyAcquireGIL lock; - if (data.num_chunks() <= 0) { return Status::OK(); } + if (data.num_chunks() <= 0) { + return Status::OK(); + } // ChunkedArray has at least one chunk auto arr = static_cast(data.chunk(0).get()); // Use it to cache the struct type and number of fields for all chunks @@ -467,8 +470,8 @@ inline Status ConvertStruct(const ChunkedArray& data, PyObject** out_values) { } template -inline Status ConvertListsLike( - const std::shared_ptr& col, PyObject** out_values) { +inline Status ConvertListsLike(const std::shared_ptr& col, + PyObject** out_values) { const ChunkedArray& data = *col->data().get(); auto list_type = std::static_pointer_cast(col->type()); @@ -532,8 +535,8 @@ inline void ConvertNumericNullable(const ChunkedArray& data, T na_value, T* out_ } template -inline void ConvertNumericNullableCast( - const ChunkedArray& data, OutType na_value, OutType* out_values) { +inline void ConvertNumericNullableCast(const ChunkedArray& data, OutType na_value, + OutType* out_values) { for (int c = 0; c < data.num_chunks(); c++) { const std::shared_ptr arr = data.chunk(c); auto prim_arr = static_cast(arr.get()); @@ -602,8 +605,8 @@ Status ValidateDecimalPrecision(int precision) { } template -Status RawDecimalToString( - const uint8_t* bytes, int precision, int scale, std::string* result) { +Status RawDecimalToString(const uint8_t* bytes, int precision, int scale, + std::string* result) { DCHECK_NE(bytes, nullptr); 
DCHECK_NE(result, nullptr); RETURN_NOT_OK(ValidateDecimalPrecision(precision)); @@ -613,13 +616,13 @@ Status RawDecimalToString( return Status::OK(); } -template Status RawDecimalToString( - const uint8_t*, int, int, std::string* result); -template Status RawDecimalToString( - const uint8_t*, int, int, std::string* result); +template Status RawDecimalToString(const uint8_t*, int, int, + std::string* result); +template Status RawDecimalToString(const uint8_t*, int, int, + std::string* result); Status RawDecimalToString(const uint8_t* bytes, int precision, int scale, - bool is_negative, std::string* result) { + bool is_negative, std::string* result) { DCHECK_NE(bytes, nullptr); DCHECK_NE(result, nullptr); RETURN_NOT_OK(ValidateDecimalPrecision(precision)); @@ -684,7 +687,7 @@ class ObjectBlock : public PandasBlock { Status Allocate() override { return AllocateNDArray(NPY_OBJECT); } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { Type::type type = col->type()->id(); PyObject** out_buffer = @@ -753,7 +756,7 @@ class IntBlock : public PandasBlock { } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { Type::type type = col->type()->id(); C_TYPE* out_buffer = @@ -789,7 +792,7 @@ class Float32Block : public PandasBlock { Status Allocate() override { return AllocateNDArray(NPY_FLOAT32); } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { Type::type type = col->type()->id(); if (type != Type::FLOAT) { @@ -813,7 +816,7 @@ class Float64Block : public PandasBlock { Status Allocate() override { return AllocateNDArray(NPY_FLOAT64); } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { Type::type type = col->type()->id(); double* out_buffer = @@ -868,7 +871,7 @@ class BoolBlock : public PandasBlock { Status Allocate() override { return AllocateNDArray(NPY_BOOL); } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { Type::type type = col->type()->id(); if (type != Type::BOOL) { @@ -903,7 +906,7 @@ class DatetimeBlock : public PandasBlock { Status Allocate() override { return AllocateDatetime(2); } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { Type::type type = col->type()->id(); int64_t* out_buffer = @@ -981,14 +984,14 @@ class CategoricalBlock : public PandasBlock { constexpr int npy_type = arrow_traits::npy_type; if (!(npy_type == NPY_INT8 || npy_type == NPY_INT16 || npy_type == NPY_INT32 || - npy_type == NPY_INT64)) { + npy_type == NPY_INT64)) { return Status::Invalid("Category indices must be signed integers"); } return AllocateNDArray(npy_type, 1); } Status Write(const std::shared_ptr& col, int64_t abs_placement, - int64_t rel_placement) override { + int64_t rel_placement) override { using T = typename arrow_traits::T; T* out_values = reinterpret_cast(block_data_) + rel_placement * num_rows_; @@ -1036,7 +1039,7 @@ class CategoricalBlock : public PandasBlock { }; Status MakeBlock(PandasBlock::type type, int64_t num_rows, int num_columns, - std::shared_ptr* block) { + std::shared_ptr* block) { #define BLOCK_CASE(NAME, TYPE) \ case PandasBlock::NAME: \ *block = std::make_shared(num_rows, 
num_columns); \ @@ -1066,7 +1069,8 @@ Status MakeBlock(PandasBlock::type type, int64_t num_rows, int num_columns, } static inline Status MakeCategoricalBlock(const std::shared_ptr& type, - int64_t num_rows, std::shared_ptr* block) { + int64_t num_rows, + std::shared_ptr* block) { // All categoricals become a block with a single column auto dict_type = static_cast(type.get()); switch (dict_type->index_type()->id()) { @@ -1259,7 +1263,9 @@ class DataFrameBlockCreator { block = it->second; } else { auto it = this->blocks_.find(output_type); - if (it == this->blocks_.end()) { return Status::KeyError("No block allocated"); } + if (it == this->blocks_.end()) { + return Status::KeyError("No block allocated"); + } block = it->second; } return block->Write(col, i, rel_placement); @@ -1286,7 +1292,9 @@ class DataFrameBlockCreator { int column_num; while (!error_occurred) { column_num = task_counter.fetch_add(1); - if (column_num >= this->table_->num_columns()) { break; } + if (column_num >= this->table_->num_columns()) { + break; + } Status s = WriteColumn(column_num); if (!s.ok()) { std::lock_guard lock(error_mtx); @@ -1301,7 +1309,9 @@ class DataFrameBlockCreator { thread.join(); } - if (error_occurred) { return error; } + if (error_occurred) { + return error; + } } return Status::OK(); } @@ -1310,7 +1320,9 @@ class DataFrameBlockCreator { for (const auto& it : blocks) { PyObject* item; RETURN_NOT_OK(it.second->GetPyResult(&item)); - if (PyList_Append(list, item) < 0) { RETURN_IF_PYERROR(); } + if (PyList_Append(list, item) < 0) { + RETURN_IF_PYERROR(); + } // ARROW-1017; PyList_Append increments object refcount Py_DECREF(item); @@ -1432,7 +1444,7 @@ class ArrowDeserializer { template typename std::enable_if::value || std::is_base_of::value, - Status>::type + Status>::type Visit(const Type& type) { constexpr int TYPE = Type::type_id; using traits = arrow_traits; @@ -1603,22 +1615,22 @@ class ArrowDeserializer { PyObject* result_; }; -Status ConvertArrayToPandas( - const std::shared_ptr& arr, PyObject* py_ref, PyObject** out) { +Status ConvertArrayToPandas(const std::shared_ptr& arr, PyObject* py_ref, + PyObject** out) { static std::string dummy_name = "dummy"; auto field = std::make_shared(dummy_name, arr->type()); auto col = std::make_shared(field, arr); return ConvertColumnToPandas(col, py_ref, out); } -Status ConvertColumnToPandas( - const std::shared_ptr& col, PyObject* py_ref, PyObject** out) { +Status ConvertColumnToPandas(const std::shared_ptr& col, PyObject* py_ref, + PyObject** out) { ArrowDeserializer converter(col, py_ref); return converter.Convert(out); } -Status ConvertTableToPandas( - const std::shared_ptr& table, int nthreads, PyObject** out) { +Status ConvertTableToPandas(const std::shared_ptr
<Table>& table, int nthreads, + PyObject** out) { DataFrameBlockCreator helper(table); return helper.Convert(nthreads, out); } diff --git a/cpp/src/arrow/python/arrow_to_pandas.h b/cpp/src/arrow/python/arrow_to_pandas.h index c606dcbbe0aa7..5a99274a33ee0 100644 --- a/cpp/src/arrow/python/arrow_to_pandas.h +++ b/cpp/src/arrow/python/arrow_to_pandas.h @@ -40,12 +40,12 @@ class Table; namespace py { ARROW_EXPORT -Status ConvertArrayToPandas( - const std::shared_ptr<Array>& arr, PyObject* py_ref, PyObject** out); +Status ConvertArrayToPandas(const std::shared_ptr<Array>& arr, PyObject* py_ref, + PyObject** out); ARROW_EXPORT -Status ConvertColumnToPandas( - const std::shared_ptr<Column>& col, PyObject* py_ref, PyObject** out); +Status ConvertColumnToPandas(const std::shared_ptr<Column>& col, PyObject* py_ref, + PyObject** out); struct PandasOptions { bool strings_to_categorical; @@ -58,8 +58,8 @@ struct PandasOptions { // // tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2]) ARROW_EXPORT -Status ConvertTableToPandas( - const std::shared_ptr<Table>
& table, int nthreads, PyObject** out); +Status ConvertTableToPandas(const std::shared_ptr<Table>
& table, int nthreads, + PyObject** out); } // namespace py } // namespace arrow diff --git a/cpp/src/arrow/python/builtin_convert.cc b/cpp/src/arrow/python/builtin_convert.cc index a76b6ba25531c..6eaa37fb8ca93 100644 --- a/cpp/src/arrow/python/builtin_convert.cc +++ b/cpp/src/arrow/python/builtin_convert.cc @@ -44,8 +44,8 @@ static inline bool IsPyInteger(PyObject* obj) { #endif } -Status InvalidConversion( - PyObject* obj, const std::string& expected_types, std::ostream* out) { +Status InvalidConversion(PyObject* obj, const std::string& expected_types, + std::ostream* out) { OwnedRef type(PyObject_Type(obj)); RETURN_IF_PYERROR(); DCHECK_NE(type.obj(), nullptr); @@ -161,7 +161,9 @@ class SeqVisitor { // co-recursive with VisitElem Status Visit(PyObject* obj, int level = 0) { - if (level > max_nesting_level_) { max_nesting_level_ = level; } + if (level > max_nesting_level_) { + max_nesting_level_ = level; + } // Loop through either a sequence or an iterator. if (PySequence_Check(obj)) { Py_ssize_t size = PySequence_Size(obj); @@ -226,7 +228,9 @@ class SeqVisitor { int max_observed_level() const { int result = 0; for (int i = 0; i < MAX_NESTING_LEVELS; ++i) { - if (nesting_histogram_[i] > 0) { result = i; } + if (nesting_histogram_[i] > 0) { + result = i; + } } return result; } @@ -235,7 +239,9 @@ class SeqVisitor { int num_nesting_levels() const { int result = 0; for (int i = 0; i < MAX_NESTING_LEVELS; ++i) { - if (nesting_histogram_[i] > 0) { ++result; } + if (nesting_histogram_[i] > 0) { + ++result; + } } return result; } @@ -300,13 +306,15 @@ Status InferArrowType(PyObject* obj, std::shared_ptr* out_type) { RETURN_NOT_OK(seq_visitor.Validate()); *out_type = seq_visitor.GetType(); - if (*out_type == nullptr) { return Status::TypeError("Unable to determine data type"); } + if (*out_type == nullptr) { + return Status::TypeError("Unable to determine data type"); + } return Status::OK(); } -Status InferArrowTypeAndSize( - PyObject* obj, int64_t* size, std::shared_ptr* out_type) { +Status InferArrowTypeAndSize(PyObject* obj, int64_t* size, + std::shared_ptr* out_type) { RETURN_NOT_OK(InferArrowSize(obj, size)); // For 0-length sequences, refuse to guess @@ -372,7 +380,9 @@ class TypedConverterVisitor : public TypedConverter { RETURN_NOT_OK(static_cast(this)->AppendItem(ref)); ++i; } - if (size != i) { RETURN_NOT_OK(this->typed_builder_->Resize(i)); } + if (size != i) { + RETURN_NOT_OK(this->typed_builder_->Resize(i)); + } } else { return Status::TypeError("Object is not a sequence or iterable"); } @@ -487,8 +497,9 @@ class FixedWidthBytesConverter inline Status AppendItem(const OwnedRef& item) { PyObject* bytes_obj; OwnedRef tmp; - Py_ssize_t expected_length = std::dynamic_pointer_cast( - typed_builder_->type())->byte_width(); + Py_ssize_t expected_length = + std::dynamic_pointer_cast(typed_builder_->type()) + ->byte_width(); if (item.obj() == Py_None) { RETURN_NOT_OK(typed_builder_->AppendNull()); return Status::OK(); @@ -636,7 +647,7 @@ Status ListConverter::Init(ArrayBuilder* builder) { } Status AppendPySequence(PyObject* obj, int64_t size, - const std::shared_ptr& type, ArrayBuilder* builder) { + const std::shared_ptr& type, ArrayBuilder* builder) { PyDateTime_IMPORT; std::shared_ptr converter = GetConverter(type); if (converter == nullptr) { @@ -656,7 +667,7 @@ Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr } Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr* out, - const std::shared_ptr& type, int64_t size) { + const std::shared_ptr& 
type, int64_t size) { // Handle NA / NullType case if (type->id() == Type::NA) { out->reset(new NullArray(size)); @@ -671,7 +682,7 @@ Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr } Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr* out, - const std::shared_ptr& type) { + const std::shared_ptr& type) { int64_t size; RETURN_NOT_OK(InferArrowSize(obj, &size)); return ConvertPySequence(obj, pool, out, type, size); diff --git a/cpp/src/arrow/python/builtin_convert.h b/cpp/src/arrow/python/builtin_convert.h index 4f84fbb7caca9..cde7a1bd4cfdc 100644 --- a/cpp/src/arrow/python/builtin_convert.h +++ b/cpp/src/arrow/python/builtin_convert.h @@ -39,14 +39,15 @@ class Status; namespace py { -ARROW_EXPORT arrow::Status InferArrowType( - PyObject* obj, std::shared_ptr* out_type); +ARROW_EXPORT arrow::Status InferArrowType(PyObject* obj, + std::shared_ptr* out_type); ARROW_EXPORT arrow::Status InferArrowTypeAndSize( PyObject* obj, int64_t* size, std::shared_ptr* out_type); ARROW_EXPORT arrow::Status InferArrowSize(PyObject* obj, int64_t* size); ARROW_EXPORT arrow::Status AppendPySequence(PyObject* obj, int64_t size, - const std::shared_ptr& type, arrow::ArrayBuilder* builder); + const std::shared_ptr& type, + arrow::ArrayBuilder* builder); // Type and size inference ARROW_EXPORT @@ -55,19 +56,19 @@ Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr // Size inference ARROW_EXPORT Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr* out, - const std::shared_ptr& type); + const std::shared_ptr& type); // No inference ARROW_EXPORT Status ConvertPySequence(PyObject* obj, MemoryPool* pool, std::shared_ptr* out, - const std::shared_ptr& type, int64_t size); + const std::shared_ptr& type, int64_t size); ARROW_EXPORT -Status InvalidConversion( - PyObject* obj, const std::string& expected_type_name, std::ostream* out); +Status InvalidConversion(PyObject* obj, const std::string& expected_type_name, + std::ostream* out); -ARROW_EXPORT Status CheckPythonBytesAreFixedLength( - PyObject* obj, Py_ssize_t expected_length); +ARROW_EXPORT Status CheckPythonBytesAreFixedLength(PyObject* obj, + Py_ssize_t expected_length); } // namespace py } // namespace arrow diff --git a/cpp/src/arrow/python/config.cc b/cpp/src/arrow/python/config.cc index 3cec7c41a2f31..92ca9db9cc391 100644 --- a/cpp/src/arrow/python/config.cc +++ b/cpp/src/arrow/python/config.cc @@ -16,8 +16,6 @@ // under the License. 
#include "arrow/python/platform.h" -#include - #include "arrow/python/config.h" namespace arrow { diff --git a/cpp/src/arrow/python/helpers.cc b/cpp/src/arrow/python/helpers.cc index 76ec3a1ba8746..164e42e52e48e 100644 --- a/cpp/src/arrow/python/helpers.cc +++ b/cpp/src/arrow/python/helpers.cc @@ -89,8 +89,8 @@ Status PythonDecimalToString(PyObject* python_decimal, std::string* out) { return Status::OK(); } -Status InferDecimalPrecisionAndScale( - PyObject* python_decimal, int* precision, int* scale) { +Status InferDecimalPrecisionAndScale(PyObject* python_decimal, int* precision, + int* scale) { // Call Python's str(decimal_object) OwnedRef str_obj(PyObject_Str(python_decimal)); RETURN_IF_PYERROR(); @@ -102,12 +102,12 @@ Status InferDecimalPrecisionAndScale( auto size = str.size; std::string c_string(bytes, size); - return FromString( - c_string, static_cast(nullptr), precision, scale); + return FromString(c_string, static_cast(nullptr), precision, + scale); } -Status DecimalFromString( - PyObject* decimal_constructor, const std::string& decimal_string, PyObject** out) { +Status DecimalFromString(PyObject* decimal_constructor, const std::string& decimal_string, + PyObject** out) { DCHECK_NE(decimal_constructor, nullptr); DCHECK_NE(out, nullptr); @@ -117,8 +117,8 @@ Status DecimalFromString( auto string_bytes = decimal_string.c_str(); DCHECK_NE(string_bytes, nullptr); - *out = PyObject_CallFunction( - decimal_constructor, const_cast("s#"), string_bytes, string_size); + *out = PyObject_CallFunction(decimal_constructor, const_cast("s#"), string_bytes, + string_size); RETURN_IF_PYERROR(); return Status::OK(); } diff --git a/cpp/src/arrow/python/helpers.h b/cpp/src/arrow/python/helpers.h index e0656699ce4c2..8b8c6673c8ebb 100644 --- a/cpp/src/arrow/python/helpers.h +++ b/cpp/src/arrow/python/helpers.h @@ -36,16 +36,17 @@ class OwnedRef; ARROW_EXPORT std::shared_ptr GetPrimitiveType(Type::type type); Status ARROW_EXPORT ImportModule(const std::string& module_name, OwnedRef* ref); -Status ARROW_EXPORT ImportFromModule( - const OwnedRef& module, const std::string& module_name, OwnedRef* ref); +Status ARROW_EXPORT ImportFromModule(const OwnedRef& module, + const std::string& module_name, OwnedRef* ref); Status ARROW_EXPORT PythonDecimalToString(PyObject* python_decimal, std::string* out); -Status ARROW_EXPORT InferDecimalPrecisionAndScale( - PyObject* python_decimal, int* precision = nullptr, int* scale = nullptr); +Status ARROW_EXPORT InferDecimalPrecisionAndScale(PyObject* python_decimal, + int* precision = nullptr, + int* scale = nullptr); -Status ARROW_EXPORT DecimalFromString( - PyObject* decimal_constructor, const std::string& decimal_string, PyObject** out); +Status ARROW_EXPORT DecimalFromString(PyObject* decimal_constructor, + const std::string& decimal_string, PyObject** out); } // namespace py } // namespace arrow diff --git a/cpp/src/arrow/python/init.cc b/cpp/src/arrow/python/init.cc index db648915465a8..dba293bbe2366 100644 --- a/cpp/src/arrow/python/init.cc +++ b/cpp/src/arrow/python/init.cc @@ -21,6 +21,4 @@ #include "arrow/python/init.h" #include "arrow/python/numpy_interop.h" -int arrow_init_numpy() { - return arrow::py::import_numpy(); -} +int arrow_init_numpy() { return arrow::py::import_numpy(); } diff --git a/cpp/src/arrow/python/io.cc b/cpp/src/arrow/python/io.cc index a7193854c4d01..4c73fd6401cb6 100644 --- a/cpp/src/arrow/python/io.cc +++ b/cpp/src/arrow/python/io.cc @@ -33,23 +33,19 @@ namespace py { // 
---------------------------------------------------------------------- // Python file -PythonFile::PythonFile(PyObject* file) : file_(file) { - Py_INCREF(file_); -} +PythonFile::PythonFile(PyObject* file) : file_(file) { Py_INCREF(file_); } -PythonFile::~PythonFile() { - Py_DECREF(file_); -} +PythonFile::~PythonFile() { Py_DECREF(file_); } // This is annoying: because C++11 does not allow implicit conversion of string // literals to non-const char*, we need to go through some gymnastics to use // PyObject_CallMethod without a lot of pain (its arguments are non-const // char*) template -static inline PyObject* cpp_PyObject_CallMethod( - PyObject* obj, const char* method_name, const char* argspec, ArgTypes... args) { - return PyObject_CallMethod( - obj, const_cast(method_name), const_cast(argspec), args...); +static inline PyObject* cpp_PyObject_CallMethod(PyObject* obj, const char* method_name, + const char* argspec, ArgTypes... args) { + return PyObject_CallMethod(obj, const_cast(method_name), + const_cast(argspec), args...); } Status PythonFile::Close() { @@ -103,9 +99,7 @@ Status PythonFile::Tell(int64_t* position) { // ---------------------------------------------------------------------- // Seekable input stream -PyReadableFile::PyReadableFile(PyObject* file) { - file_.reset(new PythonFile(file)); -} +PyReadableFile::PyReadableFile(PyObject* file) { file_.reset(new PythonFile(file)); } PyReadableFile::~PyReadableFile() {} @@ -167,9 +161,7 @@ Status PyReadableFile::GetSize(int64_t* size) { return Status::OK(); } -bool PyReadableFile::supports_zero_copy() const { - return false; -} +bool PyReadableFile::supports_zero_copy() const { return false; } // ---------------------------------------------------------------------- // Output stream diff --git a/cpp/src/arrow/python/numpy_convert.cc b/cpp/src/arrow/python/numpy_convert.cc index c391b5d7a1018..95d63b8fecb5b 100644 --- a/cpp/src/arrow/python/numpy_convert.cc +++ b/cpp/src/arrow/python/numpy_convert.cc @@ -38,7 +38,7 @@ namespace py { bool is_contiguous(PyObject* array) { if (PyArray_Check(array)) { return (PyArray_FLAGS(reinterpret_cast(array)) & - (NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS)) != 0; + (NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS)) != 0; } else { return false; } @@ -49,8 +49,12 @@ int cast_npy_type_compat(int type_num) { // U/LONGLONG to U/INT64 so things work properly. 
#if (NPY_INT64 == NPY_LONGLONG) && (NPY_SIZEOF_LONGLONG == 8) - if (type_num == NPY_LONGLONG) { type_num = NPY_INT64; } - if (type_num == NPY_ULONGLONG) { type_num = NPY_UINT64; } + if (type_num == NPY_LONGLONG) { + type_num = NPY_INT64; + } + if (type_num == NPY_ULONGLONG) { + type_num = NPY_UINT64; + } #endif return type_num; @@ -66,13 +70,13 @@ NumPyBuffer::NumPyBuffer(PyObject* ao) : Buffer(nullptr, 0) { size_ = PyArray_SIZE(ndarray) * PyArray_DESCR(ndarray)->elsize; capacity_ = size_; - if (PyArray_FLAGS(ndarray) & NPY_ARRAY_WRITEABLE) { is_mutable_ = true; } + if (PyArray_FLAGS(ndarray) & NPY_ARRAY_WRITEABLE) { + is_mutable_ = true; + } } } -NumPyBuffer::~NumPyBuffer() { - Py_XDECREF(arr_); -} +NumPyBuffer::~NumPyBuffer() { Py_XDECREF(arr_); } #define TO_ARROW_TYPE_CASE(NPY_NAME, FACTORY) \ case NPY_##NPY_NAME: \ @@ -198,7 +202,9 @@ Status NumPyDtypeToArrow(PyObject* dtype, std::shared_ptr* out) { #undef TO_ARROW_TYPE_CASE Status NdarrayToTensor(MemoryPool* pool, PyObject* ao, std::shared_ptr* out) { - if (!PyArray_Check(ao)) { return Status::TypeError("Did not pass ndarray object"); } + if (!PyArray_Check(ao)) { + return Status::TypeError("Did not pass ndarray object"); + } PyArrayObject* ndarray = reinterpret_cast(ao); @@ -242,18 +248,27 @@ Status TensorToNdarray(const Tensor& tensor, PyObject* base, PyObject** out) { } const void* immutable_data = nullptr; - if (tensor.data()) { immutable_data = tensor.data()->data(); } + if (tensor.data()) { + immutable_data = tensor.data()->data(); + } // Remove const =( void* mutable_data = const_cast(immutable_data); int array_flags = 0; - if (tensor.is_row_major()) { array_flags |= NPY_ARRAY_C_CONTIGUOUS; } - if (tensor.is_column_major()) { array_flags |= NPY_ARRAY_F_CONTIGUOUS; } - if (tensor.is_mutable()) { array_flags |= NPY_ARRAY_WRITEABLE; } + if (tensor.is_row_major()) { + array_flags |= NPY_ARRAY_C_CONTIGUOUS; + } + if (tensor.is_column_major()) { + array_flags |= NPY_ARRAY_F_CONTIGUOUS; + } + if (tensor.is_mutable()) { + array_flags |= NPY_ARRAY_WRITEABLE; + } - PyObject* result = PyArray_NewFromDescr(&PyArray_Type, dtype, tensor.ndim(), - npy_shape.data(), npy_strides.data(), mutable_data, array_flags, nullptr); + PyObject* result = + PyArray_NewFromDescr(&PyArray_Type, dtype, tensor.ndim(), npy_shape.data(), + npy_strides.data(), mutable_data, array_flags, nullptr); RETURN_IF_PYERROR() if (base != Py_None) { diff --git a/cpp/src/arrow/python/numpy_convert.h b/cpp/src/arrow/python/numpy_convert.h index a486646cdec64..7b3b3b7c9a2a0 100644 --- a/cpp/src/arrow/python/numpy_convert.h +++ b/cpp/src/arrow/python/numpy_convert.h @@ -63,8 +63,8 @@ Status GetTensorType(PyObject* dtype, std::shared_ptr* out); ARROW_EXPORT Status GetNumPyType(const DataType& type, int* type_num); -ARROW_EXPORT Status NdarrayToTensor( - MemoryPool* pool, PyObject* ao, std::shared_ptr* out); +ARROW_EXPORT Status NdarrayToTensor(MemoryPool* pool, PyObject* ao, + std::shared_ptr* out); ARROW_EXPORT Status TensorToNdarray(const Tensor& tensor, PyObject* base, PyObject** out); diff --git a/cpp/src/arrow/python/pandas_to_arrow.cc b/cpp/src/arrow/python/pandas_to_arrow.cc index 1368c3605a4e3..be5634b53bbfe 100644 --- a/cpp/src/arrow/python/pandas_to_arrow.cc +++ b/cpp/src/arrow/python/pandas_to_arrow.cc @@ -75,9 +75,7 @@ static inline bool PyObject_is_string(const PyObject* obj) { #endif } -static inline bool PyObject_is_float(const PyObject* obj) { - return PyFloat_Check(obj); -} +static inline bool PyObject_is_float(const PyObject* obj) { return PyFloat_Check(obj); } 
static inline bool PyObject_is_integer(const PyObject* obj) { return (!PyBool_Check(obj)) && PyArray_IsIntegerScalar(obj); @@ -120,8 +118,8 @@ static int64_t MaskToBitmap(PyArrayObject* mask, int64_t length, uint8_t* bitmap } template -static int64_t ValuesToValidBytes( - const void* data, int64_t length, uint8_t* valid_bytes) { +static int64_t ValuesToValidBytes(const void* data, int64_t length, + uint8_t* valid_bytes) { typedef npy_traits traits; typedef typename traits::value_type T; @@ -163,7 +161,8 @@ constexpr int64_t kBinaryMemoryLimit = std::numeric_limits::max(); /// be length of arr if fully consumed /// \param[out] have_bytes true if we encountered any PyBytes object static Status AppendObjectStrings(PyArrayObject* arr, PyArrayObject* mask, int64_t offset, - StringBuilder* builder, int64_t* end_offset, bool* have_bytes) { + StringBuilder* builder, int64_t* end_offset, + bool* have_bytes) { PyObject* obj; Ndarray1DIndexer objects(arr); @@ -210,8 +209,9 @@ static Status AppendObjectStrings(PyArrayObject* arr, PyArrayObject* mask, int64 } static Status AppendObjectFixedWidthBytes(PyArrayObject* arr, PyArrayObject* mask, - int byte_width, int64_t offset, FixedSizeBinaryBuilder* builder, - int64_t* end_offset) { + int byte_width, int64_t offset, + FixedSizeBinaryBuilder* builder, + int64_t* end_offset) { PyObject* obj; Ndarray1DIndexer objects(arr); @@ -245,8 +245,8 @@ static Status AppendObjectFixedWidthBytes(PyArrayObject* arr, PyArrayObject* mas } RETURN_NOT_OK(CheckPythonBytesAreFixedLength(obj, byte_width)); - if (ARROW_PREDICT_FALSE( - builder->value_data_length() + byte_width > kBinaryMemoryLimit)) { + if (ARROW_PREDICT_FALSE(builder->value_data_length() + byte_width > + kBinaryMemoryLimit)) { break; } RETURN_NOT_OK( @@ -263,13 +263,15 @@ static Status AppendObjectFixedWidthBytes(PyArrayObject* arr, PyArrayObject* mas class PandasConverter { public: - PandasConverter( - MemoryPool* pool, PyObject* ao, PyObject* mo, const std::shared_ptr& type) + PandasConverter(MemoryPool* pool, PyObject* ao, PyObject* mo, + const std::shared_ptr& type) : pool_(pool), type_(type), arr_(reinterpret_cast(ao)), mask_(nullptr) { - if (mo != nullptr && mo != Py_None) { mask_ = reinterpret_cast(mo); } + if (mo != nullptr && mo != Py_None) { + mask_ = reinterpret_cast(mo); + } length_ = static_cast(PyArray_SIZE(arr_)); } @@ -315,7 +317,9 @@ class PandasConverter { Status VisitNative() { using traits = arrow_traits; - if (mask_ != nullptr || traits::supports_nulls) { RETURN_NOT_OK(InitNullBitmap()); } + if (mask_ != nullptr || traits::supports_nulls) { + RETURN_NOT_OK(InitNullBitmap()); + } std::shared_ptr data; RETURN_NOT_OK(ConvertData(&data)); @@ -337,7 +341,7 @@ class PandasConverter { template typename std::enable_if::value || std::is_same::value, - Status>::type + Status>::type Visit(const T& type) { return VisitNative(); } @@ -373,7 +377,9 @@ class PandasConverter { return Status::Invalid("only handle 1-dimensional arrays"); } - if (type_ == nullptr) { return Status::Invalid("Must pass data type"); } + if (type_ == nullptr) { + return Status::Invalid("Must pass data type"); + } // Visit the type to perform conversion return VisitTypeInline(*type_, this); @@ -385,8 +391,8 @@ class PandasConverter { // Conversion logic for various object dtype arrays template - Status ConvertTypedLists( - const std::shared_ptr& type, ListBuilder* builder, PyObject* list); + Status ConvertTypedLists(const std::shared_ptr& type, ListBuilder* builder, + PyObject* list); template Status ConvertDates(); @@ -397,8 
+403,8 @@ class PandasConverter { Status ConvertObjectFixedWidthBytes(const std::shared_ptr& type); Status ConvertObjectIntegers(); Status ConvertLists(const std::shared_ptr& type); - Status ConvertLists( - const std::shared_ptr& type, ListBuilder* builder, PyObject* list); + Status ConvertLists(const std::shared_ptr& type, ListBuilder* builder, + PyObject* list); Status ConvertObjects(); Status ConvertDecimals(); Status ConvertTimes(); @@ -428,12 +434,14 @@ void CopyStrided(T* input_data, int64_t length, int64_t stride, T* output_data) } template <> -void CopyStrided( - PyObject** input_data, int64_t length, int64_t stride, PyObject** output_data) { +void CopyStrided(PyObject** input_data, int64_t length, int64_t stride, + PyObject** output_data) { int64_t j = 0; for (int64_t i = 0; i < length; ++i) { output_data[i] = input_data[j]; - if (output_data[i] != nullptr) { Py_INCREF(output_data[i]); } + if (output_data[i] != nullptr) { + Py_INCREF(output_data[i]); + } j += stride; } } @@ -458,7 +466,7 @@ inline Status PandasConverter::ConvertData(std::shared_ptr* data) { auto new_buffer = std::make_shared(pool_); RETURN_NOT_OK(new_buffer->Resize(sizeof(T) * length_)); CopyStrided(reinterpret_cast(PyArray_DATA(arr_)), length_, stride_elements, - reinterpret_cast(new_buffer->mutable_data())); + reinterpret_cast(new_buffer->mutable_data())); *data = new_buffer; } else { // Can zero-copy @@ -479,7 +487,9 @@ inline Status PandasConverter::ConvertData(std::shared_ptr* memset(bitmap, 0, nbytes); for (int i = 0; i < length_; ++i) { - if (values[i] > 0) { BitUtil::SetBit(bitmap, i); } + if (values[i] > 0) { + BitUtil::SetBit(bitmap, i); + } } *data = buffer; @@ -913,8 +923,8 @@ Status LoopPySequence(PyObject* sequence, T func) { } template -inline Status PandasConverter::ConvertTypedLists( - const std::shared_ptr& type, ListBuilder* builder, PyObject* list) { +inline Status PandasConverter::ConvertTypedLists(const std::shared_ptr& type, + ListBuilder* builder, PyObject* list) { typedef npy_traits traits; typedef typename traits::value_type T; typedef typename traits::BuilderClass BuilderT; @@ -1002,8 +1012,8 @@ inline Status PandasConverter::ConvertTypedLists( RETURN_NOT_OK(CheckFlatNumpyArray(numpy_array, NPY_OBJECT)); int64_t offset = 0; - RETURN_NOT_OK(AppendObjectStrings( - numpy_array, nullptr, 0, value_builder, &offset, &have_bytes)); + RETURN_NOT_OK(AppendObjectStrings(numpy_array, nullptr, 0, value_builder, &offset, + &have_bytes)); if (offset < PyArray_SIZE(numpy_array)) { return Status::Invalid("Array cell value exceeded 2GB"); } @@ -1032,8 +1042,8 @@ inline Status PandasConverter::ConvertTypedLists( return ConvertTypedLists(type, builder, list); \ } -Status PandasConverter::ConvertLists( - const std::shared_ptr& type, ListBuilder* builder, PyObject* list) { +Status PandasConverter::ConvertLists(const std::shared_ptr& type, + ListBuilder* builder, PyObject* list) { switch (type->id()) { LIST_CASE(UINT8, NPY_UINT8, UInt8Type) LIST_CASE(INT8, NPY_INT8, Int8Type) @@ -1080,7 +1090,7 @@ Status PandasConverter::ConvertLists(const std::shared_ptr& type) { } Status PandasToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, - const std::shared_ptr& type, std::shared_ptr* out) { + const std::shared_ptr& type, std::shared_ptr* out) { PandasConverter converter(pool, ao, mo, type); RETURN_NOT_OK(converter.Convert()); *out = converter.result()[0]; @@ -1088,7 +1098,8 @@ Status PandasToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, } Status PandasObjectsToArrow(MemoryPool* pool, PyObject* ao, PyObject* 
mo, - const std::shared_ptr& type, std::shared_ptr* out) { + const std::shared_ptr& type, + std::shared_ptr* out) { PandasConverter converter(pool, ao, mo, type); RETURN_NOT_OK(converter.ConvertObjects()); *out = std::make_shared(converter.result()); diff --git a/cpp/src/arrow/python/pandas_to_arrow.h b/cpp/src/arrow/python/pandas_to_arrow.h index 8f1862470bc94..3e655ba3feec0 100644 --- a/cpp/src/arrow/python/pandas_to_arrow.h +++ b/cpp/src/arrow/python/pandas_to_arrow.h @@ -38,7 +38,7 @@ namespace py { ARROW_EXPORT Status PandasToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, - const std::shared_ptr& type, std::shared_ptr* out); + const std::shared_ptr& type, std::shared_ptr* out); /// Convert dtype=object arrays. If target data type is not known, pass a type /// with nullptr @@ -50,7 +50,8 @@ Status PandasToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, /// \param[out] out a ChunkedArray, to accommodate chunked output ARROW_EXPORT Status PandasObjectsToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, - const std::shared_ptr& type, std::shared_ptr* out); + const std::shared_ptr& type, + std::shared_ptr* out); } // namespace py } // namespace arrow diff --git a/cpp/src/arrow/python/platform.h b/cpp/src/arrow/python/platform.h index a354b38f04cea..ae394695fac0d 100644 --- a/cpp/src/arrow/python/platform.h +++ b/cpp/src/arrow/python/platform.h @@ -23,6 +23,7 @@ #include #include +#include // Work around C2528 error #if _MSC_VER >= 1900 diff --git a/cpp/src/arrow/python/pyarrow.cc b/cpp/src/arrow/python/pyarrow.cc index 5d88051117b78..d080cc0a8147f 100644 --- a/cpp/src/arrow/python/pyarrow.cc +++ b/cpp/src/arrow/python/pyarrow.cc @@ -31,13 +31,9 @@ namespace { namespace arrow { namespace py { -int import_pyarrow() { - return ::import_pyarrow__lib(); -} +int import_pyarrow() { return ::import_pyarrow__lib(); } -bool is_buffer(PyObject* buffer) { - return ::pyarrow_is_buffer(buffer) != 0; -} +bool is_buffer(PyObject* buffer) { return ::pyarrow_is_buffer(buffer) != 0; } Status unwrap_buffer(PyObject* buffer, std::shared_ptr* out) { *out = ::pyarrow_unwrap_buffer(buffer); @@ -52,9 +48,7 @@ PyObject* wrap_buffer(const std::shared_ptr& buffer) { return ::pyarrow_wrap_buffer(buffer); } -bool is_data_type(PyObject* data_type) { - return ::pyarrow_is_data_type(data_type) != 0; -} +bool is_data_type(PyObject* data_type) { return ::pyarrow_is_data_type(data_type) != 0; } Status unwrap_data_type(PyObject* object, std::shared_ptr* out) { *out = ::pyarrow_unwrap_data_type(object); @@ -69,9 +63,7 @@ PyObject* wrap_data_type(const std::shared_ptr& type) { return ::pyarrow_wrap_data_type(type); } -bool is_field(PyObject* field) { - return ::pyarrow_is_field(field) != 0; -} +bool is_field(PyObject* field) { return ::pyarrow_is_field(field) != 0; } Status unwrap_field(PyObject* field, std::shared_ptr* out) { *out = ::pyarrow_unwrap_field(field); @@ -86,9 +78,7 @@ PyObject* wrap_field(const std::shared_ptr& field) { return ::pyarrow_wrap_field(field); } -bool is_schema(PyObject* schema) { - return ::pyarrow_is_schema(schema) != 0; -} +bool is_schema(PyObject* schema) { return ::pyarrow_is_schema(schema) != 0; } Status unwrap_schema(PyObject* schema, std::shared_ptr* out) { *out = ::pyarrow_unwrap_schema(schema); @@ -103,9 +93,7 @@ PyObject* wrap_schema(const std::shared_ptr& schema) { return ::pyarrow_wrap_schema(schema); } -bool is_array(PyObject* array) { - return ::pyarrow_is_array(array) != 0; -} +bool is_array(PyObject* array) { return ::pyarrow_is_array(array) != 0; } Status 
unwrap_array(PyObject* array, std::shared_ptr<Array>* out) { *out = ::pyarrow_unwrap_array(array); @@ -120,9 +108,7 @@ PyObject* wrap_array(const std::shared_ptr<Array>& array) { return ::pyarrow_wrap_array(array); } -bool is_tensor(PyObject* tensor) { - return ::pyarrow_is_tensor(tensor) != 0; -} +bool is_tensor(PyObject* tensor) { return ::pyarrow_is_tensor(tensor) != 0; } Status unwrap_tensor(PyObject* tensor, std::shared_ptr<Tensor>* out) { *out = ::pyarrow_unwrap_tensor(tensor); @@ -137,9 +123,7 @@ PyObject* wrap_tensor(const std::shared_ptr<Tensor>& tensor) { return ::pyarrow_wrap_tensor(tensor); } -bool is_column(PyObject* column) { - return ::pyarrow_is_column(column) != 0; -} +bool is_column(PyObject* column) { return ::pyarrow_is_column(column) != 0; } Status unwrap_column(PyObject* column, std::shared_ptr<Column>* out) { *out = ::pyarrow_unwrap_column(column); @@ -154,9 +138,7 @@ PyObject* wrap_column(const std::shared_ptr<Column>& column) { return ::pyarrow_wrap_column(column); } -bool is_table(PyObject* table) { - return ::pyarrow_is_table(table) != 0; -} +bool is_table(PyObject* table) { return ::pyarrow_is_table(table) != 0; } Status unwrap_table(PyObject* table, std::shared_ptr<Table>
* out) { *out = ::pyarrow_unwrap_table(table); @@ -171,9 +153,7 @@ PyObject* wrap_table(const std::shared_ptr<Table>
& table) { return ::pyarrow_wrap_table(table); } -bool is_record_batch(PyObject* batch) { - return ::pyarrow_is_batch(batch) != 0; -} +bool is_record_batch(PyObject* batch) { return ::pyarrow_is_batch(batch) != 0; } Status unwrap_record_batch(PyObject* batch, std::shared_ptr* out) { *out = ::pyarrow_unwrap_batch(batch); diff --git a/cpp/src/arrow/python/pyarrow.h b/cpp/src/arrow/python/pyarrow.h index 7278d1c285785..e637627006177 100644 --- a/cpp/src/arrow/python/pyarrow.h +++ b/cpp/src/arrow/python/pyarrow.h @@ -74,8 +74,8 @@ ARROW_EXPORT Status unwrap_table(PyObject* table, std::shared_ptr
<Table>* out); ARROW_EXPORT PyObject* wrap_table(const std::shared_ptr<Table>
& table); ARROW_EXPORT bool is_record_batch(PyObject* batch); -ARROW_EXPORT Status unwrap_record_batch( - PyObject* batch, std::shared_ptr* out); +ARROW_EXPORT Status unwrap_record_batch(PyObject* batch, + std::shared_ptr* out); ARROW_EXPORT PyObject* wrap_record_batch(const std::shared_ptr& batch); } // namespace py diff --git a/cpp/src/arrow/python/python-test.cc b/cpp/src/arrow/python/python-test.cc index c0e555d4904d5..b50699d1ae9d4 100644 --- a/cpp/src/arrow/python/python-test.cc +++ b/cpp/src/arrow/python/python-test.cc @@ -36,9 +36,7 @@ namespace arrow { namespace py { -TEST(PyBuffer, InvalidInputObject) { - PyBuffer buffer(Py_None); -} +TEST(PyBuffer, InvalidInputObject) { PyBuffer buffer(Py_None); } TEST(DecimalTest, TestPythonDecimalToString) { PyAcquireGIL lock; @@ -58,8 +56,8 @@ TEST(DecimalTest, TestPythonDecimalToString) { auto c_string_size = decimal_string.size(); ASSERT_GT(c_string_size, 0); - OwnedRef pydecimal(PyObject_CallFunction( - Decimal.obj(), const_cast(format), c_string, c_string_size)); + OwnedRef pydecimal(PyObject_CallFunction(Decimal.obj(), const_cast(format), + c_string, c_string_size)); ASSERT_NE(pydecimal.obj(), nullptr); ASSERT_EQ(PyErr_Occurred(), nullptr); @@ -88,7 +86,8 @@ TEST(PandasConversionTest, TestObjectBlockWriteFails) { auto f3 = field("f3", utf8()); std::vector> fields = {f1, f2, f3}; std::vector> cols = {std::make_shared(f1, arr), - std::make_shared(f2, arr), std::make_shared(f3, arr)}; + std::make_shared(f2, arr), + std::make_shared(f3, arr)}; auto schema = std::make_shared(fields); auto table = std::make_shared
(schema, cols); diff --git a/cpp/src/arrow/python/util/datetime.h b/cpp/src/arrow/python/util/datetime.h index d32421e8e3652..de7515101518a 100644 --- a/cpp/src/arrow/python/util/datetime.h +++ b/cpp/src/arrow/python/util/datetime.h @@ -18,8 +18,8 @@ #ifndef PYARROW_UTIL_DATETIME_H #define PYARROW_UTIL_DATETIME_H -#include "arrow/python/platform.h" #include +#include "arrow/python/platform.h" namespace arrow { namespace py { @@ -31,8 +31,8 @@ static inline int64_t PyTime_to_us(PyObject* pytime) { PyDateTime_TIME_GET_MICROSECOND(pytime)); } -static inline Status PyTime_from_int( - int64_t val, const TimeUnit::type unit, PyObject** out) { +static inline Status PyTime_from_int(int64_t val, const TimeUnit::type unit, + PyObject** out) { int64_t hour = 0, minute = 0, second = 0, microsecond = 0; switch (unit) { case TimeUnit::NANO: @@ -65,7 +65,7 @@ static inline Status PyTime_from_int( break; } *out = PyTime_FromTime(static_cast(hour), static_cast(minute), - static_cast(second), static_cast(microsecond)); + static_cast(second), static_cast(microsecond)); return Status::OK(); } diff --git a/cpp/src/arrow/status.cc b/cpp/src/arrow/status.cc index 99897428eaed3..9b509b4835126 100644 --- a/cpp/src/arrow/status.cc +++ b/cpp/src/arrow/status.cc @@ -33,7 +33,9 @@ void Status::CopyFrom(const State* state) { } std::string Status::CodeAsString() const { - if (state_ == NULL) { return "OK"; } + if (state_ == NULL) { + return "OK"; + } const char* type; switch (code()) { @@ -70,7 +72,9 @@ std::string Status::CodeAsString() const { std::string Status::ToString() const { std::string result(CodeAsString()); - if (state_ == NULL) { return result; } + if (state_ == NULL) { + return result; + } result += ": "; result += state_->msg; return result; diff --git a/cpp/src/arrow/status.h b/cpp/src/arrow/status.h index 1bea1fca84ebb..a02752f21e4b9 100644 --- a/cpp/src/arrow/status.h +++ b/cpp/src/arrow/status.h @@ -23,10 +23,12 @@ #include "arrow/util/visibility.h" // Return the given status if it is not OK. -#define ARROW_RETURN_NOT_OK(s) \ - do { \ - ::arrow::Status _s = (s); \ - if (ARROW_PREDICT_FALSE(!_s.ok())) { return _s; } \ +#define ARROW_RETURN_NOT_OK(s) \ + do { \ + ::arrow::Status _s = (s); \ + if (ARROW_PREDICT_FALSE(!_s.ok())) { \ + return _s; \ + } \ } while (0) // If 'to_call' returns a bad status, CHECK immediately with a logged message @@ -43,10 +45,12 @@ namespace arrow { -#define RETURN_NOT_OK(s) \ - do { \ - Status _s = (s); \ - if (ARROW_PREDICT_FALSE(!_s.ok())) { return _s; } \ +#define RETURN_NOT_OK(s) \ + do { \ + Status _s = (s); \ + if (ARROW_PREDICT_FALSE(!_s.ok())) { \ + return _s; \ + } \ } while (0) #define RETURN_NOT_OK_ELSE(s, else_) \ @@ -187,7 +191,9 @@ inline Status::Status(const Status& s) inline void Status::operator=(const Status& s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. 
- if (state_ != s.state_) { CopyFrom(s.state_); } + if (state_ != s.state_) { + CopyFrom(s.state_); + } } } // namespace arrow diff --git a/cpp/src/arrow/table-test.cc b/cpp/src/arrow/table-test.cc index e46fdc77cf761..8dba8c052e922 100644 --- a/cpp/src/arrow/table-test.cc +++ b/cpp/src/arrow/table-test.cc @@ -198,11 +198,11 @@ class TestTable : public TestBase { schema_ = std::make_shared(fields); arrays_ = {MakePrimitive(length), MakePrimitive(length), - MakePrimitive(length)}; + MakePrimitive(length)}; columns_ = {std::make_shared(schema_->field(0), arrays_[0]), - std::make_shared(schema_->field(1), arrays_[1]), - std::make_shared(schema_->field(2), arrays_[2])}; + std::make_shared(schema_->field(1), arrays_[1]), + std::make_shared(schema_->field(2), arrays_[2])}; } protected: @@ -412,8 +412,8 @@ TEST_F(TestTable, AddColumn) { ASSERT_OK(table.AddColumn(0, columns_[0], &result)); auto ex_schema = std::shared_ptr(new Schema( {schema_->field(0), schema_->field(0), schema_->field(1), schema_->field(2)})); - std::vector> ex_columns = { - table.column(0), table.column(0), table.column(1), table.column(2)}; + std::vector> ex_columns = {table.column(0), table.column(0), + table.column(1), table.column(2)}; ASSERT_TRUE(result->Equals(Table(ex_schema, ex_columns))); ASSERT_OK(table.AddColumn(1, columns_[0], &result)); diff --git a/cpp/src/arrow/table.cc b/cpp/src/arrow/table.cc index c09628ed395c4..a0a25079e6ed7 100644 --- a/cpp/src/arrow/table.cc +++ b/cpp/src/arrow/table.cc @@ -43,8 +43,12 @@ ChunkedArray::ChunkedArray(const ArrayVector& chunks) : chunks_(chunks) { } bool ChunkedArray::Equals(const ChunkedArray& other) const { - if (length_ != other.length()) { return false; } - if (null_count_ != other.null_count()) { return false; } + if (length_ != other.length()) { + return false; + } + if (null_count_ != other.null_count()) { + return false; + } // Check contents of the underlying arrays. This checks for equality of // the underlying data independently of the chunk size. 
@@ -57,10 +61,10 @@ bool ChunkedArray::Equals(const ChunkedArray& other) const { while (elements_compared < length_) { const std::shared_ptr this_array = chunks_[this_chunk_idx]; const std::shared_ptr other_array = other.chunk(other_chunk_idx); - int64_t common_length = std::min( - this_array->length() - this_start_idx, other_array->length() - other_start_idx); + int64_t common_length = std::min(this_array->length() - this_start_idx, + other_array->length() - other_start_idx); if (!this_array->RangeEquals(this_start_idx, this_start_idx + common_length, - other_start_idx, other_array)) { + other_start_idx, other_array)) { return false; } @@ -85,8 +89,12 @@ bool ChunkedArray::Equals(const ChunkedArray& other) const { } bool ChunkedArray::Equals(const std::shared_ptr& other) const { - if (this == other.get()) { return true; } - if (!other) { return false; } + if (this == other.get()) { + return true; + } + if (!other) { + return false; + } return Equals(*other.get()); } @@ -107,18 +115,24 @@ Column::Column(const std::shared_ptr& field, const std::shared_ptr Column::Column(const std::string& name, const std::shared_ptr& data) : Column(::arrow::field(name, data->type()), data) {} -Column::Column( - const std::shared_ptr& field, const std::shared_ptr& data) +Column::Column(const std::shared_ptr& field, + const std::shared_ptr& data) : field_(field), data_(data) {} bool Column::Equals(const Column& other) const { - if (!field_->Equals(other.field())) { return false; } + if (!field_->Equals(other.field())) { + return false; + } return data_->Equals(other.data()); } bool Column::Equals(const std::shared_ptr& other) const { - if (this == other.get()) { return true; } - if (!other) { return false; } + if (this == other.get()) { + return true; + } + if (!other) { + return false; + } return Equals(*other.get()); } @@ -141,11 +155,13 @@ Status Column::ValidateData() { void AssertBatchValid(const RecordBatch& batch) { Status s = batch.Validate(); - if (!s.ok()) { DCHECK(false) << s.ToString(); } + if (!s.ok()) { + DCHECK(false) << s.ToString(); + } } RecordBatch::RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - const std::vector>& columns) + const std::vector>& columns) : schema_(schema), num_rows_(num_rows), columns_(columns.size()) { for (size_t i = 0; i < columns.size(); ++i) { columns_[i] = columns[i]->data(); @@ -153,7 +169,7 @@ RecordBatch::RecordBatch(const std::shared_ptr& schema, int64_t num_rows } RecordBatch::RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - std::vector>&& columns) + std::vector>&& columns) : schema_(schema), num_rows_(num_rows), columns_(columns.size()) { for (size_t i = 0; i < columns.size(); ++i) { columns_[i] = columns[i]->data(); @@ -161,11 +177,11 @@ RecordBatch::RecordBatch(const std::shared_ptr& schema, int64_t num_rows } RecordBatch::RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - std::vector>&& columns) + std::vector>&& columns) : schema_(schema), num_rows_(num_rows), columns_(std::move(columns)) {} RecordBatch::RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - const std::vector>& columns) + const std::vector>& columns) : schema_(schema), num_rows_(num_rows), columns_(columns) {} std::shared_ptr RecordBatch::column(int i) const { @@ -184,7 +200,9 @@ bool RecordBatch::Equals(const RecordBatch& other) const { } for (int i = 0; i < num_columns(); ++i) { - if (!column(i)->Equals(other.column(i))) { return false; } + if (!column(i)->Equals(other.column(i))) { + return false; + } } return true; @@ -196,7 +214,9 @@ bool 
RecordBatch::ApproxEquals(const RecordBatch& other) const { } for (int i = 0; i < num_columns(); ++i) { - if (!column(i)->ApproxEquals(other.column(i))) { return false; } + if (!column(i)->ApproxEquals(other.column(i))) { + return false; + } } return true; @@ -253,7 +273,7 @@ Status RecordBatch::Validate() const { // Table methods Table::Table(const std::shared_ptr& schema, - const std::vector>& columns) + const std::vector>& columns) : schema_(schema), columns_(columns) { if (columns.size() == 0) { num_rows_ = 0; @@ -263,7 +283,7 @@ Table::Table(const std::shared_ptr& schema, } Table::Table(const std::shared_ptr& schema, - const std::vector>& columns, int64_t num_rows) + const std::vector>& columns, int64_t num_rows) : schema_(schema), columns_(columns), num_rows_(num_rows) {} std::shared_ptr
<Table> Table::ReplaceSchemaMetadata( @@ -273,7 +293,7 @@ std::shared_ptr<Table>
Table::ReplaceSchemaMetadata( } Status Table::FromRecordBatches(const std::vector<std::shared_ptr<RecordBatch>>& batches, - std::shared_ptr<Table>
* table) { + std::shared_ptr<Table>
* table) { if (batches.size() == 0) { return Status::Invalid("Must pass at least one record batch"); } @@ -307,9 +327,11 @@ Status Table::FromRecordBatches(const std::vector<std::shared_ptr<RecordBatch>>& return Status::OK(); } -Status ConcatenateTables( - const std::vector<std::shared_ptr<Table>>& tables, std::shared_ptr<Table>
* table) { - if (tables.size() == 0) { return Status::Invalid("Must pass at least one table"); } +Status ConcatenateTables(const std::vector<std::shared_ptr<Table>>& tables, + std::shared_ptr<Table>
* table) { + if (tables.size() == 0) { + return Status::Invalid("Must pass at least one table"); + } std::shared_ptr schema = tables[0]->schema(); @@ -343,12 +365,20 @@ Status ConcatenateTables( } bool Table::Equals(const Table& other) const { - if (this == &other) { return true; } - if (!schema_->Equals(*other.schema())) { return false; } - if (static_cast(columns_.size()) != other.num_columns()) { return false; } + if (this == &other) { + return true; + } + if (!schema_->Equals(*other.schema())) { + return false; + } + if (static_cast(columns_.size()) != other.num_columns()) { + return false; + } for (int i = 0; i < static_cast(columns_.size()); i++) { - if (!columns_[i]->Equals(other.column(i))) { return false; } + if (!columns_[i]->Equals(other.column(i))) { + return false; + } } return true; } @@ -361,9 +391,11 @@ Status Table::RemoveColumn(int i, std::shared_ptr
<Table>* out) const { return Status::OK(); } -Status Table::AddColumn( - int i, const std::shared_ptr<Column>& col, std::shared_ptr<Table>
* out) const { - if (i < 0 || i > num_columns() + 1) { return Status::Invalid("Invalid column index."); } +Status Table::AddColumn(int i, const std::shared_ptr<Column>& col, + std::shared_ptr<Table>
* out) const { + if (i < 0 || i > num_columns() + 1) { + return Status::Invalid("Invalid column index."); + } if (col == nullptr) { std::stringstream ss; ss << "Column " << i << " was null"; @@ -407,7 +439,8 @@ Status Table::ValidateColumns() const { } Status ARROW_EXPORT MakeTable(const std::shared_ptr& schema, - const std::vector>& arrays, std::shared_ptr
<Table>* table) { + const std::vector<std::shared_ptr<Array>>& arrays, + std::shared_ptr<Table>
* table) { // Make sure the length of the schema corresponds to the length of the vector if (schema->num_fields() != static_cast(arrays.size())) { std::stringstream ss; diff --git a/cpp/src/arrow/table.h b/cpp/src/arrow/table.h index 7ada0e9709f05..6afd618da043b 100644 --- a/cpp/src/arrow/table.h +++ b/cpp/src/arrow/table.h @@ -121,11 +121,11 @@ class ARROW_EXPORT RecordBatch { /// num_rows RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - const std::vector>& columns); + const std::vector>& columns); /// \brief Deprecated move constructor for a vector of Array instances RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - std::vector>&& columns); + std::vector>&& columns); /// \brief Construct record batch from vector of internal data structures /// \since 0.5.0 @@ -138,12 +138,12 @@ class ARROW_EXPORT RecordBatch { /// should be equal to the length of each field /// \param columns the data for the batch's columns RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - std::vector>&& columns); + std::vector>&& columns); /// \brief Construct record batch by copying vector of array data /// \since 0.5.0 RecordBatch(const std::shared_ptr& schema, int64_t num_rows, - const std::vector>& columns); + const std::vector>& columns); bool Equals(const RecordBatch& other) const; @@ -194,14 +194,14 @@ class ARROW_EXPORT Table { public: // If columns is zero-length, the table's number of rows is zero Table(const std::shared_ptr& schema, - const std::vector>& columns); + const std::vector>& columns); // num_rows is a parameter to allow for tables of a particular size not // having any materialized columns. Each column should therefore have the // same length as num_rows -- you can validate this using // Table::ValidateColumns Table(const std::shared_ptr& schema, - const std::vector>& columns, int64_t num_rows); + const std::vector>& columns, int64_t num_rows); // Construct table from RecordBatch, but only if all of the batch schemas are // equal. Returns Status::Invalid if there is some problem @@ -221,8 +221,8 @@ class ARROW_EXPORT Table { Status RemoveColumn(int i, std::shared_ptr
<Table>* out) const; /// Add column to the table, producing a new Table - Status AddColumn( - int i, const std::shared_ptr<Column>& column, std::shared_ptr<Table>
* out) const; + Status AddColumn(int i, const std::shared_ptr<Column>& column, + std::shared_ptr<Table>
* out) const; /// \brief Replace schema key-value metadata with new metadata (EXPERIMENTAL) /// \since 0.5.0 @@ -252,11 +252,12 @@ class ARROW_EXPORT Table { // Construct table from multiple input tables. Return Status::Invalid if // schemas are not equal -Status ARROW_EXPORT ConcatenateTables( - const std::vector<std::shared_ptr<Table>>& tables, std::shared_ptr<Table>
* table); +Status ARROW_EXPORT ConcatenateTables(const std::vector<std::shared_ptr<Table>>& tables, + std::shared_ptr<Table>
* table); Status ARROW_EXPORT MakeTable(const std::shared_ptr<Schema>& schema, - const std::vector<std::shared_ptr<Array>>& arrays, std::shared_ptr<Table>
* table); + const std::vector<std::shared_ptr<Array>>& arrays, + std::shared_ptr<Table>
* table); } // namespace arrow diff --git a/cpp/src/arrow/tensor.cc b/cpp/src/arrow/tensor.cc index bcd9d8d94c6b4..31b1a359219a6 100644 --- a/cpp/src/arrow/tensor.cc +++ b/cpp/src/arrow/tensor.cc @@ -35,7 +35,8 @@ namespace arrow { static void ComputeRowMajorStrides(const FixedWidthType& type, - const std::vector& shape, std::vector* strides) { + const std::vector& shape, + std::vector* strides) { int64_t remaining = type.bit_width() / 8; for (int64_t dimsize : shape) { remaining *= dimsize; @@ -53,7 +54,8 @@ static void ComputeRowMajorStrides(const FixedWidthType& type, } static void ComputeColumnMajorStrides(const FixedWidthType& type, - const std::vector& shape, std::vector* strides) { + const std::vector& shape, + std::vector* strides) { int64_t total = type.bit_width() / 8; for (int64_t dimsize : shape) { if (dimsize == 0) { @@ -69,8 +71,8 @@ static void ComputeColumnMajorStrides(const FixedWidthType& type, /// Constructor with strides and dimension names Tensor::Tensor(const std::shared_ptr& type, const std::shared_ptr& data, - const std::vector& shape, const std::vector& strides, - const std::vector& dim_names) + const std::vector& shape, const std::vector& strides, + const std::vector& dim_names) : type_(type), data_(data), shape_(shape), strides_(strides), dim_names_(dim_names) { DCHECK(is_tensor_supported(type->id())); if (shape.size() > 0 && strides.size() == 0) { @@ -79,11 +81,11 @@ Tensor::Tensor(const std::shared_ptr& type, const std::shared_ptr& type, const std::shared_ptr& data, - const std::vector& shape, const std::vector& strides) + const std::vector& shape, const std::vector& strides) : Tensor(type, data, shape, strides, {}) {} Tensor::Tensor(const std::shared_ptr& type, const std::shared_ptr& data, - const std::vector& shape) + const std::vector& shape) : Tensor(type, data, shape, {}, {}) {} const std::string& Tensor::dim_name(int i) const { @@ -100,9 +102,7 @@ int64_t Tensor::size() const { return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies()); } -bool Tensor::is_contiguous() const { - return is_row_major() || is_column_major(); -} +bool Tensor::is_contiguous() const { return is_row_major() || is_column_major(); } bool Tensor::is_row_major() const { std::vector c_strides; @@ -118,14 +118,14 @@ bool Tensor::is_column_major() const { return strides_ == f_strides; } -Type::type Tensor::type_id() const { - return type_->id(); -} +Type::type Tensor::type_id() const { return type_->id(); } bool Tensor::Equals(const Tensor& other) const { bool are_equal = false; Status error = TensorEquals(*this, other, &are_equal); - if (!error.ok()) { DCHECK(false) << "Tensors not comparable: " << error.ToString(); } + if (!error.ok()) { + DCHECK(false) << "Tensors not comparable: " << error.ToString(); + } return are_equal; } diff --git a/cpp/src/arrow/tensor.h b/cpp/src/arrow/tensor.h index 371f5911a4396..b074b8c309ba1 100644 --- a/cpp/src/arrow/tensor.h +++ b/cpp/src/arrow/tensor.h @@ -62,16 +62,16 @@ class ARROW_EXPORT Tensor { /// Constructor with no dimension names or strides, data assumed to be row-major Tensor(const std::shared_ptr& type, const std::shared_ptr& data, - const std::vector& shape); + const std::vector& shape); /// Constructor with non-negative strides Tensor(const std::shared_ptr& type, const std::shared_ptr& data, - const std::vector& shape, const std::vector& strides); + const std::vector& shape, const std::vector& strides); /// Constructor with strides and dimension names Tensor(const std::shared_ptr& type, const std::shared_ptr& data, - 
const std::vector& shape, const std::vector& strides, - const std::vector& dim_names); + const std::vector& shape, const std::vector& strides, + const std::vector& dim_names); std::shared_ptr type() const { return type_; } std::shared_ptr data() const { return data_; } diff --git a/cpp/src/arrow/test-util.h b/cpp/src/arrow/test-util.h index 2bc662526713e..1a3376cee6053 100644 --- a/cpp/src/arrow/test-util.h +++ b/cpp/src/arrow/test-util.h @@ -39,16 +39,20 @@ #include "arrow/util/logging.h" #include "arrow/util/random.h" -#define ASSERT_RAISES(ENUM, expr) \ - do { \ - ::arrow::Status s = (expr); \ - if (!s.Is##ENUM()) { FAIL() << s.ToString(); } \ +#define ASSERT_RAISES(ENUM, expr) \ + do { \ + ::arrow::Status s = (expr); \ + if (!s.Is##ENUM()) { \ + FAIL() << s.ToString(); \ + } \ } while (0) -#define ASSERT_OK(expr) \ - do { \ - ::arrow::Status s = (expr); \ - if (!s.ok()) { FAIL() << s.ToString(); } \ +#define ASSERT_OK(expr) \ + do { \ + ::arrow::Status s = (expr); \ + if (!s.ok()) { \ + FAIL() << s.ToString(); \ + } \ } while (0) #define ASSERT_OK_NO_THROW(expr) ASSERT_NO_THROW(ASSERT_OK(expr)) @@ -59,10 +63,12 @@ EXPECT_TRUE(s.ok()); \ } while (0) -#define ABORT_NOT_OK(s) \ - do { \ - ::arrow::Status _s = (s); \ - if (ARROW_PREDICT_FALSE(!_s.ok())) { exit(-1); } \ +#define ABORT_NOT_OK(s) \ + do { \ + ::arrow::Status _s = (s); \ + if (ARROW_PREDICT_FALSE(!_s.ok())) { \ + exit(-1); \ + } \ } while (0); namespace arrow { @@ -85,8 +91,8 @@ void randint(int64_t N, T lower, T upper, std::vector* out) { } template -void random_real( - int64_t n, uint32_t seed, T min_value, T max_value, std::vector* out) { +void random_real(int64_t n, uint32_t seed, T min_value, T max_value, + std::vector* out) { std::mt19937 gen(seed); std::uniform_real_distribution d(min_value, max_value); for (int64_t i = 0; i < n; ++i) { @@ -96,13 +102,13 @@ void random_real( template std::shared_ptr GetBufferFromVector(const std::vector& values) { - return std::make_shared( - reinterpret_cast(values.data()), values.size() * sizeof(T)); + return std::make_shared(reinterpret_cast(values.data()), + values.size() * sizeof(T)); } template -inline Status CopyBufferFromVector( - const std::vector& values, MemoryPool* pool, std::shared_ptr* result) { +inline Status CopyBufferFromVector(const std::vector& values, MemoryPool* pool, + std::shared_ptr* result) { int64_t nbytes = static_cast(values.size()) * sizeof(T); auto buffer = std::make_shared(pool); @@ -114,8 +120,8 @@ inline Status CopyBufferFromVector( } template -static inline Status GetBitmapFromVector( - const std::vector& is_valid, std::shared_ptr* result) { +static inline Status GetBitmapFromVector(const std::vector& is_valid, + std::shared_ptr* result) { size_t length = is_valid.size(); std::shared_ptr buffer; @@ -123,7 +129,9 @@ static inline Status GetBitmapFromVector( uint8_t* bitmap = buffer->mutable_data(); for (size_t i = 0; i < static_cast(length); ++i) { - if (is_valid[i]) { BitUtil::SetBit(bitmap, i); } + if (is_valid[i]) { + BitUtil::SetBit(bitmap, i); + } } *result = buffer; @@ -139,8 +147,8 @@ static inline void random_null_bytes(int64_t n, double pct_null, uint8_t* null_b } } -static inline void random_is_valid( - int64_t n, double pct_null, std::vector* is_valid) { +static inline void random_is_valid(int64_t n, double pct_null, + std::vector* is_valid) { Random rng(random_seed()); for (int64_t i = 0; i < n; ++i) { is_valid->push_back(rng.NextDoubleFraction() > pct_null); @@ -178,24 +186,28 @@ void rand_uniform_int(int64_t n, uint32_t seed, T min_value, T 
max_value, T* out static inline int64_t null_count(const std::vector& valid_bytes) { int64_t result = 0; for (size_t i = 0; i < valid_bytes.size(); ++i) { - if (valid_bytes[i] == 0) { ++result; } + if (valid_bytes[i] == 0) { + ++result; + } } return result; } Status MakeRandomInt32PoolBuffer(int64_t length, MemoryPool* pool, - std::shared_ptr* pool_buffer, uint32_t seed = 0) { + std::shared_ptr* pool_buffer, + uint32_t seed = 0) { DCHECK(pool); auto data = std::make_shared(pool); RETURN_NOT_OK(data->Resize(length * sizeof(int32_t))); test::rand_uniform_int(length, seed, 0, std::numeric_limits::max(), - reinterpret_cast(data->mutable_data())); + reinterpret_cast(data->mutable_data())); *pool_buffer = data; return Status::OK(); } Status MakeRandomBytePoolBuffer(int64_t length, MemoryPool* pool, - std::shared_ptr* pool_buffer, uint32_t seed = 0) { + std::shared_ptr* pool_buffer, + uint32_t seed = 0) { auto bytes = std::make_shared(pool); RETURN_NOT_OK(bytes->Resize(length)); test::random_bytes(length, seed, bytes->mutable_data()); @@ -207,8 +219,8 @@ Status MakeRandomBytePoolBuffer(int64_t length, MemoryPool* pool, template void ArrayFromVector(const std::shared_ptr& type, - const std::vector& is_valid, const std::vector& values, - std::shared_ptr* out) { + const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { MemoryPool* pool = default_memory_pool(); typename TypeTraits::BuilderType builder(pool, type); for (size_t i = 0; i < values.size(); ++i) { @@ -223,7 +235,7 @@ void ArrayFromVector(const std::shared_ptr& type, template void ArrayFromVector(const std::vector& is_valid, const std::vector& values, - std::shared_ptr* out) { + std::shared_ptr* out) { MemoryPool* pool = default_memory_pool(); typename TypeTraits::BuilderType builder(pool); for (size_t i = 0; i < values.size(); ++i) { @@ -248,7 +260,7 @@ void ArrayFromVector(const std::vector& values, std::shared_ptr* template Status MakeArray(const std::vector& valid_bytes, const std::vector& values, - int64_t size, Builder* builder, std::shared_ptr* out) { + int64_t size, Builder* builder, std::shared_ptr* out) { // Append the first 1000 for (int64_t i = 0; i < size; ++i) { if (valid_bytes[i] > 0) { diff --git a/cpp/src/arrow/type-test.cc b/cpp/src/arrow/type-test.cc index 7f3adef633767..6b86b4d2f1024 100644 --- a/cpp/src/arrow/type-test.cc +++ b/cpp/src/arrow/type-test.cc @@ -345,16 +345,16 @@ TEST(TestTimestampType, ToString) { } TEST(TestNestedType, Equals) { - auto create_struct = []( - std::string inner_name, std::string struct_name) -> shared_ptr { + auto create_struct = [](std::string inner_name, + std::string struct_name) -> shared_ptr { auto f_type = field(inner_name, int32()); vector> fields = {f_type}; auto s_type = std::make_shared(fields); return field(struct_name, s_type); }; - auto create_union = []( - std::string inner_name, std::string union_name) -> shared_ptr { + auto create_union = [](std::string inner_name, + std::string union_name) -> shared_ptr { auto f_type = field(inner_name, int32()); vector> fields = {f_type}; vector codes = {Type::INT32}; diff --git a/cpp/src/arrow/type.cc b/cpp/src/arrow/type.cc index 623c1934f875e..586da2d86d909 100644 --- a/cpp/src/arrow/type.cc +++ b/cpp/src/arrow/type.cc @@ -37,7 +37,7 @@ std::shared_ptr Field::AddMetadata( } Status Field::AddMetadata(const std::shared_ptr& metadata, - std::shared_ptr* out) const { + std::shared_ptr* out) const { *out = AddMetadata(metadata); return Status::OK(); } @@ -47,7 +47,9 @@ std::shared_ptr Field::RemoveMetadata() const { 
} bool Field::Equals(const Field& other) const { - if (this == &other) { return true; } + if (this == &other) { + return true; + } if (this->name_ == other.name_ && this->nullable_ == other.nullable_ && this->type_->Equals(*other.type_.get())) { if (metadata_ == nullptr && other.metadata_ == nullptr) { @@ -68,7 +70,9 @@ bool Field::Equals(const std::shared_ptr& other) const { std::string Field::ToString() const { std::stringstream ss; ss << this->name_ << ": " << this->type_->ToString(); - if (!this->nullable_) { ss << " not null"; } + if (!this->nullable_) { + ss << " not null"; + } return ss.str(); } @@ -77,34 +81,28 @@ DataType::~DataType() {} bool DataType::Equals(const DataType& other) const { bool are_equal = false; Status error = TypeEquals(*this, other, &are_equal); - if (!error.ok()) { DCHECK(false) << "Types not comparable: " << error.ToString(); } + if (!error.ok()) { + DCHECK(false) << "Types not comparable: " << error.ToString(); + } return are_equal; } bool DataType::Equals(const std::shared_ptr& other) const { - if (!other) { return false; } + if (!other) { + return false; + } return Equals(*other.get()); } -std::string BooleanType::ToString() const { - return name(); -} +std::string BooleanType::ToString() const { return name(); } -FloatingPoint::Precision HalfFloatType::precision() const { - return FloatingPoint::HALF; -} +FloatingPoint::Precision HalfFloatType::precision() const { return FloatingPoint::HALF; } -FloatingPoint::Precision FloatType::precision() const { - return FloatingPoint::SINGLE; -} +FloatingPoint::Precision FloatType::precision() const { return FloatingPoint::SINGLE; } -FloatingPoint::Precision DoubleType::precision() const { - return FloatingPoint::DOUBLE; -} +FloatingPoint::Precision DoubleType::precision() const { return FloatingPoint::DOUBLE; } -std::string StringType::ToString() const { - return std::string("string"); -} +std::string StringType::ToString() const { return std::string("string"); } std::string ListType::ToString() const { std::stringstream s; @@ -112,13 +110,9 @@ std::string ListType::ToString() const { return s.str(); } -std::string BinaryType::ToString() const { - return std::string("binary"); -} +std::string BinaryType::ToString() const { return std::string("binary"); } -int FixedSizeBinaryType::bit_width() const { - return CHAR_BIT * byte_width(); -} +int FixedSizeBinaryType::bit_width() const { return CHAR_BIT * byte_width(); } std::string FixedSizeBinaryType::ToString() const { std::stringstream ss; @@ -130,7 +124,9 @@ std::string StructType::ToString() const { std::stringstream s; s << "struct<"; for (int i = 0; i < this->num_children(); ++i) { - if (i > 0) { s << ", "; } + if (i > 0) { + s << ", "; + } std::shared_ptr field = this->child(i); s << field->name() << ": " << field->type()->ToString(); } @@ -148,13 +144,9 @@ Date32Type::Date32Type() : DateType(Type::DATE32, DateUnit::DAY) {} Date64Type::Date64Type() : DateType(Type::DATE64, DateUnit::MILLI) {} -std::string Date64Type::ToString() const { - return std::string("date64[ms]"); -} +std::string Date64Type::ToString() const { return std::string("date64[ms]"); } -std::string Date32Type::ToString() const { - return std::string("date32[day]"); -} +std::string Date32Type::ToString() const { return std::string("date32[day]"); } // ---------------------------------------------------------------------- // Time types @@ -190,7 +182,9 @@ std::string Time64Type::ToString() const { std::string TimestampType::ToString() const { std::stringstream ss; ss << "timestamp[" << this->unit_; 
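The StructType::ToString hunk above hinges on emitting the ", " separator before every child except the first; the same pattern in isolation, with pre-rendered child strings standing in for Field::ToString (illustrative only):

#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

// Renders e.g. "struct<a: int32, b: string>" from already-rendered children.
std::string RenderStruct(const std::vector<std::string>& child_reprs) {
  std::stringstream s;
  s << "struct<";
  for (std::size_t i = 0; i < child_reprs.size(); ++i) {
    if (i > 0) {
      s << ", ";
    }
    s << child_reprs[i];
  }
  s << ">";
  return s.str();
}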
- if (this->timezone_.size() > 0) { ss << ", tz=" << this->timezone_; } + if (this->timezone_.size() > 0) { + ss << ", tz=" << this->timezone_; + } ss << "]"; return ss.str(); } @@ -199,7 +193,7 @@ std::string TimestampType::ToString() const { // Union type UnionType::UnionType(const std::vector>& fields, - const std::vector& type_codes, UnionMode mode) + const std::vector& type_codes, UnionMode mode) : NestedType(Type::UNION), mode_(mode), type_codes_(type_codes) { children_ = fields; } @@ -214,7 +208,9 @@ std::string UnionType::ToString() const { } for (size_t i = 0; i < children_.size(); ++i) { - if (i) { s << ", "; } + if (i) { + s << ", "; + } s << children_[i]->ToString() << "=" << static_cast(type_codes_[i]); } s << ">"; @@ -225,7 +221,7 @@ std::string UnionType::ToString() const { // DictionaryType DictionaryType::DictionaryType(const std::shared_ptr& index_type, - const std::shared_ptr& dictionary, bool ordered) + const std::shared_ptr& dictionary, bool ordered) : FixedWidthType(Type::DICTIONARY), index_type_(index_type), dictionary_(dictionary), @@ -235,9 +231,7 @@ int DictionaryType::bit_width() const { return static_cast(index_type_.get())->bit_width(); } -std::shared_ptr DictionaryType::dictionary() const { - return dictionary_; -} +std::shared_ptr DictionaryType::dictionary() const { return dictionary_; } std::string DictionaryType::ToString() const { std::stringstream ss; @@ -249,23 +243,27 @@ std::string DictionaryType::ToString() const { // ---------------------------------------------------------------------- // Null type -std::string NullType::ToString() const { - return name(); -} +std::string NullType::ToString() const { return name(); } // ---------------------------------------------------------------------- // Schema implementation Schema::Schema(const std::vector>& fields, - const std::shared_ptr& metadata) + const std::shared_ptr& metadata) : fields_(fields), metadata_(metadata) {} bool Schema::Equals(const Schema& other) const { - if (this == &other) { return true; } + if (this == &other) { + return true; + } - if (num_fields() != other.num_fields()) { return false; } + if (num_fields() != other.num_fields()) { + return false; + } for (int i = 0; i < num_fields(); ++i) { - if (!field(i)->Equals(*other.field(i).get())) { return false; } + if (!field(i)->Equals(*other.field(i).get())) { + return false; + } } return true; } @@ -290,8 +288,8 @@ int64_t Schema::GetFieldIndex(const std::string& name) const { } } -Status Schema::AddField( - int i, const std::shared_ptr& field, std::shared_ptr* out) const { +Status Schema::AddField(int i, const std::shared_ptr& field, + std::shared_ptr* out) const { DCHECK_GE(i, 0); DCHECK_LE(i, this->num_fields()); @@ -305,7 +303,7 @@ std::shared_ptr Schema::AddMetadata( } Status Schema::AddMetadata(const std::shared_ptr& metadata, - std::shared_ptr* out) const { + std::shared_ptr* out) const { *out = AddMetadata(metadata); return Status::OK(); } @@ -327,7 +325,9 @@ std::string Schema::ToString() const { int i = 0; for (auto field : fields_) { - if (i > 0) { buffer << std::endl; } + if (i > 0) { + buffer << std::endl; + } buffer << field->ToString(); ++i; } @@ -422,18 +422,18 @@ std::shared_ptr struct_(const std::vector>& fie } std::shared_ptr union_(const std::vector>& child_fields, - const std::vector& type_codes, UnionMode mode) { + const std::vector& type_codes, UnionMode mode) { return std::make_shared(child_fields, type_codes, mode); } std::shared_ptr dictionary(const std::shared_ptr& index_type, - const std::shared_ptr& 
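The Schema::Equals reformatting above keeps the same comparison shape; a standalone sketch of that shape, with a hypothetical NamedField standing in for arrow::Field (names and members here are assumptions, not the library's):

#include <cstddef>
#include <string>
#include <vector>

struct NamedField {
  std::string name;
  std::string type_repr;
  bool nullable;

  bool Equals(const NamedField& other) const {
    return name == other.name && type_repr == other.type_repr &&
           nullable == other.nullable;
  }
};

// Same shape as Schema::Equals: identity check, field-count check, per-field Equals.
bool SchemaEquals(const std::vector<NamedField>& left,
                  const std::vector<NamedField>& right) {
  if (&left == &right) {
    return true;
  }
  if (left.size() != right.size()) {
    return false;
  }
  for (std::size_t i = 0; i < left.size(); ++i) {
    if (!left[i].Equals(right[i])) {
      return false;
    }
  }
  return true;
}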
dict_values) { + const std::shared_ptr& dict_values) { return std::make_shared(index_type, dict_values); } std::shared_ptr field(const std::string& name, - const std::shared_ptr& type, bool nullable, - const std::shared_ptr& metadata) { + const std::shared_ptr& type, bool nullable, + const std::shared_ptr& metadata) { return std::make_shared(name, type, nullable, metadata); } @@ -454,9 +454,7 @@ std::vector FixedWidthType::GetBufferLayout() const { return {kValidityBuffer, BufferDescr(BufferType::DATA, bit_width())}; } -std::vector NullType::GetBufferLayout() const { - return {}; -} +std::vector NullType::GetBufferLayout() const { return {}; } std::vector BinaryType::GetBufferLayout() const { return {kValidityBuffer, kOffsetBuffer, kValues8}; @@ -474,9 +472,7 @@ std::vector ListType::GetBufferLayout() const { return {kValidityBuffer, kOffsetBuffer}; } -std::vector StructType::GetBufferLayout() const { - return {kValidityBuffer}; -} +std::vector StructType::GetBufferLayout() const { return {kValidityBuffer}; } std::vector UnionType::GetBufferLayout() const { if (mode_ == UnionMode::SPARSE) { diff --git a/cpp/src/arrow/type.h b/cpp/src/arrow/type.h index fffb840e3cef7..e0df722e5668a 100644 --- a/cpp/src/arrow/type.h +++ b/cpp/src/arrow/type.h @@ -204,15 +204,15 @@ class NoExtraMeta {}; class ARROW_EXPORT Field { public: Field(const std::string& name, const std::shared_ptr& type, - bool nullable = true, - const std::shared_ptr& metadata = nullptr) + bool nullable = true, + const std::shared_ptr& metadata = nullptr) : name_(name), type_(type), nullable_(nullable), metadata_(metadata) {} std::shared_ptr metadata() const { return metadata_; } /// \deprecated Status AddMetadata(const std::shared_ptr& metadata, - std::shared_ptr* out) const; + std::shared_ptr* out) const; std::shared_ptr AddMetadata( const std::shared_ptr& metadata) const; @@ -489,7 +489,7 @@ class ARROW_EXPORT UnionType : public NestedType { static constexpr Type::type type_id = Type::UNION; UnionType(const std::vector>& fields, - const std::vector& type_codes, UnionMode mode = UnionMode::SPARSE); + const std::vector& type_codes, UnionMode mode = UnionMode::SPARSE); std::string ToString() const override; static std::string name() { return "union"; } @@ -669,7 +669,7 @@ class ARROW_EXPORT DictionaryType : public FixedWidthType { static constexpr Type::type type_id = Type::DICTIONARY; DictionaryType(const std::shared_ptr& index_type, - const std::shared_ptr& dictionary, bool ordered = false); + const std::shared_ptr& dictionary, bool ordered = false); int bit_width() const override; @@ -699,7 +699,7 @@ class ARROW_EXPORT DictionaryType : public FixedWidthType { class ARROW_EXPORT Schema { public: explicit Schema(const std::vector>& fields, - const std::shared_ptr& metadata = nullptr); + const std::shared_ptr& metadata = nullptr); virtual ~Schema() = default; /// Returns true if all of the schema fields are equal @@ -724,13 +724,13 @@ class ARROW_EXPORT Schema { /// \brief Render a string representation of the schema suitable for debugging std::string ToString() const; - Status AddField( - int i, const std::shared_ptr& field, std::shared_ptr* out) const; + Status AddField(int i, const std::shared_ptr& field, + std::shared_ptr* out) const; Status RemoveField(int i, std::shared_ptr* out) const; /// \deprecated Status AddMetadata(const std::shared_ptr& metadata, - std::shared_ptr* out) const; + std::shared_ptr* out) const; /// \brief Replace key-value metadata with new metadata /// @@ -761,8 +761,8 @@ std::shared_ptr ARROW_EXPORT 
list(const std::shared_ptr& value_ std::shared_ptr ARROW_EXPORT list(const std::shared_ptr& value_type); std::shared_ptr ARROW_EXPORT timestamp(TimeUnit::type unit); -std::shared_ptr ARROW_EXPORT timestamp( - TimeUnit::type unit, const std::string& timezone); +std::shared_ptr ARROW_EXPORT timestamp(TimeUnit::type unit, + const std::string& timezone); /// Unit can be either SECOND or MILLI std::shared_ptr ARROW_EXPORT time32(TimeUnit::type unit); @@ -770,18 +770,18 @@ std::shared_ptr ARROW_EXPORT time32(TimeUnit::type unit); /// Unit can be either MICRO or NANO std::shared_ptr ARROW_EXPORT time64(TimeUnit::type unit); -std::shared_ptr ARROW_EXPORT struct_( - const std::vector>& fields); +std::shared_ptr ARROW_EXPORT +struct_(const std::vector>& fields); -std::shared_ptr ARROW_EXPORT union_( - const std::vector>& child_fields, - const std::vector& type_codes, UnionMode mode = UnionMode::SPARSE); +std::shared_ptr ARROW_EXPORT +union_(const std::vector>& child_fields, + const std::vector& type_codes, UnionMode mode = UnionMode::SPARSE); std::shared_ptr ARROW_EXPORT dictionary( const std::shared_ptr& index_type, const std::shared_ptr& values); -std::shared_ptr ARROW_EXPORT field(const std::string& name, - const std::shared_ptr& type, bool nullable = true, +std::shared_ptr ARROW_EXPORT field( + const std::string& name, const std::shared_ptr& type, bool nullable = true, const std::shared_ptr& metadata = nullptr); // ---------------------------------------------------------------------- diff --git a/cpp/src/arrow/type_traits.h b/cpp/src/arrow/type_traits.h index 3e8ea23432b98..8be67b2a3829c 100644 --- a/cpp/src/arrow/type_traits.h +++ b/cpp/src/arrow/type_traits.h @@ -319,9 +319,10 @@ GET_ATTR(TypeClass, void); #undef GET_ATTR -#define PRIMITIVE_TRAITS(T) \ - using TypeClass = typename std::conditional::value, T, \ - typename GetAttr_TypeClass::type>::type; \ +#define PRIMITIVE_TRAITS(T) \ + using TypeClass = \ + typename std::conditional::value, T, \ + typename GetAttr_TypeClass::type>::type; \ using c_type = typename GetAttr_c_type::type; template diff --git a/cpp/src/arrow/util/bit-stream-utils.h b/cpp/src/arrow/util/bit-stream-utils.h index 537fdc3045ca5..318f5ba8b0e17 100644 --- a/cpp/src/arrow/util/bit-stream-utils.h +++ b/cpp/src/arrow/util/bit-stream-utils.h @@ -20,9 +20,9 @@ #ifndef ARROW_UTIL_BIT_STREAM_UTILS_H #define ARROW_UTIL_BIT_STREAM_UTILS_H +#include #include #include -#include #include "arrow/util/bit-util.h" #include "arrow/util/bpacking.h" @@ -229,13 +229,13 @@ inline bool BitWriter::PutVlqInt(uint32_t v) { template inline void GetValue_(int num_bits, T* v, int max_bytes, const uint8_t* buffer, - int* bit_offset, int* byte_offset, uint64_t* buffered_values) { + int* bit_offset, int* byte_offset, uint64_t* buffered_values) { #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4800) #endif - *v = static_cast( - BitUtil::TrailingBits(*buffered_values, *bit_offset + num_bits) >> *bit_offset); + *v = static_cast(BitUtil::TrailingBits(*buffered_values, *bit_offset + num_bits) >> + *bit_offset); #ifdef _MSC_VER #pragma warning(pop) #endif @@ -292,13 +292,14 @@ inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) { if (UNLIKELY(bit_offset != 0)) { for (; i < batch_size && bit_offset != 0; ++i) { GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset, - &buffered_values); + &buffered_values); } } if (sizeof(T) == 4) { - int num_unpacked = unpack32(reinterpret_cast(buffer + byte_offset), - reinterpret_cast(v + i), batch_size - i, num_bits); + 
int num_unpacked = + unpack32(reinterpret_cast(buffer + byte_offset), + reinterpret_cast(v + i), batch_size - i, num_bits); i += num_unpacked; byte_offset += num_unpacked * num_bits / 8; } else { @@ -307,8 +308,10 @@ inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) { while (i < batch_size) { int unpack_size = std::min(buffer_size, batch_size - i); int num_unpacked = unpack32(reinterpret_cast(buffer + byte_offset), - unpack_buffer, unpack_size, num_bits); - if (num_unpacked == 0) { break; } + unpack_buffer, unpack_size, num_bits); + if (num_unpacked == 0) { + break; + } for (int k = 0; k < num_unpacked; ++k) { #ifdef _MSC_VER #pragma warning(push) @@ -332,8 +335,8 @@ inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) { } for (; i < batch_size; ++i) { - GetValue_( - num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset, &buffered_values); + GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset, + &buffered_values); } bit_offset_ = bit_offset; diff --git a/cpp/src/arrow/util/bit-util-test.cc b/cpp/src/arrow/util/bit-util-test.cc index cd945585ba210..231bf54a2a3b6 100644 --- a/cpp/src/arrow/util/bit-util-test.cc +++ b/cpp/src/arrow/util/bit-util-test.cc @@ -35,7 +35,9 @@ namespace arrow { static void EnsureCpuInfoInitialized() { - if (!CpuInfo::initialized()) { CpuInfo::Init(); } + if (!CpuInfo::initialized()) { + CpuInfo::Init(); + } } TEST(BitUtilTests, TestIsMultipleOf64) { @@ -68,11 +70,13 @@ TEST(BitUtilTests, TestNextPower2) { ASSERT_EQ(1LL << 62, NextPower2((1LL << 62) - 1)); } -static inline int64_t SlowCountBits( - const uint8_t* data, int64_t bit_offset, int64_t length) { +static inline int64_t SlowCountBits(const uint8_t* data, int64_t bit_offset, + int64_t length) { int64_t count = 0; for (int64_t i = bit_offset; i < bit_offset + length; ++i) { - if (BitUtil::GetBit(data, i)) { ++count; } + if (BitUtil::GetBit(data, i)) { + ++count; + } } return count; } @@ -175,9 +179,9 @@ TEST(BitUtil, TrailingBits) { EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 0), 0); EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 1), 1); EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 64), - BOOST_BINARY(1 1 1 1 1 1 1 1)); + BOOST_BINARY(1 1 1 1 1 1 1 1)); EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 100), - BOOST_BINARY(1 1 1 1 1 1 1 1)); + BOOST_BINARY(1 1 1 1 1 1 1 1)); EXPECT_EQ(BitUtil::TrailingBits(0, 1), 0); EXPECT_EQ(BitUtil::TrailingBits(0, 64), 0); EXPECT_EQ(BitUtil::TrailingBits(1LL << 63, 0), 0); @@ -193,12 +197,12 @@ TEST(BitUtil, ByteSwap) { EXPECT_EQ(BitUtil::ByteSwap(static_cast(0x11223344)), 0x44332211); EXPECT_EQ(BitUtil::ByteSwap(static_cast(0)), 0); - EXPECT_EQ( - BitUtil::ByteSwap(static_cast(0x1122334455667788)), 0x8877665544332211); + EXPECT_EQ(BitUtil::ByteSwap(static_cast(0x1122334455667788)), + 0x8877665544332211); EXPECT_EQ(BitUtil::ByteSwap(static_cast(0)), 0); - EXPECT_EQ( - BitUtil::ByteSwap(static_cast(0x1122334455667788)), 0x8877665544332211); + EXPECT_EQ(BitUtil::ByteSwap(static_cast(0x1122334455667788)), + 0x8877665544332211); EXPECT_EQ(BitUtil::ByteSwap(static_cast(0)), 0); EXPECT_EQ(BitUtil::ByteSwap(static_cast(0x1122)), 0x2211); diff --git a/cpp/src/arrow/util/bit-util.cc b/cpp/src/arrow/util/bit-util.cc index 5bbec6f23111e..f255f95f30a76 100644 --- a/cpp/src/arrow/util/bit-util.cc +++ b/cpp/src/arrow/util/bit-util.cc @@ -36,12 +36,14 @@ namespace arrow { void BitUtil::FillBitsFromBytes(const std::vector& bytes, uint8_t* bits) { for (size_t i = 0; 
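The GetValue_ reformatting above extracts a value by masking the low bits of a buffered 64-bit word and shifting out the current bit offset; a standalone sketch of that mask-then-shift, assuming the usual "low num_bits of a little-endian packed word" semantics of BitUtil::TrailingBits:

#include <cstdint>

// Low num_bits bits of v (num_bits in [0, 64]).
static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
  if (num_bits == 0) return 0;
  if (num_bits >= 64) return v;
  return v & ((uint64_t{1} << num_bits) - 1);
}

// Extracts a num_bits-wide value starting bit_offset bits into buffered_values.
static inline uint32_t ExtractPackedValue(uint64_t buffered_values, int bit_offset,
                                           int num_bits) {
  return static_cast<uint32_t>(TrailingBits(buffered_values, bit_offset + num_bits) >>
                               bit_offset);
}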
i < bytes.size(); ++i) { - if (bytes[i] > 0) { SetBit(bits, i); } + if (bytes[i] > 0) { + SetBit(bits, i); + } } } -Status BitUtil::BytesToBits( - const std::vector& bytes, std::shared_ptr* out) { +Status BitUtil::BytesToBits(const std::vector& bytes, + std::shared_ptr* out) { int64_t bit_length = BitUtil::BytesForBits(bytes.size()); std::shared_ptr buffer; @@ -65,7 +67,9 @@ int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length) { // The number of bits until fast_count_start const int64_t initial_bits = std::min(length, fast_count_start - bit_offset); for (int64_t i = bit_offset; i < bit_offset + initial_bits; ++i) { - if (BitUtil::GetBit(data, i)) { ++count; } + if (BitUtil::GetBit(data, i)) { + ++count; + } } const int64_t fast_counts = (length - initial_bits) / pop_len; @@ -85,21 +89,23 @@ int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length) { // versions of popcount but the code complexity is likely not worth it) const int64_t tail_index = bit_offset + initial_bits + fast_counts * pop_len; for (int64_t i = tail_index; i < bit_offset + length; ++i) { - if (BitUtil::GetBit(data, i)) { ++count; } + if (BitUtil::GetBit(data, i)) { + ++count; + } } return count; } -Status GetEmptyBitmap( - MemoryPool* pool, int64_t length, std::shared_ptr* result) { +Status GetEmptyBitmap(MemoryPool* pool, int64_t length, + std::shared_ptr* result) { RETURN_NOT_OK(AllocateBuffer(pool, BitUtil::BytesForBits(length), result)); memset((*result)->mutable_data(), 0, static_cast((*result)->size())); return Status::OK(); } Status CopyBitmap(MemoryPool* pool, const uint8_t* data, int64_t offset, int64_t length, - std::shared_ptr* out) { + std::shared_ptr* out) { std::shared_ptr buffer; RETURN_NOT_OK(GetEmptyBitmap(pool, length, &buffer)); uint8_t* dest = buffer->mutable_data(); @@ -111,12 +117,14 @@ Status CopyBitmap(MemoryPool* pool, const uint8_t* data, int64_t offset, int64_t } bool BitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right, - int64_t right_offset, int64_t bit_length) { + int64_t right_offset, int64_t bit_length) { if (left_offset % 8 == 0 && right_offset % 8 == 0) { // byte aligned, can use memcmp bool bytes_equal = std::memcmp(left + left_offset / 8, right + right_offset / 8, - bit_length / 8) == 0; - if (!bytes_equal) { return false; } + bit_length / 8) == 0; + if (!bytes_equal) { + return false; + } for (int64_t i = (bit_length / 8) * 8; i < bit_length; ++i) { if (BitUtil::GetBit(left, left_offset + i) != BitUtil::GetBit(right, right_offset + i)) { diff --git a/cpp/src/arrow/util/bit-util.h b/cpp/src/arrow/util/bit-util.h index d055c751d16fa..f036763b8106e 100644 --- a/cpp/src/arrow/util/bit-util.h +++ b/cpp/src/arrow/util/bit-util.h @@ -101,17 +101,11 @@ static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128}; // the ~i byte version of kBitmaks static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127}; -static inline int64_t CeilByte(int64_t size) { - return (size + 7) & ~7; -} +static inline int64_t CeilByte(int64_t size) { return (size + 7) & ~7; } -static inline int64_t BytesForBits(int64_t size) { - return CeilByte(size) / 8; -} +static inline int64_t BytesForBits(int64_t size) { return CeilByte(size) / 8; } -static inline int64_t Ceil2Bytes(int64_t size) { - return (size + 15) & ~15; -} +static inline int64_t Ceil2Bytes(int64_t size) { return (size + 15) & ~15; } static inline bool GetBit(const uint8_t* bits, int64_t i) { return (bits[i / 8] & kBitmask[i % 8]) != 0; @@ -125,13 
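The CountSetBits hunk above uses a three-phase strategy: scalar bits up to the next 64-bit boundary, popcount over whole 64-bit words, then a scalar tail. A sketch of that strategy, with the GCC/Clang builtin standing in for the library's own popcount dispatch (an assumption for illustration):

#include <algorithm>
#include <cstdint>
#include <cstring>

static inline bool GetBitAt(const uint8_t* bits, int64_t i) {
  return (bits[i / 8] >> (i % 8)) & 1;
}

int64_t CountSetBitsSketch(const uint8_t* data, int64_t bit_offset, int64_t length) {
  int64_t count = 0;
  // Scalar head: bits before the first 64-bit-aligned position.
  const int64_t fast_count_start = ((bit_offset + 63) / 64) * 64;
  const int64_t initial_bits = std::min(length, fast_count_start - bit_offset);
  for (int64_t i = bit_offset; i < bit_offset + initial_bits; ++i) {
    if (GetBitAt(data, i)) ++count;
  }
  // Fast body: popcount whole 64-bit words.
  const int64_t fast_counts = (length - initial_bits) / 64;
  const uint8_t* word_ptr = data + fast_count_start / 8;
  for (int64_t i = 0; i < fast_counts; ++i) {
    uint64_t word;
    std::memcpy(&word, word_ptr + i * 8, sizeof(word));  // alignment-safe load
    count += __builtin_popcountll(word);
  }
  // Scalar tail: leftover bits after the last whole word.
  for (int64_t i = bit_offset + initial_bits + fast_counts * 64;
       i < bit_offset + length; ++i) {
    if (GetBitAt(data, i)) ++count;
  }
  return count;
}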
+119,13 @@ static inline void ClearBit(uint8_t* bits, int64_t i) { bits[i / 8] &= kFlippedBitmask[i % 8]; } -static inline void SetBit(uint8_t* bits, int64_t i) { - bits[i / 8] |= kBitmask[i % 8]; -} +static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; } /// Set bit if is_set is true, but cannot clear bit static inline void SetArrayBit(uint8_t* bits, int i, bool is_set) { - if (is_set) { SetBit(bits, i); } + if (is_set) { + SetBit(bits, i); + } } static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) { @@ -168,13 +162,9 @@ static inline int64_t NextPower2(int64_t n) { return n; } -static inline bool IsMultipleOf64(int64_t n) { - return (n & 63) == 0; -} +static inline bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; } -static inline bool IsMultipleOf8(int64_t n) { - return (n & 7) == 0; -} +static inline bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; } /// Returns the ceil of value/divisor static inline int64_t Ceil(int64_t value, int64_t divisor) { @@ -206,34 +196,22 @@ static inline int RoundDownToPowerOf2(int value, int factor) { /// Specialized round up and down functions for frequently used factors, /// like 8 (bits->bytes), 32 (bits->i32), and 64 (bits->i64). /// Returns the rounded up number of bytes that fit the number of bits. -static inline uint32_t RoundUpNumBytes(uint32_t bits) { - return (bits + 7) >> 3; -} +static inline uint32_t RoundUpNumBytes(uint32_t bits) { return (bits + 7) >> 3; } /// Returns the rounded down number of bytes that fit the number of bits. -static inline uint32_t RoundDownNumBytes(uint32_t bits) { - return bits >> 3; -} +static inline uint32_t RoundDownNumBytes(uint32_t bits) { return bits >> 3; } /// Returns the rounded up to 32 multiple. Used for conversions of bits to i32. -static inline uint32_t RoundUpNumi32(uint32_t bits) { - return (bits + 31) >> 5; -} +static inline uint32_t RoundUpNumi32(uint32_t bits) { return (bits + 31) >> 5; } /// Returns the rounded up 32 multiple. -static inline uint32_t RoundDownNumi32(uint32_t bits) { - return bits >> 5; -} +static inline uint32_t RoundDownNumi32(uint32_t bits) { return bits >> 5; } /// Returns the rounded up to 64 multiple. Used for conversions of bits to i64. -static inline uint32_t RoundUpNumi64(uint32_t bits) { - return (bits + 63) >> 6; -} +static inline uint32_t RoundUpNumi64(uint32_t bits) { return (bits + 63) >> 6; } /// Returns the rounded down to 64 multiple. -static inline uint32_t RoundDownNumi64(uint32_t bits) { - return bits >> 6; -} +static inline uint32_t RoundDownNumi64(uint32_t bits) { return bits >> 6; } static inline int64_t RoundUpToMultipleOf64(int64_t num) { // TODO(wesm): is this definitely needed? @@ -242,7 +220,9 @@ static inline int64_t RoundUpToMultipleOf64(int64_t num) { constexpr int64_t force_carry_addend = round_to - 1; constexpr int64_t truncate_bitmask = ~(round_to - 1); constexpr int64_t max_roundable_num = std::numeric_limits::max() - round_to; - if (num <= max_roundable_num) { return (num + force_carry_addend) & truncate_bitmask; } + if (num <= max_roundable_num) { + return (num + force_carry_addend) & truncate_bitmask; + } // handle overflow case. This should result in a malloc error upstream return num; } @@ -252,8 +232,7 @@ static inline int64_t RoundUpToMultipleOf64(int64_t num) { /// might be a much faster way to implement this. 
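The RoundUpToMultipleOf64 hunk above guards the rounding add against int64_t overflow; restated in standalone form with the int64_t limits spelled out and a usage note (same behavior as the hunk):

#include <cstdint>
#include <limits>

// Rounds num up to the next multiple of 64, refusing to overflow the add.
static inline int64_t RoundUpToMultipleOf64(int64_t num) {
  constexpr int64_t round_to = 64;
  constexpr int64_t force_carry_addend = round_to - 1;
  constexpr int64_t truncate_bitmask = ~(round_to - 1);
  constexpr int64_t max_roundable_num = std::numeric_limits<int64_t>::max() - round_to;
  if (num <= max_roundable_num) {
    return (num + force_carry_addend) & truncate_bitmask;
  }
  return num;  // overflow case: let the allocation fail upstream
}
// RoundUpToMultipleOf64(1) == 64, RoundUpToMultipleOf64(64) == 64,
// RoundUpToMultipleOf64(65) == 128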
static inline int PopcountNoHw(uint64_t x) { int count = 0; - for (; x != 0; ++count) - x &= x - 1; + for (; x != 0; ++count) x &= x - 1; return count; } @@ -297,21 +276,16 @@ static inline int Log2(uint64_t x) { // (floor(log2(n)) = MSB(n) (0-indexed)) --x; int result = 1; - while (x >>= 1) - ++result; + while (x >>= 1) ++result; return result; } /// Swaps the byte order (i.e. endianess) -static inline int64_t ByteSwap(int64_t value) { - return ARROW_BYTE_SWAP64(value); -} +static inline int64_t ByteSwap(int64_t value) { return ARROW_BYTE_SWAP64(value); } static inline uint64_t ByteSwap(uint64_t value) { return static_cast(ARROW_BYTE_SWAP64(value)); } -static inline int32_t ByteSwap(int32_t value) { - return ARROW_BYTE_SWAP32(value); -} +static inline int32_t ByteSwap(int32_t value) { return ARROW_BYTE_SWAP32(value); } static inline uint32_t ByteSwap(uint32_t value) { return static_cast(ARROW_BYTE_SWAP32(value)); } @@ -352,84 +326,36 @@ static inline void ByteSwap(void* dst, const void* src, int len) { /// Converts to big endian format (if not already in big endian) from the /// machine's native endian format. #if __BYTE_ORDER == __LITTLE_ENDIAN -static inline int64_t ToBigEndian(int64_t value) { - return ByteSwap(value); -} -static inline uint64_t ToBigEndian(uint64_t value) { - return ByteSwap(value); -} -static inline int32_t ToBigEndian(int32_t value) { - return ByteSwap(value); -} -static inline uint32_t ToBigEndian(uint32_t value) { - return ByteSwap(value); -} -static inline int16_t ToBigEndian(int16_t value) { - return ByteSwap(value); -} -static inline uint16_t ToBigEndian(uint16_t value) { - return ByteSwap(value); -} +static inline int64_t ToBigEndian(int64_t value) { return ByteSwap(value); } +static inline uint64_t ToBigEndian(uint64_t value) { return ByteSwap(value); } +static inline int32_t ToBigEndian(int32_t value) { return ByteSwap(value); } +static inline uint32_t ToBigEndian(uint32_t value) { return ByteSwap(value); } +static inline int16_t ToBigEndian(int16_t value) { return ByteSwap(value); } +static inline uint16_t ToBigEndian(uint16_t value) { return ByteSwap(value); } #else -static inline int64_t ToBigEndian(int64_t val) { - return val; -} -static inline uint64_t ToBigEndian(uint64_t val) { - return val; -} -static inline int32_t ToBigEndian(int32_t val) { - return val; -} -static inline uint32_t ToBigEndian(uint32_t val) { - return val; -} -static inline int16_t ToBigEndian(int16_t val) { - return val; -} -static inline uint16_t ToBigEndian(uint16_t val) { - return val; -} +static inline int64_t ToBigEndian(int64_t val) { return val; } +static inline uint64_t ToBigEndian(uint64_t val) { return val; } +static inline int32_t ToBigEndian(int32_t val) { return val; } +static inline uint32_t ToBigEndian(uint32_t val) { return val; } +static inline int16_t ToBigEndian(int16_t val) { return val; } +static inline uint16_t ToBigEndian(uint16_t val) { return val; } #endif /// Converts from big endian format to the machine's native endian format. 
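The ByteSwap overloads above delegate to ARROW_BYTE_SWAP32/64 (typically compiler intrinsics); a portable sketch of the 32-bit case matching the value checked in the ByteSwap test earlier in this patch:

#include <cstdint>

static inline uint32_t ByteSwap32(uint32_t v) {
  return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
         ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
}
// ByteSwap32(0x11223344u) == 0x44332211u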
#if __BYTE_ORDER == __LITTLE_ENDIAN -static inline int64_t FromBigEndian(int64_t value) { - return ByteSwap(value); -} -static inline uint64_t FromBigEndian(uint64_t value) { - return ByteSwap(value); -} -static inline int32_t FromBigEndian(int32_t value) { - return ByteSwap(value); -} -static inline uint32_t FromBigEndian(uint32_t value) { - return ByteSwap(value); -} -static inline int16_t FromBigEndian(int16_t value) { - return ByteSwap(value); -} -static inline uint16_t FromBigEndian(uint16_t value) { - return ByteSwap(value); -} +static inline int64_t FromBigEndian(int64_t value) { return ByteSwap(value); } +static inline uint64_t FromBigEndian(uint64_t value) { return ByteSwap(value); } +static inline int32_t FromBigEndian(int32_t value) { return ByteSwap(value); } +static inline uint32_t FromBigEndian(uint32_t value) { return ByteSwap(value); } +static inline int16_t FromBigEndian(int16_t value) { return ByteSwap(value); } +static inline uint16_t FromBigEndian(uint16_t value) { return ByteSwap(value); } #else -static inline int64_t FromBigEndian(int64_t val) { - return val; -} -static inline uint64_t FromBigEndian(uint64_t val) { - return val; -} -static inline int32_t FromBigEndian(int32_t val) { - return val; -} -static inline uint32_t FromBigEndian(uint32_t val) { - return val; -} -static inline int16_t FromBigEndian(int16_t val) { - return val; -} -static inline uint16_t FromBigEndian(uint16_t val) { - return val; -} +static inline int64_t FromBigEndian(int64_t val) { return val; } +static inline uint64_t FromBigEndian(uint64_t val) { return val; } +static inline int32_t FromBigEndian(int32_t val) { return val; } +static inline uint32_t FromBigEndian(uint32_t val) { return val; } +static inline int16_t FromBigEndian(int16_t val) { return val; } +static inline uint16_t FromBigEndian(uint16_t val) { return val; } #endif // Logical right shift for signed integer types @@ -449,8 +375,8 @@ ARROW_EXPORT Status BytesToBits(const std::vector&, std::shared_ptr* result); +Status ARROW_EXPORT GetEmptyBitmap(MemoryPool* pool, int64_t length, + std::shared_ptr* result); /// Copy a bit range of an existing bitmap /// @@ -462,7 +388,7 @@ Status ARROW_EXPORT GetEmptyBitmap( /// /// \return Status message Status ARROW_EXPORT CopyBitmap(MemoryPool* pool, const uint8_t* bitmap, int64_t offset, - int64_t length, std::shared_ptr* out); + int64_t length, std::shared_ptr* out); /// Compute the number of 1's in the given data array /// @@ -471,11 +397,12 @@ Status ARROW_EXPORT CopyBitmap(MemoryPool* pool, const uint8_t* bitmap, int64_t /// \param[in] length the number of bits to inspect in the bitmap relative to the offset /// /// \return The number of set (1) bits in the range -int64_t ARROW_EXPORT CountSetBits( - const uint8_t* data, int64_t bit_offset, int64_t length); +int64_t ARROW_EXPORT CountSetBits(const uint8_t* data, int64_t bit_offset, + int64_t length); bool ARROW_EXPORT BitmapEquals(const uint8_t* left, int64_t left_offset, - const uint8_t* right, int64_t right_offset, int64_t bit_length); + const uint8_t* right, int64_t right_offset, + int64_t bit_length); } // namespace arrow #endif // ARROW_UTIL_BIT_UTIL_H diff --git a/cpp/src/arrow/util/bpacking.h b/cpp/src/arrow/util/bpacking.h index fce5f55224cd4..4d25de0ab060c 100644 --- a/cpp/src/arrow/util/bpacking.h +++ b/cpp/src/arrow/util/bpacking.h @@ -3199,136 +3199,103 @@ inline int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_b switch (num_bits) { case 0: - for (int i = 0; i < num_loops; ++i) - in = nullunpacker32(in, 
out + i * 32); + for (int i = 0; i < num_loops; ++i) in = nullunpacker32(in, out + i * 32); break; case 1: - for (int i = 0; i < num_loops; ++i) - in = unpack1_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack1_32(in, out + i * 32); break; case 2: - for (int i = 0; i < num_loops; ++i) - in = unpack2_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack2_32(in, out + i * 32); break; case 3: - for (int i = 0; i < num_loops; ++i) - in = unpack3_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack3_32(in, out + i * 32); break; case 4: - for (int i = 0; i < num_loops; ++i) - in = unpack4_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack4_32(in, out + i * 32); break; case 5: - for (int i = 0; i < num_loops; ++i) - in = unpack5_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack5_32(in, out + i * 32); break; case 6: - for (int i = 0; i < num_loops; ++i) - in = unpack6_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack6_32(in, out + i * 32); break; case 7: - for (int i = 0; i < num_loops; ++i) - in = unpack7_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack7_32(in, out + i * 32); break; case 8: - for (int i = 0; i < num_loops; ++i) - in = unpack8_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack8_32(in, out + i * 32); break; case 9: - for (int i = 0; i < num_loops; ++i) - in = unpack9_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack9_32(in, out + i * 32); break; case 10: - for (int i = 0; i < num_loops; ++i) - in = unpack10_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack10_32(in, out + i * 32); break; case 11: - for (int i = 0; i < num_loops; ++i) - in = unpack11_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack11_32(in, out + i * 32); break; case 12: - for (int i = 0; i < num_loops; ++i) - in = unpack12_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack12_32(in, out + i * 32); break; case 13: - for (int i = 0; i < num_loops; ++i) - in = unpack13_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack13_32(in, out + i * 32); break; case 14: - for (int i = 0; i < num_loops; ++i) - in = unpack14_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack14_32(in, out + i * 32); break; case 15: - for (int i = 0; i < num_loops; ++i) - in = unpack15_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack15_32(in, out + i * 32); break; case 16: - for (int i = 0; i < num_loops; ++i) - in = unpack16_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack16_32(in, out + i * 32); break; case 17: - for (int i = 0; i < num_loops; ++i) - in = unpack17_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack17_32(in, out + i * 32); break; case 18: - for (int i = 0; i < num_loops; ++i) - in = unpack18_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack18_32(in, out + i * 32); break; case 19: - for (int i = 0; i < num_loops; ++i) - in = unpack19_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack19_32(in, out + i * 32); break; case 20: - for (int i = 0; i < num_loops; ++i) - in = unpack20_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack20_32(in, out + i * 32); break; case 21: - for (int i = 0; i < num_loops; ++i) - in = unpack21_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack21_32(in, out + i * 32); 
break; case 22: - for (int i = 0; i < num_loops; ++i) - in = unpack22_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack22_32(in, out + i * 32); break; case 23: - for (int i = 0; i < num_loops; ++i) - in = unpack23_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack23_32(in, out + i * 32); break; case 24: - for (int i = 0; i < num_loops; ++i) - in = unpack24_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack24_32(in, out + i * 32); break; case 25: - for (int i = 0; i < num_loops; ++i) - in = unpack25_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack25_32(in, out + i * 32); break; case 26: - for (int i = 0; i < num_loops; ++i) - in = unpack26_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack26_32(in, out + i * 32); break; case 27: - for (int i = 0; i < num_loops; ++i) - in = unpack27_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack27_32(in, out + i * 32); break; case 28: - for (int i = 0; i < num_loops; ++i) - in = unpack28_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack28_32(in, out + i * 32); break; case 29: - for (int i = 0; i < num_loops; ++i) - in = unpack29_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack29_32(in, out + i * 32); break; case 30: - for (int i = 0; i < num_loops; ++i) - in = unpack30_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack30_32(in, out + i * 32); break; case 31: - for (int i = 0; i < num_loops; ++i) - in = unpack31_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack31_32(in, out + i * 32); break; case 32: - for (int i = 0; i < num_loops; ++i) - in = unpack32_32(in, out + i * 32); + for (int i = 0; i < num_loops; ++i) in = unpack32_32(in, out + i * 32); break; default: DCHECK(false) << "Unsupported num_bits"; diff --git a/cpp/src/arrow/util/compression-test.cc b/cpp/src/arrow/util/compression-test.cc index f7739fc6dd7c3..64896dd6a4a15 100644 --- a/cpp/src/arrow/util/compression-test.cc +++ b/cpp/src/arrow/util/compression-test.cc @@ -15,8 +15,8 @@ // specific language governing permissions and limitations // under the License. 
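The unpack32 switch above dispatches to width-specialized unpackers for speed; a generic scalar sketch for one batch of 32 values, assuming the packed LSB-first little-endian bit layout those routines consume (an assumption; the specialized unpackN_32 bodies are not shown here):

#include <cstdint>

// Unpacks 32 values of num_bits bits each (num_bits in [1, 31]) and returns the
// advanced input pointer, like the specialized routines.
const uint32_t* GenericUnpack32(const uint32_t* in, uint32_t* out, int num_bits) {
  const uint32_t mask = (1u << num_bits) - 1;
  for (int i = 0; i < 32; ++i) {
    const int bit_pos = i * num_bits;
    const int word = bit_pos / 32;
    const int offset = bit_pos % 32;
    uint32_t value = in[word] >> offset;
    if (offset + num_bits > 32) {
      value |= in[word + 1] << (32 - offset);  // value straddles two 32-bit words
    }
    out[i] = value & mask;
  }
  return in + num_bits;  // 32 values * num_bits bits == num_bits whole words consumed
}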
-#include #include +#include #include #include @@ -43,25 +43,25 @@ void CheckCodecRoundtrip(const vector& data) { // compress with c1 int64_t actual_size; - ASSERT_OK(c1->Compress( - data.size(), &data[0], max_compressed_len, &compressed[0], &actual_size)); + ASSERT_OK(c1->Compress(data.size(), &data[0], max_compressed_len, &compressed[0], + &actual_size)); compressed.resize(actual_size); // decompress with c2 - ASSERT_OK(c2->Decompress( - compressed.size(), &compressed[0], decompressed.size(), &decompressed[0])); + ASSERT_OK(c2->Decompress(compressed.size(), &compressed[0], decompressed.size(), + &decompressed[0])); ASSERT_EQ(data, decompressed); // compress with c2 int64_t actual_size2; - ASSERT_OK(c2->Compress( - data.size(), &data[0], max_compressed_len, &compressed[0], &actual_size2)); + ASSERT_OK(c2->Compress(data.size(), &data[0], max_compressed_len, &compressed[0], + &actual_size2)); ASSERT_EQ(actual_size2, actual_size); // decompress with c1 - ASSERT_OK(c1->Decompress( - compressed.size(), &compressed[0], decompressed.size(), &decompressed[0])); + ASSERT_OK(c1->Decompress(compressed.size(), &compressed[0], decompressed.size(), + &decompressed[0])); ASSERT_EQ(data, decompressed); } @@ -76,24 +76,14 @@ void CheckCodec() { } } -TEST(TestCompressors, Snappy) { - CheckCodec(); -} +TEST(TestCompressors, Snappy) { CheckCodec(); } -TEST(TestCompressors, Brotli) { - CheckCodec(); -} +TEST(TestCompressors, Brotli) { CheckCodec(); } -TEST(TestCompressors, GZip) { - CheckCodec(); -} +TEST(TestCompressors, GZip) { CheckCodec(); } -TEST(TestCompressors, ZSTD) { - CheckCodec(); -} +TEST(TestCompressors, ZSTD) { CheckCodec(); } -TEST(TestCompressors, Lz4) { - CheckCodec(); -} +TEST(TestCompressors, Lz4) { CheckCodec(); } } // namespace arrow diff --git a/cpp/src/arrow/util/compression.h b/cpp/src/arrow/util/compression.h index 19c61179a502a..ae187a7fcdf1c 100644 --- a/cpp/src/arrow/util/compression.h +++ b/cpp/src/arrow/util/compression.h @@ -37,10 +37,11 @@ class ARROW_EXPORT Codec { static Status Create(Compression::type codec, std::unique_ptr* out); virtual Status Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, - uint8_t* output_buffer) = 0; + uint8_t* output_buffer) = 0; virtual Status Compress(int64_t input_len, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output_buffer, int64_t* output_length) = 0; + int64_t output_buffer_len, uint8_t* output_buffer, + int64_t* output_length) = 0; virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0; diff --git a/cpp/src/arrow/util/compression_brotli.cc b/cpp/src/arrow/util/compression_brotli.cc index c03573bc46c1c..e4639083dfadb 100644 --- a/cpp/src/arrow/util/compression_brotli.cc +++ b/cpp/src/arrow/util/compression_brotli.cc @@ -33,8 +33,8 @@ namespace arrow { // ---------------------------------------------------------------------- // Brotli implementation -Status BrotliCodec::Decompress( - int64_t input_len, const uint8_t* input, int64_t output_len, uint8_t* output_buffer) { +Status BrotliCodec::Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output_buffer) { size_t output_size = output_len; if (BrotliDecoderDecompress(input_len, input, &output_size, output_buffer) != BROTLI_DECODER_RESULT_SUCCESS) { @@ -48,12 +48,13 @@ int64_t BrotliCodec::MaxCompressedLen(int64_t input_len, const uint8_t* input) { } Status BrotliCodec::Compress(int64_t input_len, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output_buffer, int64_t* output_length) { + 
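The compression-test hunk above repeats one round-trip pattern against the Codec interface (MaxCompressedLen, Compress, Decompress); a condensed sketch of that pattern as a duck-typed template, so it is not tied to any particular codec (illustrative, not the test's code):

#include <cstdint>
#include <vector>

// Round-trips `data` through any codec exposing the interface shown in compression.h;
// returns true when the decompressed bytes match the input.
template <typename CodecType>
bool RoundTrips(CodecType* codec, const std::vector<uint8_t>& data) {
  std::vector<uint8_t> compressed(codec->MaxCompressedLen(data.size(), data.data()));
  int64_t actual_size = 0;
  if (!codec->Compress(data.size(), data.data(), compressed.size(), compressed.data(),
                       &actual_size).ok()) {
    return false;
  }
  compressed.resize(actual_size);
  std::vector<uint8_t> decompressed(data.size());
  if (!codec->Decompress(compressed.size(), compressed.data(), decompressed.size(),
                         decompressed.data()).ok()) {
    return false;
  }
  return decompressed == data;
}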
int64_t output_buffer_len, uint8_t* output_buffer, + int64_t* output_length) { size_t output_len = output_buffer_len; // TODO: Make quality configurable. We use 8 as a default as it is the best // trade-off for Parquet workload if (BrotliEncoderCompress(8, BROTLI_DEFAULT_WINDOW, BROTLI_DEFAULT_MODE, input_len, - input, &output_len, output_buffer) == BROTLI_FALSE) { + input, &output_len, output_buffer) == BROTLI_FALSE) { return Status::IOError("Brotli compression failure."); } *output_length = output_len; diff --git a/cpp/src/arrow/util/compression_brotli.h b/cpp/src/arrow/util/compression_brotli.h index 08bd3379e3489..9e92cb106d422 100644 --- a/cpp/src/arrow/util/compression_brotli.h +++ b/cpp/src/arrow/util/compression_brotli.h @@ -30,10 +30,10 @@ namespace arrow { class ARROW_EXPORT BrotliCodec : public Codec { public: Status Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, - uint8_t* output_buffer) override; + uint8_t* output_buffer) override; Status Compress(int64_t input_len, const uint8_t* input, int64_t output_buffer_len, - uint8_t* output_buffer, int64_t* output_length) override; + uint8_t* output_buffer, int64_t* output_length) override; int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) override; diff --git a/cpp/src/arrow/util/compression_lz4.cc b/cpp/src/arrow/util/compression_lz4.cc index 65eaa08946e79..295e9a438f799 100644 --- a/cpp/src/arrow/util/compression_lz4.cc +++ b/cpp/src/arrow/util/compression_lz4.cc @@ -32,12 +32,14 @@ namespace arrow { // ---------------------------------------------------------------------- // Lz4 implementation -Status Lz4Codec::Decompress( - int64_t input_len, const uint8_t* input, int64_t output_len, uint8_t* output_buffer) { - int64_t decompressed_size = LZ4_decompress_safe(reinterpret_cast(input), - reinterpret_cast(output_buffer), static_cast(input_len), - static_cast(output_len)); - if (decompressed_size < 1) { return Status::IOError("Corrupt Lz4 compressed data."); } +Status Lz4Codec::Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, + uint8_t* output_buffer) { + int64_t decompressed_size = LZ4_decompress_safe( + reinterpret_cast(input), reinterpret_cast(output_buffer), + static_cast(input_len), static_cast(output_len)); + if (decompressed_size < 1) { + return Status::IOError("Corrupt Lz4 compressed data."); + } return Status::OK(); } @@ -46,11 +48,14 @@ int64_t Lz4Codec::MaxCompressedLen(int64_t input_len, const uint8_t* input) { } Status Lz4Codec::Compress(int64_t input_len, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output_buffer, int64_t* output_length) { - *output_length = LZ4_compress_default(reinterpret_cast(input), - reinterpret_cast(output_buffer), static_cast(input_len), - static_cast(output_buffer_len)); - if (*output_length < 1) { return Status::IOError("Lz4 compression failure."); } + int64_t output_buffer_len, uint8_t* output_buffer, + int64_t* output_length) { + *output_length = LZ4_compress_default( + reinterpret_cast(input), reinterpret_cast(output_buffer), + static_cast(input_len), static_cast(output_buffer_len)); + if (*output_length < 1) { + return Status::IOError("Lz4 compression failure."); + } return Status::OK(); } diff --git a/cpp/src/arrow/util/compression_lz4.h b/cpp/src/arrow/util/compression_lz4.h index 9668fec126b12..0af228963f320 100644 --- a/cpp/src/arrow/util/compression_lz4.h +++ b/cpp/src/arrow/util/compression_lz4.h @@ -30,10 +30,10 @@ namespace arrow { class ARROW_EXPORT Lz4Codec : public Codec { public: Status 
Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, - uint8_t* output_buffer) override; + uint8_t* output_buffer) override; Status Compress(int64_t input_len, const uint8_t* input, int64_t output_buffer_len, - uint8_t* output_buffer, int64_t* output_length) override; + uint8_t* output_buffer, int64_t* output_length) override; int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) override; diff --git a/cpp/src/arrow/util/compression_snappy.cc b/cpp/src/arrow/util/compression_snappy.cc index db2b67355109a..947ffe559bda6 100644 --- a/cpp/src/arrow/util/compression_snappy.cc +++ b/cpp/src/arrow/util/compression_snappy.cc @@ -37,10 +37,11 @@ namespace arrow { // ---------------------------------------------------------------------- // Snappy implementation -Status SnappyCodec::Decompress( - int64_t input_len, const uint8_t* input, int64_t output_len, uint8_t* output_buffer) { +Status SnappyCodec::Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output_buffer) { if (!snappy::RawUncompress(reinterpret_cast(input), - static_cast(input_len), reinterpret_cast(output_buffer))) { + static_cast(input_len), + reinterpret_cast(output_buffer))) { return Status::IOError("Corrupt snappy compressed data."); } return Status::OK(); @@ -51,11 +52,12 @@ int64_t SnappyCodec::MaxCompressedLen(int64_t input_len, const uint8_t* input) { } Status SnappyCodec::Compress(int64_t input_len, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output_buffer, int64_t* output_length) { + int64_t output_buffer_len, uint8_t* output_buffer, + int64_t* output_length) { size_t output_len; snappy::RawCompress(reinterpret_cast(input), - static_cast(input_len), reinterpret_cast(output_buffer), - &output_len); + static_cast(input_len), + reinterpret_cast(output_buffer), &output_len); *output_length = static_cast(output_len); return Status::OK(); } diff --git a/cpp/src/arrow/util/compression_snappy.h b/cpp/src/arrow/util/compression_snappy.h index 25281e1a97a16..5cc10c470af45 100644 --- a/cpp/src/arrow/util/compression_snappy.h +++ b/cpp/src/arrow/util/compression_snappy.h @@ -29,10 +29,10 @@ namespace arrow { class ARROW_EXPORT SnappyCodec : public Codec { public: Status Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, - uint8_t* output_buffer) override; + uint8_t* output_buffer) override; Status Compress(int64_t input_len, const uint8_t* input, int64_t output_buffer_len, - uint8_t* output_buffer, int64_t* output_length) override; + uint8_t* output_buffer, int64_t* output_length) override; int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) override; diff --git a/cpp/src/arrow/util/compression_zlib.cc b/cpp/src/arrow/util/compression_zlib.cc index 3ff33b82028e8..ae6627ea6442f 100644 --- a/cpp/src/arrow/util/compression_zlib.cc +++ b/cpp/src/arrow/util/compression_zlib.cc @@ -69,7 +69,7 @@ class GZipCodec::GZipCodecImpl { window_bits += GZIP_CODEC; } if ((ret = deflateInit2(&stream_, Z_DEFAULT_COMPRESSION, Z_DEFLATED, window_bits, 9, - Z_DEFAULT_STRATEGY)) != Z_OK) { + Z_DEFAULT_STRATEGY)) != Z_OK) { std::stringstream ss; ss << "zlib deflateInit failed: " << std::string(stream_.msg); return Status::IOError(ss.str()); @@ -79,7 +79,9 @@ class GZipCodec::GZipCodecImpl { } void EndCompressor() { - if (compressor_initialized_) { (void)deflateEnd(&stream_); } + if (compressor_initialized_) { + (void)deflateEnd(&stream_); + } compressor_initialized_ = false; } @@ -100,13 +102,17 @@ class GZipCodec::GZipCodecImpl { } void 
EndDecompressor() { - if (decompressor_initialized_) { (void)inflateEnd(&stream_); } + if (decompressor_initialized_) { + (void)inflateEnd(&stream_); + } decompressor_initialized_ = false; } Status Decompress(int64_t input_length, const uint8_t* input, int64_t output_length, - uint8_t* output) { - if (!decompressor_initialized_) { RETURN_NOT_OK(InitDecompressor()); } + uint8_t* output) { + if (!decompressor_initialized_) { + RETURN_NOT_OK(InitDecompressor()); + } if (output_length == 0) { // The zlib library does not allow *output to be NULL, even when output_length // is 0 (inflate() will return Z_STREAM_ERROR). We don't consider this an @@ -168,8 +174,10 @@ class GZipCodec::GZipCodecImpl { } Status Compress(int64_t input_length, const uint8_t* input, int64_t output_buffer_len, - uint8_t* output, int64_t* output_length) { - if (!compressor_initialized_) { RETURN_NOT_OK(InitCompressor()); } + uint8_t* output, int64_t* output_length) { + if (!compressor_initialized_) { + RETURN_NOT_OK(InitCompressor()); + } stream_.next_in = const_cast(reinterpret_cast(input)); stream_.avail_in = static_cast(input_length); stream_.next_out = reinterpret_cast(output); @@ -218,14 +226,12 @@ class GZipCodec::GZipCodecImpl { bool decompressor_initialized_; }; -GZipCodec::GZipCodec(Format format) { - impl_.reset(new GZipCodecImpl(format)); -} +GZipCodec::GZipCodec(Format format) { impl_.reset(new GZipCodecImpl(format)); } GZipCodec::~GZipCodec() {} Status GZipCodec::Decompress(int64_t input_length, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output) { + int64_t output_buffer_len, uint8_t* output) { return impl_->Decompress(input_length, input, output_buffer_len, output); } @@ -234,12 +240,11 @@ int64_t GZipCodec::MaxCompressedLen(int64_t input_length, const uint8_t* input) } Status GZipCodec::Compress(int64_t input_length, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output, int64_t* output_length) { + int64_t output_buffer_len, uint8_t* output, + int64_t* output_length) { return impl_->Compress(input_length, input, output_buffer_len, output, output_length); } -const char* GZipCodec::name() const { - return "gzip"; -} +const char* GZipCodec::name() const { return "gzip"; } } // namespace arrow diff --git a/cpp/src/arrow/util/compression_zlib.h b/cpp/src/arrow/util/compression_zlib.h index 517a06175ec8f..f55d6689edfa9 100644 --- a/cpp/src/arrow/util/compression_zlib.h +++ b/cpp/src/arrow/util/compression_zlib.h @@ -40,10 +40,10 @@ class ARROW_EXPORT GZipCodec : public Codec { virtual ~GZipCodec(); Status Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, - uint8_t* output_buffer) override; + uint8_t* output_buffer) override; Status Compress(int64_t input_len, const uint8_t* input, int64_t output_buffer_len, - uint8_t* output_buffer, int64_t* output_length) override; + uint8_t* output_buffer, int64_t* output_length) override; int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) override; diff --git a/cpp/src/arrow/util/compression_zstd.cc b/cpp/src/arrow/util/compression_zstd.cc index 5511cb9dd8f37..ac6e9065d22dd 100644 --- a/cpp/src/arrow/util/compression_zstd.cc +++ b/cpp/src/arrow/util/compression_zstd.cc @@ -32,10 +32,11 @@ namespace arrow { // ---------------------------------------------------------------------- // ZSTD implementation -Status ZSTDCodec::Decompress( - int64_t input_len, const uint8_t* input, int64_t output_len, uint8_t* output_buffer) { - int64_t decompressed_size = ZSTD_decompress(output_buffer, - static_cast(output_len), 
input, static_cast(input_len)); +Status ZSTDCodec::Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, + uint8_t* output_buffer) { + int64_t decompressed_size = + ZSTD_decompress(output_buffer, static_cast(output_len), input, + static_cast(input_len)); if (decompressed_size != output_len) { return Status::IOError("Corrupt ZSTD compressed data."); } @@ -47,9 +48,10 @@ int64_t ZSTDCodec::MaxCompressedLen(int64_t input_len, const uint8_t* input) { } Status ZSTDCodec::Compress(int64_t input_len, const uint8_t* input, - int64_t output_buffer_len, uint8_t* output_buffer, int64_t* output_length) { + int64_t output_buffer_len, uint8_t* output_buffer, + int64_t* output_length) { *output_length = ZSTD_compress(output_buffer, static_cast(output_buffer_len), - input, static_cast(input_len), 1); + input, static_cast(input_len), 1); if (ZSTD_isError(*output_length)) { return Status::IOError("ZSTD compression failure."); } diff --git a/cpp/src/arrow/util/compression_zstd.h b/cpp/src/arrow/util/compression_zstd.h index 2356d5862e01a..6e40e19d280d7 100644 --- a/cpp/src/arrow/util/compression_zstd.h +++ b/cpp/src/arrow/util/compression_zstd.h @@ -30,10 +30,10 @@ namespace arrow { class ARROW_EXPORT ZSTDCodec : public Codec { public: Status Decompress(int64_t input_len, const uint8_t* input, int64_t output_len, - uint8_t* output_buffer) override; + uint8_t* output_buffer) override; Status Compress(int64_t input_len, const uint8_t* input, int64_t output_buffer_len, - uint8_t* output_buffer, int64_t* output_length) override; + uint8_t* output_buffer, int64_t* output_length) override; int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) override; diff --git a/cpp/src/arrow/util/cpu-info.cc b/cpp/src/arrow/util/cpu-info.cc index dcd6b4027d966..b0667cb33ada4 100644 --- a/cpp/src/arrow/util/cpu-info.cc +++ b/cpp/src/arrow/util/cpu-info.cc @@ -66,7 +66,9 @@ static struct { string name; int64_t flag; } flag_mappings[] = { - {"ssse3", CpuInfo::SSSE3}, {"sse4_1", CpuInfo::SSE4_1}, {"sse4_2", CpuInfo::SSE4_2}, + {"ssse3", CpuInfo::SSSE3}, + {"sse4_1", CpuInfo::SSE4_1}, + {"sse4_2", CpuInfo::SSE4_2}, {"popcnt", CpuInfo::POPCNT}, }; static const int64_t num_flags = sizeof(flag_mappings) / sizeof(flag_mappings[0]); @@ -78,14 +80,18 @@ static const int64_t num_flags = sizeof(flag_mappings) / sizeof(flag_mappings[0] int64_t ParseCPUFlags(const string& values) { int64_t flags = 0; for (int i = 0; i < num_flags; ++i) { - if (contains(values, flag_mappings[i].name)) { flags |= flag_mappings[i].flag; } + if (contains(values, flag_mappings[i].name)) { + flags |= flag_mappings[i].flag; + } } return flags; } #ifdef _WIN32 bool RetrieveCacheSize(int64_t* cache_sizes) { - if (!cache_sizes) { return false; } + if (!cache_sizes) { + return false; + } PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = nullptr; PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer_position = nullptr; DWORD buffer_size = 0; @@ -95,7 +101,9 @@ bool RetrieveCacheSize(int64_t* cache_sizes) { (GetLogicalProcessorInformationFuncPointer)GetProcAddress( GetModuleHandle("kernel32"), "GetLogicalProcessorInformation"); - if (!func_pointer) { return false; } + if (!func_pointer) { + return false; + } // Get buffer size if (func_pointer(buffer, &buffer_size) && GetLastError() != ERROR_INSUFFICIENT_BUFFER) @@ -103,7 +111,9 @@ bool RetrieveCacheSize(int64_t* cache_sizes) { buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(buffer_size); - if (!buffer || !func_pointer(buffer, &buffer_size)) { return false; } + if (!buffer || 
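The ParseCPUFlags hunk above matches feature names from the /proc/cpuinfo "flags" line against a name-to-bit table; the same idea in isolation, with placeholder bit values standing in for the CpuInfo::SSSE3/SSE4_1/SSE4_2/POPCNT constants (their real values are not shown in the patch):

#include <cstdint>
#include <string>

constexpr int64_t kSSSE3 = 1 << 0, kSSE4_1 = 1 << 1, kSSE4_2 = 1 << 2, kPOPCNT = 1 << 3;

struct FlagMapping { const char* name; int64_t flag; };
constexpr FlagMapping kFlagMappings[] = {
    {"ssse3", kSSSE3}, {"sse4_1", kSSE4_1}, {"sse4_2", kSSE4_2}, {"popcnt", kPOPCNT},
};

// Builds a feature bitmask by substring-matching each known flag name.
int64_t ParseCpuFlagLine(const std::string& values) {
  int64_t flags = 0;
  for (const auto& mapping : kFlagMappings) {
    if (values.find(mapping.name) != std::string::npos) {
      flags |= mapping.flag;
    }
  }
  return flags;
}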
!func_pointer(buffer, &buffer_size)) { + return false; + } buffer_position = buffer; while (offset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= buffer_size) { @@ -117,7 +127,9 @@ bool RetrieveCacheSize(int64_t* cache_sizes) { buffer_position++; } - if (buffer) { free(buffer); } + if (buffer) { + free(buffer); + } return true; } #endif @@ -125,7 +137,9 @@ bool RetrieveCacheSize(int64_t* cache_sizes) { void CpuInfo::Init() { std::lock_guard cpuinfo_lock(cpuinfo_mutex); - if (initialized()) { return; } + if (initialized()) { + return; + } string line; string name; @@ -186,7 +200,9 @@ void CpuInfo::Init() { cache_sizes_[i] = data[i]; } #elif _WIN32 - if (!RetrieveCacheSize(cache_sizes_)) { SetDefaultCacheSize(); } + if (!RetrieveCacheSize(cache_sizes_)) { + SetDefaultCacheSize(); + } #else SetDefaultCacheSize(); #endif diff --git a/cpp/src/arrow/util/decimal.cc b/cpp/src/arrow/util/decimal.cc index 72ede35bef9b5..1a12e20f9f93f 100644 --- a/cpp/src/arrow/util/decimal.cc +++ b/cpp/src/arrow/util/decimal.cc @@ -21,8 +21,8 @@ namespace arrow { namespace decimal { template -ARROW_EXPORT Status FromString( - const std::string& s, Decimal* out, int* precision, int* scale) { +ARROW_EXPORT Status FromString(const std::string& s, Decimal* out, int* precision, + int* scale) { // Implements this regex: "(\\+?|-?)((0*)(\\d*))(\\.(\\d+))?"; if (s.empty()) { return Status::Invalid("Empty string cannot be converted to decimal"); @@ -34,7 +34,9 @@ ARROW_EXPORT Status FromString( char first_char = *charp; if (first_char == '+' || first_char == '-') { - if (first_char == '-') { sign = -1; } + if (first_char == '-') { + sign = -1; + } ++charp; } @@ -55,7 +57,9 @@ ARROW_EXPORT Status FromString( // all zeros and no decimal point if (charp == end) { - if (out != nullptr) { out->value = static_cast(0); } + if (out != nullptr) { + out->value = static_cast(0); + } // Not sure what other libraries assign precision to for this case (this case of // a string consisting only of one or more zeros) @@ -63,7 +67,9 @@ ARROW_EXPORT Status FromString( *precision = static_cast(charp - numeric_string_start); } - if (scale != nullptr) { *scale = 0; } + if (scale != nullptr) { + *scale = 0; + } return Status::OK(); } @@ -127,22 +133,26 @@ ARROW_EXPORT Status FromString( *precision = static_cast(whole_part.size() + fractional_part.size()); } - if (scale != nullptr) { *scale = static_cast(fractional_part.size()); } + if (scale != nullptr) { + *scale = static_cast(fractional_part.size()); + } - if (out != nullptr) { StringToInteger(whole_part, fractional_part, sign, &out->value); } + if (out != nullptr) { + StringToInteger(whole_part, fractional_part, sign, &out->value); + } return Status::OK(); } -template ARROW_EXPORT Status FromString( - const std::string& s, Decimal32* out, int* precision, int* scale); -template ARROW_EXPORT Status FromString( - const std::string& s, Decimal64* out, int* precision, int* scale); -template ARROW_EXPORT Status FromString( - const std::string& s, Decimal128* out, int* precision, int* scale); +template ARROW_EXPORT Status FromString(const std::string& s, Decimal32* out, + int* precision, int* scale); +template ARROW_EXPORT Status FromString(const std::string& s, Decimal64* out, + int* precision, int* scale); +template ARROW_EXPORT Status FromString(const std::string& s, Decimal128* out, + int* precision, int* scale); -void StringToInteger( - const std::string& whole, const std::string& fractional, int8_t sign, int32_t* out) { +void StringToInteger(const std::string& whole, const std::string& 
fractional, int8_t sign, + int32_t* out) { DCHECK(sign == -1 || sign == 1); DCHECK_NE(out, nullptr); DCHECK(!whole.empty() || !fractional.empty()); @@ -150,12 +160,14 @@ void StringToInteger( *out = std::stoi(whole, nullptr, 10) * static_cast(pow(10.0, static_cast(fractional.size()))); } - if (!fractional.empty()) { *out += std::stoi(fractional, nullptr, 10); } + if (!fractional.empty()) { + *out += std::stoi(fractional, nullptr, 10); + } *out *= sign; } -void StringToInteger( - const std::string& whole, const std::string& fractional, int8_t sign, int64_t* out) { +void StringToInteger(const std::string& whole, const std::string& fractional, int8_t sign, + int64_t* out) { DCHECK(sign == -1 || sign == 1); DCHECK_NE(out, nullptr); DCHECK(!whole.empty() || !fractional.empty()); @@ -163,12 +175,14 @@ void StringToInteger( *out = static_cast(std::stoll(whole, nullptr, 10)) * static_cast(pow(10.0, static_cast(fractional.size()))); } - if (!fractional.empty()) { *out += std::stoll(fractional, nullptr, 10); } + if (!fractional.empty()) { + *out += std::stoll(fractional, nullptr, 10); + } *out *= sign; } -void StringToInteger( - const std::string& whole, const std::string& fractional, int8_t sign, int128_t* out) { +void StringToInteger(const std::string& whole, const std::string& fractional, int8_t sign, + int128_t* out) { DCHECK(sign == -1 || sign == 1); DCHECK_NE(out, nullptr); DCHECK(!whole.empty() || !fractional.empty()); @@ -200,7 +214,9 @@ void FromBytes(const uint8_t* bytes, bool is_negative, Decimal128* decimal) { int128_t::backend_type& backend(decimal_value.backend()); backend.resize(LIMBS_IN_INT128, LIMBS_IN_INT128); std::memcpy(backend.limbs(), bytes, BYTES_IN_128_BITS); - if (is_negative) { decimal->value = -decimal->value; } + if (is_negative) { + decimal->value = -decimal->value; + } } void ToBytes(const Decimal32& value, uint8_t** bytes) { diff --git a/cpp/src/arrow/util/decimal.h b/cpp/src/arrow/util/decimal.h index 0d84ba89db973..20142faea3ec5 100644 --- a/cpp/src/arrow/util/decimal.h +++ b/cpp/src/arrow/util/decimal.h @@ -37,16 +37,16 @@ using boost::multiprecision::int128_t; template struct ARROW_EXPORT Decimal; -ARROW_EXPORT void StringToInteger( - const std::string& whole, const std::string& fractional, int8_t sign, int32_t* out); -ARROW_EXPORT void StringToInteger( - const std::string& whole, const std::string& fractional, int8_t sign, int64_t* out); -ARROW_EXPORT void StringToInteger( - const std::string& whole, const std::string& fractional, int8_t sign, int128_t* out); +ARROW_EXPORT void StringToInteger(const std::string& whole, const std::string& fractional, + int8_t sign, int32_t* out); +ARROW_EXPORT void StringToInteger(const std::string& whole, const std::string& fractional, + int8_t sign, int64_t* out); +ARROW_EXPORT void StringToInteger(const std::string& whole, const std::string& fractional, + int8_t sign, int128_t* out); template ARROW_EXPORT Status FromString(const std::string& s, Decimal* out, - int* precision = nullptr, int* scale = nullptr); + int* precision = nullptr, int* scale = nullptr); template struct ARROW_EXPORT Decimal { @@ -85,8 +85,8 @@ struct ARROW_EXPORT DecimalPrecision { }; template -ARROW_EXPORT std::string ToString( - const Decimal& decimal_value, int precision, int scale) { +ARROW_EXPORT std::string ToString(const Decimal& decimal_value, int precision, + int scale) { T value = decimal_value.value; // Decimal values are sent to clients as strings so in the interest of @@ -108,8 +108,8 @@ ARROW_EXPORT std::string ToString( if (scale > 0) { int 
remaining_scale = scale; do { - str[--last_char_idx] = static_cast( - (remaining_value % 10) + static_cast('0')); // Ascii offset + str[--last_char_idx] = static_cast((remaining_value % 10) + + static_cast('0')); // Ascii offset remaining_value /= 10; } while (--remaining_scale > 0); str[--last_char_idx] = '.'; diff --git a/cpp/src/arrow/util/key_value_metadata.cc b/cpp/src/arrow/util/key_value_metadata.cc index 8bddd5d0164c2..6877a6a5382fe 100644 --- a/cpp/src/arrow/util/key_value_metadata.cc +++ b/cpp/src/arrow/util/key_value_metadata.cc @@ -48,8 +48,8 @@ KeyValueMetadata::KeyValueMetadata( const std::unordered_map& map) : keys_(UnorderedMapKeys(map)), values_(UnorderedMapValues(map)) {} -KeyValueMetadata::KeyValueMetadata( - const std::vector& keys, const std::vector& values) +KeyValueMetadata::KeyValueMetadata(const std::vector& keys, + const std::vector& values) : keys_(keys), values_(values) { DCHECK_EQ(keys.size(), values.size()); } diff --git a/cpp/src/arrow/util/key_value_metadata.h b/cpp/src/arrow/util/key_value_metadata.h index a2a4623aee7cc..3d602131684f6 100644 --- a/cpp/src/arrow/util/key_value_metadata.h +++ b/cpp/src/arrow/util/key_value_metadata.h @@ -32,8 +32,8 @@ namespace arrow { class ARROW_EXPORT KeyValueMetadata { public: KeyValueMetadata(); - KeyValueMetadata( - const std::vector& keys, const std::vector& values); + KeyValueMetadata(const std::vector& keys, + const std::vector& values); explicit KeyValueMetadata(const std::unordered_map& map); virtual ~KeyValueMetadata() = default; diff --git a/cpp/src/arrow/util/logging.h b/cpp/src/arrow/util/logging.h index 0edaa9dfc37a3..89e69f932d52d 100644 --- a/cpp/src/arrow/util/logging.h +++ b/cpp/src/arrow/util/logging.h @@ -50,32 +50,25 @@ namespace arrow { #define DCHECK(condition) \ ARROW_IGNORE_EXPR(condition) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #define DCHECK_EQ(val1, val2) \ ARROW_IGNORE_EXPR(val1) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #define DCHECK_NE(val1, val2) \ ARROW_IGNORE_EXPR(val1) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #define DCHECK_LE(val1, val2) \ ARROW_IGNORE_EXPR(val1) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #define DCHECK_LT(val1, val2) \ ARROW_IGNORE_EXPR(val1) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #define DCHECK_GE(val1, val2) \ ARROW_IGNORE_EXPR(val1) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #define DCHECK_GT(val1, val2) \ ARROW_IGNORE_EXPR(val1) \ - while (false) \ - ::arrow::internal::NullLog() + while (false) ::arrow::internal::NullLog() #else #define ARROW_DFATAL ARROW_FATAL @@ -107,8 +100,12 @@ class CerrLog { has_logged_(false) {} virtual ~CerrLog() { - if (has_logged_) { std::cerr << std::endl; } - if (severity_ == ARROW_FATAL) { std::exit(1); } + if (has_logged_) { + std::cerr << std::endl; + } + if (severity_ == ARROW_FATAL) { + std::exit(1); + } } template @@ -133,7 +130,9 @@ class FatalLog : public CerrLog { : CerrLog(ARROW_FATAL){} // NOLINT [[noreturn]] ~FatalLog() { - if (has_logged_) { std::cerr << std::endl; } + if (has_logged_) { + std::cerr << std::endl; + } std::exit(1); } }; diff --git a/cpp/src/arrow/util/memory.h b/cpp/src/arrow/util/memory.h index c5c17ef907c22..fce9e19293249 100644 --- a/cpp/src/arrow/util/memory.h +++ 
b/cpp/src/arrow/util/memory.h @@ -31,7 +31,7 @@ uint8_t* pointer_logical_and(const uint8_t* address, uintptr_t bits) { // A helper function for doing memcpy with multiple threads. This is required // to saturate the memory bandwidth of modern cpus. void parallel_memcopy(uint8_t* dst, const uint8_t* src, int64_t nbytes, - uintptr_t block_size, int num_threads) { + uintptr_t block_size, int num_threads) { std::vector threadpool(num_threads); uint8_t* left = pointer_logical_and(src + block_size - 1, ~(block_size - 1)); uint8_t* right = pointer_logical_and(src + nbytes, ~(block_size - 1)); @@ -52,15 +52,17 @@ void parallel_memcopy(uint8_t* dst, const uint8_t* src, int64_t nbytes, // Start all threads first and handle leftovers while threads run. for (int i = 0; i < num_threads; i++) { - threadpool[i] = std::thread( - memcpy, dst + prefix + i * chunk_size, left + i * chunk_size, chunk_size); + threadpool[i] = std::thread(memcpy, dst + prefix + i * chunk_size, + left + i * chunk_size, chunk_size); } memcpy(dst, src, prefix); memcpy(dst + prefix + num_threads * chunk_size, right, suffix); for (auto& t : threadpool) { - if (t.joinable()) { t.join(); } + if (t.joinable()) { + t.join(); + } } } diff --git a/cpp/src/arrow/util/random.h b/cpp/src/arrow/util/random.h index 31f2b0680fe0a..ec48d5d4a529c 100644 --- a/cpp/src/arrow/util/random.h +++ b/cpp/src/arrow/util/random.h @@ -27,7 +27,9 @@ class Random { public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { // Avoid bad seeds. - if (seed_ == 0 || seed_ == random_internal::M) { seed_ = 1; } + if (seed_ == 0 || seed_ == random_internal::M) { + seed_ = 1; + } } // Next pseudo-random 32-bit unsigned integer. @@ -48,7 +50,9 @@ class Random { // The first reduction may overflow by 1 bit, so we may need to // repeat. mod == M is not possible; using > allows the faster // sign-bit-based test. - if (seed_ > random_internal::M) { seed_ -= random_internal::M; } + if (seed_ > random_internal::M) { + seed_ -= random_internal::M; + } return seed_; } @@ -97,9 +101,9 @@ class Random { double Normal(double mean, double std_dev) { double uniform1 = (Next() + 1.0) / (random_internal::M + 1.0); double uniform2 = (Next() + 1.0) / (random_internal::M + 1.0); - return ( - mean + - std_dev * sqrt(-2 * ::log(uniform1)) * cos(random_internal::kTwoPi * uniform2)); + return (mean + + std_dev * sqrt(-2 * ::log(uniform1)) * + cos(random_internal::kTwoPi * uniform2)); } // Return a random number between 0.0 and 1.0 inclusive. diff --git a/cpp/src/arrow/util/rle-encoding-test.cc b/cpp/src/arrow/util/rle-encoding-test.cc index 7c9b33c349496..7549b874355df 100644 --- a/cpp/src/arrow/util/rle-encoding-test.cc +++ b/cpp/src/arrow/util/rle-encoding-test.cc @@ -178,7 +178,7 @@ TEST(BitArray, TestMixed) { // exactly 'expected_encoding'. // if expected_len is not -1, it will validate the encoded size is correct. 
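As a worked illustration of the decimal-parsing signatures touched above (not part of this patch): parsing "123.45" with FromString should yield value 12345, precision 5 (total digits) and scale 2 (fractional digits), following the whole/fractional logic in decimal.cc. A minimal sketch, assuming only the headers changed in these hunks:

    #include "arrow/status.h"
    #include "arrow/util/decimal.h"

    // Illustrative only: parse "123.45" into a Decimal64 and read back the
    // inferred precision and scale.
    arrow::Status ParseDecimalExample() {
      arrow::decimal::Decimal64 dec;
      int precision = 0;
      int scale = 0;
      arrow::Status st = arrow::decimal::FromString("123.45", &dec, &precision, &scale);
      // On success: dec.value == 12345, precision == 5, scale == 2.
      return st;
    }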
void ValidateRle(const vector& values, int bit_width, uint8_t* expected_encoding, - int expected_len) { + int expected_len) { const int len = 64 * 1024; uint8_t buffer[len]; EXPECT_LE(expected_len, len); @@ -190,7 +190,9 @@ void ValidateRle(const vector& values, int bit_width, uint8_t* expected_enc } int encoded_len = encoder.Flush(); - if (expected_len != -1) { EXPECT_EQ(encoded_len, expected_len); } + if (expected_len != -1) { + EXPECT_EQ(encoded_len, expected_len); + } if (expected_encoding != NULL) { EXPECT_EQ(memcmp(buffer, expected_encoding, expected_len), 0); } @@ -211,7 +213,7 @@ void ValidateRle(const vector& values, int bit_width, uint8_t* expected_enc RleDecoder decoder(buffer, len, bit_width); vector values_read(values.size()); ASSERT_EQ(values.size(), - decoder.GetBatch(values_read.data(), static_cast(values.size()))); + decoder.GetBatch(values_read.data(), static_cast(values.size()))); EXPECT_EQ(values, values_read); } } @@ -224,7 +226,9 @@ bool CheckRoundTrip(const vector& values, int bit_width) { RleEncoder encoder(buffer, len, bit_width); for (size_t i = 0; i < values.size(); ++i) { bool result = encoder.Put(values[i]); - if (!result) { return false; } + if (!result) { + return false; + } } int encoded_len = encoder.Flush(); int out = 0; @@ -233,7 +237,9 @@ bool CheckRoundTrip(const vector& values, int bit_width) { RleDecoder decoder(buffer, encoded_len, bit_width); for (size_t i = 0; i < values.size(); ++i) { EXPECT_TRUE(decoder.Get(&out)); - if (values[i] != out) { return false; } + if (values[i] != out) { + return false; + } } } @@ -245,7 +251,9 @@ bool CheckRoundTrip(const vector& values, int bit_width) { decoder.GetBatch(values_read.data(), static_cast(values.size()))) { return false; } - if (values != values_read) { return false; } + if (values != values_read) { + return false; + } } return true; @@ -294,8 +302,8 @@ TEST(Rle, SpecificSequences) { ValidateRle(values, 1, expected_buffer, 1 + num_groups); for (int width = 2; width <= MAX_WIDTH; ++width) { int num_values = static_cast(BitUtil::Ceil(100, 8)) * 8; - ValidateRle( - values, width, NULL, 1 + static_cast(BitUtil::Ceil(width * num_values, 8))); + ValidateRle(values, width, NULL, + 1 + static_cast(BitUtil::Ceil(width * num_values, 8))); } } @@ -352,8 +360,7 @@ TEST(Rle, BitWidthZeroLiteral) { // group but flush before finishing. 
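For context on the API these tests exercise, a minimal RLE round trip might look like the sketch below (buffer size and bit width are arbitrary; the arrow namespace qualification is an assumption; the calls mirror ValidateRle/CheckRoundTrip above):

    #include <cstdint>
    #include <vector>
    #include "arrow/util/rle-encoding.h"

    // Illustrative sketch: encode a small vector of 0/1 values at bit width 1,
    // flush, then decode the whole batch and compare.
    bool RleRoundTripExample() {
      std::vector<int> values = {1, 1, 1, 1, 0, 0, 1, 0};
      uint8_t buffer[1024];
      arrow::RleEncoder encoder(buffer, static_cast<int>(sizeof(buffer)), /*bit_width=*/1);
      for (int v : values) {
        if (!encoder.Put(v)) return false;  // ran out of buffer space
      }
      int encoded_len = encoder.Flush();

      arrow::RleDecoder decoder(buffer, encoded_len, /*bit_width=*/1);
      std::vector<int> decoded(values.size());
      decoder.GetBatch(decoded.data(), static_cast<int>(decoded.size()));
      return decoded == values;
    }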
TEST(BitRle, Flush) { vector values; - for (int i = 0; i < 16; ++i) - values.push_back(1); + for (int i = 0; i < 16; ++i) values.push_back(1); values.push_back(0); ValidateRle(values, 1, NULL, -1); values.push_back(1); @@ -385,7 +392,9 @@ TEST(BitRle, Random) { for (int i = 0; i < ngroups; ++i) { int group_size = dist(gen); - if (group_size > max_group_size) { group_size = 1; } + if (group_size > max_group_size) { + group_size = 1; + } for (int i = 0; i < group_size; ++i) { values.push_back(parity); } diff --git a/cpp/src/arrow/util/rle-encoding.h b/cpp/src/arrow/util/rle-encoding.h index 9ec6235144665..e69077807df3a 100644 --- a/cpp/src/arrow/util/rle-encoding.h +++ b/cpp/src/arrow/util/rle-encoding.h @@ -21,8 +21,8 @@ #ifndef ARROW_UTIL_RLE_ENCODING_H #define ARROW_UTIL_RLE_ENCODING_H -#include #include +#include #include "arrow/util/bit-stream-utils.h" #include "arrow/util/bit-util.h" @@ -122,7 +122,8 @@ class RleDecoder { /// Like GetBatchWithDict but add spacing for null entries template int GetBatchWithDictSpaced(const T* dictionary, T* values, int batch_size, - int null_count, const uint8_t* valid_bits, int64_t valid_bits_offset); + int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset); protected: BitReader bit_reader_; @@ -289,7 +290,7 @@ inline int RleDecoder::GetBatch(T* values, int batch_size) { int repeat_batch = std::min(batch_size - values_read, static_cast(repeat_count_)); std::fill(values + values_read, values + values_read + repeat_batch, - static_cast(current_value_)); + static_cast(current_value_)); repeat_count_ -= repeat_batch; values_read += repeat_batch; } else if (literal_count_ > 0) { @@ -318,7 +319,7 @@ inline int RleDecoder::GetBatchWithDict(const T* dictionary, T* values, int batc int repeat_batch = std::min(batch_size - values_read, static_cast(repeat_count_)); std::fill(values + values_read, values + values_read + repeat_batch, - dictionary[current_value_]); + dictionary[current_value_]); repeat_count_ -= repeat_batch; values_read += repeat_batch; } else if (literal_count_ > 0) { @@ -345,8 +346,9 @@ inline int RleDecoder::GetBatchWithDict(const T* dictionary, T* values, int batc template inline int RleDecoder::GetBatchWithDictSpaced(const T* dictionary, T* values, - int batch_size, int null_count, const uint8_t* valid_bits, - int64_t valid_bits_offset) { + int batch_size, int null_count, + const uint8_t* valid_bits, + int64_t valid_bits_offset) { DCHECK_GE(bit_width_, 0); int values_read = 0; int remaining_nulls = null_count; @@ -379,8 +381,8 @@ inline int RleDecoder::GetBatchWithDictSpaced(const T* dictionary, T* values, std::fill(values + values_read, values + values_read + repeat_batch, value); values_read += repeat_batch; } else if (literal_count_ > 0) { - int literal_batch = std::min( - batch_size - values_read - remaining_nulls, static_cast(literal_count_)); + int literal_batch = std::min(batch_size - values_read - remaining_nulls, + static_cast(literal_count_)); // Decode the literals constexpr int kBufferSize = 1024; @@ -434,7 +436,7 @@ bool RleDecoder::NextCounts() { repeat_count_ = indicator_value >> 1; bool result = bit_reader_.GetAligned(static_cast(BitUtil::Ceil(bit_width_, 8)), - reinterpret_cast(¤t_value_)); + reinterpret_cast(¤t_value_)); DCHECK(result); } return true; @@ -509,8 +511,8 @@ inline void RleEncoder::FlushRepeatedRun() { // The lsb of 0 indicates this is a repeated run int32_t indicator_value = repeat_count_ << 1 | 0; result &= bit_writer_.PutVlqInt(indicator_value); - result &= bit_writer_.PutAligned( - 
current_value_, static_cast(BitUtil::Ceil(bit_width_, 8))); + result &= bit_writer_.PutAligned(current_value_, + static_cast(BitUtil::Ceil(bit_width_, 8))); DCHECK(result); num_buffered_values_ = 0; repeat_count_ = 0; @@ -552,7 +554,7 @@ inline void RleEncoder::FlushBufferedValues(bool done) { inline int RleEncoder::Flush() { if (literal_count_ > 0 || repeat_count_ > 0 || num_buffered_values_ > 0) { bool all_repeat = literal_count_ == 0 && (repeat_count_ == num_buffered_values_ || - num_buffered_values_ == 0); + num_buffered_values_ == 0); // There is something pending, figure out if it's a repeated or literal run if (repeat_count_ > 0 && all_repeat) { FlushRepeatedRun(); diff --git a/cpp/src/arrow/util/sse-util.h b/cpp/src/arrow/util/sse-util.h index 570c4057a7573..a0ec8a2e93911 100644 --- a/cpp/src/arrow/util/sse-util.h +++ b/cpp/src/arrow/util/sse-util.h @@ -53,8 +53,8 @@ static const int STRCMP_MODE = /// Precomputed mask values up to 16 bits. static const int SSE_BITMASK[CHARS_PER_128_BIT_REGISTER] = { - 1 << 0, 1 << 1, 1 << 2, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9, - 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, + 1 << 0, 1 << 1, 1 << 2, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, + 1 << 8, 1 << 9, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, }; } // namespace SSEUtil diff --git a/cpp/src/arrow/util/stl.h b/cpp/src/arrow/util/stl.h index d58689b748896..4b8916f6eaa54 100644 --- a/cpp/src/arrow/util/stl.h +++ b/cpp/src/arrow/util/stl.h @@ -40,8 +40,8 @@ inline std::vector DeleteVectorElement(const std::vector& values, size_t i } template -inline std::vector AddVectorElement( - const std::vector& values, size_t index, const T& new_element) { +inline std::vector AddVectorElement(const std::vector& values, size_t index, + const T& new_element) { DCHECK_LE(index, values.size()); std::vector out; out.reserve(values.size() + 1); diff --git a/cpp/src/arrow/util/string.h b/cpp/src/arrow/util/string.h index 5d9fdc88ced7e..6e70ddcccefec 100644 --- a/cpp/src/arrow/util/string.h +++ b/cpp/src/arrow/util/string.h @@ -46,7 +46,9 @@ static inline Status ParseHexValue(const char* data, uint8_t* out) { const char* pos2 = std::lower_bound(kAsciiTable, kAsciiTable + 16, c2); // Error checking - if (*pos1 != c1 || *pos2 != c2) { return Status::Invalid("Encountered non-hex digit"); } + if (*pos1 != c1 || *pos2 != c2) { + return Status::Invalid("Encountered non-hex digit"); + } *out = static_cast((pos1 - kAsciiTable) << 4 | (pos2 - kAsciiTable)); return Status::OK(); diff --git a/cpp/src/plasma/client.cc b/cpp/src/plasma/client.cc index 62bfbec21c466..bbbeb55813ccf 100644 --- a/cpp/src/plasma/client.cc +++ b/cpp/src/plasma/client.cc @@ -88,7 +88,9 @@ uint8_t* PlasmaClient::lookup_or_mmap(int fd, int store_fd_val, int64_t map_size uint8_t* result = reinterpret_cast( mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)); // TODO(pcm): Don't fail here, instead return a Status. 
- if (result == MAP_FAILED) { ARROW_LOG(FATAL) << "mmap failed"; } + if (result == MAP_FAILED) { + ARROW_LOG(FATAL) << "mmap failed"; + } close(fd); ClientMmapTableEntry& entry = mmap_table_[store_fd_val]; entry.pointer = result; @@ -106,8 +108,8 @@ uint8_t* PlasmaClient::lookup_mmapped_file(int store_fd_val) { return entry->second.pointer; } -void PlasmaClient::increment_object_count( - const ObjectID& object_id, PlasmaObject* object, bool is_sealed) { +void PlasmaClient::increment_object_count(const ObjectID& object_id, PlasmaObject* object, + bool is_sealed) { // Increment the count of the object to track the fact that it is being used. // The corresponding decrement should happen in PlasmaClient::Release. auto elem = objects_in_use_.find(object_id); @@ -142,7 +144,7 @@ void PlasmaClient::increment_object_count( } Status PlasmaClient::Create(const ObjectID& object_id, int64_t data_size, - uint8_t* metadata, int64_t metadata_size, uint8_t** data) { + uint8_t* metadata, int64_t metadata_size, uint8_t** data) { ARROW_LOG(DEBUG) << "called plasma_create on conn " << store_conn_ << " with size " << data_size << " and metadata size " << metadata_size; RETURN_NOT_OK(SendCreateRequest(store_conn_, object_id, data_size, metadata_size)); @@ -183,7 +185,7 @@ Status PlasmaClient::Create(const ObjectID& object_id, int64_t data_size, } Status PlasmaClient::Get(const ObjectID* object_ids, int64_t num_objects, - int64_t timeout_ms, ObjectBuffer* object_buffers) { + int64_t timeout_ms, ObjectBuffer* object_buffers) { // Fill out the info for the objects that are already in use locally. bool all_present = true; for (int i = 0; i < num_objects; ++i) { @@ -213,7 +215,9 @@ Status PlasmaClient::Get(const ObjectID* object_ids, int64_t num_objects, } } - if (all_present) { return Status::OK(); } + if (all_present) { + return Status::OK(); + } // If we get here, then the objects aren't all currently in use by this // client, so we need to send a request to the plasma store. @@ -223,8 +227,8 @@ Status PlasmaClient::Get(const ObjectID* object_ids, int64_t num_objects, std::vector received_object_ids(num_objects); std::vector object_data(num_objects); PlasmaObject* object; - RETURN_NOT_OK(ReadGetReply( - buffer.data(), received_object_ids.data(), object_data.data(), num_objects)); + RETURN_NOT_OK(ReadGetReply(buffer.data(), received_object_ids.data(), + object_data.data(), num_objects)); for (int i = 0; i < num_objects; ++i) { DCHECK(received_object_ids[i] == object_ids[i]); @@ -330,7 +334,7 @@ Status PlasmaClient::Release(const ObjectID& object_id) { // pending release calls, and there are at least some pending release calls in // the release_history list, then release some objects. while ((in_use_object_bytes_ > std::min(kL3CacheSizeBytes, store_capacity_ / 100) || - release_history_.size() > config_.release_delay) && + release_history_.size() > config_.release_delay) && release_history_.size() > 0) { // Perform a release for the object ID for the first pending release. RETURN_NOT_OK(PerformRelease(release_history_.back())); @@ -364,8 +368,9 @@ static void ComputeBlockHash(const unsigned char* data, int64_t nbytes, uint64_t *hash = XXH64_digest(&hash_state); } -static inline bool compute_object_hash_parallel( - XXH64_state_t* hash_state, const unsigned char* data, int64_t nbytes) { +static inline bool compute_object_hash_parallel(XXH64_state_t* hash_state, + const unsigned char* data, + int64_t nbytes) { // Note that this function will likely be faster if the address of data is // aligned on a 64-byte boundary. 
const int num_threads = kThreadPoolSize; @@ -380,16 +385,18 @@ static inline bool compute_object_hash_parallel( // Each thread gets a "chunk" of k blocks, except the suffix thread. for (int i = 0; i < num_threads; i++) { - threadpool_[i] = std::thread(ComputeBlockHash, - reinterpret_cast(data_address) + i * chunk_size, chunk_size, - &threadhash[i]); + threadpool_[i] = std::thread( + ComputeBlockHash, reinterpret_cast(data_address) + i * chunk_size, + chunk_size, &threadhash[i]); } - ComputeBlockHash( - reinterpret_cast(right_address), suffix, &threadhash[num_threads]); + ComputeBlockHash(reinterpret_cast(right_address), suffix, + &threadhash[num_threads]); // Join the threads. for (auto& t : threadpool_) { - if (t.joinable()) { t.join(); } + if (t.joinable()) { + t.join(); + } } XXH64_update(hash_state, (unsigned char*)threadhash, sizeof(threadhash)); @@ -400,13 +407,13 @@ static uint64_t compute_object_hash(const ObjectBuffer& obj_buffer) { XXH64_state_t hash_state; XXH64_reset(&hash_state, XXH64_DEFAULT_SEED); if (obj_buffer.data_size >= kBytesInMB) { - compute_object_hash_parallel( - &hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size); + compute_object_hash_parallel(&hash_state, (unsigned char*)obj_buffer.data, + obj_buffer.data_size); } else { XXH64_update(&hash_state, (unsigned char*)obj_buffer.data, obj_buffer.data_size); } - XXH64_update( - &hash_state, (unsigned char*)obj_buffer.metadata, obj_buffer.metadata_size); + XXH64_update(&hash_state, (unsigned char*)obj_buffer.metadata, + obj_buffer.metadata_size); return XXH64_digest(&hash_state); } @@ -483,8 +490,8 @@ Status PlasmaClient::Subscribe(int* fd) { return Status::OK(); } -Status PlasmaClient::GetNotification( - int fd, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size) { +Status PlasmaClient::GetNotification(int fd, ObjectID* object_id, int64_t* data_size, + int64_t* metadata_size) { uint8_t* notification = read_message_async(fd); if (notification == NULL) { return Status::IOError("Failed to read object notification from Plasma socket"); @@ -504,7 +511,7 @@ Status PlasmaClient::GetNotification( } Status PlasmaClient::Connect(const std::string& store_socket_name, - const std::string& manager_socket_name, int release_delay) { + const std::string& manager_socket_name, int release_delay) { store_conn_ = connect_ipc_sock_retry(store_socket_name, -1, -1); if (manager_socket_name != "") { manager_conn_ = connect_ipc_sock_retry(manager_socket_name, -1, -1); @@ -548,9 +555,7 @@ Status PlasmaClient::Fetch(int num_object_ids, const ObjectID* object_ids) { return SendFetchRequest(manager_conn_, object_ids, num_object_ids); } -int PlasmaClient::get_manager_fd() { - return manager_conn_; -} +int PlasmaClient::get_manager_fd() { return manager_conn_; } Status PlasmaClient::Info(const ObjectID& object_id, int* object_status) { ARROW_CHECK(manager_conn_ >= 0); @@ -565,7 +570,8 @@ Status PlasmaClient::Info(const ObjectID& object_id, int* object_status) { } Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_requests, - int num_ready_objects, int64_t timeout_ms, int* num_objects_ready) { + int num_ready_objects, int64_t timeout_ms, + int* num_objects_ready) { ARROW_CHECK(manager_conn_ >= 0); ARROW_CHECK(num_object_requests > 0); ARROW_CHECK(num_ready_objects > 0); @@ -577,7 +583,7 @@ Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_req } RETURN_NOT_OK(SendWaitRequest(manager_conn_, object_requests, num_object_requests, - num_ready_objects, timeout_ms)); + 
num_ready_objects, timeout_ms)); std::vector buffer; RETURN_NOT_OK(PlasmaReceive(manager_conn_, MessageType_PlasmaWaitReply, &buffer)); RETURN_NOT_OK(ReadWaitReply(buffer.data(), object_requests, &num_ready_objects)); @@ -588,7 +594,9 @@ Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_req int status = object_requests[i].status; switch (type) { case PLASMA_QUERY_LOCAL: - if (status == ObjectStatus_Local) { *num_objects_ready += 1; } + if (status == ObjectStatus_Local) { + *num_objects_ready += 1; + } break; case PLASMA_QUERY_ANYWHERE: if (status == ObjectStatus_Local || status == ObjectStatus_Remote) { @@ -604,4 +612,4 @@ Status PlasmaClient::Wait(int64_t num_object_requests, ObjectRequest* object_req return Status::OK(); } -} // namespace plasma +} // namespace plasma diff --git a/cpp/src/plasma/client.h b/cpp/src/plasma/client.h index d9ed9f7c26698..cc05a064511fe 100644 --- a/cpp/src/plasma/client.h +++ b/cpp/src/plasma/client.h @@ -91,7 +91,7 @@ class ARROW_EXPORT PlasmaClient { /// and not evicted to avoid too many munmaps. /// @return The return status. Status Connect(const std::string& store_socket_name, - const std::string& manager_socket_name, int release_delay); + const std::string& manager_socket_name, int release_delay); /// Create an object in the Plasma Store. Any metadata for this object must be /// be passed in when the object is created. @@ -108,7 +108,7 @@ class ARROW_EXPORT PlasmaClient { /// @param data The address of the newly created object will be written here. /// @return The return status. Status Create(const ObjectID& object_id, int64_t data_size, uint8_t* metadata, - int64_t metadata_size, uint8_t** data); + int64_t metadata_size, uint8_t** data); /// Get some objects from the Plasma Store. This function will block until the /// objects have all been created and sealed in the Plasma Store or the @@ -126,7 +126,7 @@ class ARROW_EXPORT PlasmaClient { /// size field is -1, then the object was not retrieved. /// @return The return status. Status Get(const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms, - ObjectBuffer* object_buffers); + ObjectBuffer* object_buffers); /// Tell Plasma that the client no longer needs the object. This should be /// called @@ -203,8 +203,8 @@ class ARROW_EXPORT PlasmaClient { /// @param data_size Out parameter, the data size of the object that was sealed. /// @param metadata_size Out parameter, the metadata size of the object that was sealed. /// @return The return status. - Status GetNotification( - int fd, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size); + Status GetNotification(int fd, ObjectID* object_id, int64_t* data_size, + int64_t* metadata_size); /// Disconnect from the local plasma instance, including the local store and /// manager. @@ -271,7 +271,7 @@ class ARROW_EXPORT PlasmaClient { /// min_num_ready_objects this means that timeout expired. /// @return The return status. Status Wait(int64_t num_object_requests, ObjectRequest* object_requests, - int num_ready_objects, int64_t timeout_ms, int* num_objects_ready); + int num_ready_objects, int64_t timeout_ms, int* num_objects_ready); /// Transfer local object to a different plasma manager. 
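A hedged end-to-end sketch of the client API documented above; the socket path, the 'a'-filled object ID, and the Seal() call (which does not appear in these hunks) are illustrative assumptions, and error handling is reduced to the RETURN_NOT_OK macro used elsewhere in client.cc:

    #include <cstring>
    #include <string>
    #include "plasma/client.h"

    // Illustrative flow: connect, create and fill an object, seal it, get it
    // back, then release it.
    arrow::Status PutAndGetExample() {
      plasma::PlasmaClient client;
      RETURN_NOT_OK(client.Connect("/tmp/plasma_store", "", /*release_delay=*/64));
      plasma::ObjectID id =
          plasma::ObjectID::from_binary(std::string(plasma::kUniqueIDSize, 'a'));
      uint8_t* data;
      RETURN_NOT_OK(client.Create(id, /*data_size=*/128, nullptr, /*metadata_size=*/0, &data));
      std::memset(data, 0, 128);
      RETURN_NOT_OK(client.Seal(id));  // Seal() assumed; not shown in these hunks
      plasma::ObjectBuffer buffer;
      RETURN_NOT_OK(client.Get(&id, /*num_objects=*/1, /*timeout_ms=*/-1, &buffer));
      return client.Release(id);
    }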
/// @@ -315,8 +315,8 @@ class ARROW_EXPORT PlasmaClient { uint8_t* lookup_mmapped_file(int store_fd_val); - void increment_object_count( - const ObjectID& object_id, PlasmaObject* object, bool is_sealed); + void increment_object_count(const ObjectID& object_id, PlasmaObject* object, + bool is_sealed); /// File descriptor of the Unix domain socket that connects to the store. int store_conn_; @@ -348,6 +348,6 @@ class ARROW_EXPORT PlasmaClient { int64_t store_capacity_; }; -} // namespace plasma +} // namespace plasma #endif // PLASMA_CLIENT_H diff --git a/cpp/src/plasma/common.cc b/cpp/src/plasma/common.cc index a5f530e202ff4..d7a7965078533 100644 --- a/cpp/src/plasma/common.cc +++ b/cpp/src/plasma/common.cc @@ -41,13 +41,9 @@ UniqueID UniqueID::from_binary(const std::string& binary) { return id; } -const uint8_t* UniqueID::data() const { - return id_; -} +const uint8_t* UniqueID::data() const { return id_; } -uint8_t* UniqueID::mutable_data() { - return id_; -} +uint8_t* UniqueID::mutable_data() { return id_; } std::string UniqueID::binary() const { return std::string(reinterpret_cast(id_), kUniqueIDSize); @@ -87,4 +83,4 @@ Status plasma_error_status(int plasma_error) { ARROW_EXPORT int ObjectStatusLocal = ObjectStatus_Local; ARROW_EXPORT int ObjectStatusRemote = ObjectStatus_Remote; -} // namespace plasma +} // namespace plasma diff --git a/cpp/src/plasma/common.h b/cpp/src/plasma/common.h index 6f2d4dd841b88..2b71da67015cd 100644 --- a/cpp/src/plasma/common.h +++ b/cpp/src/plasma/common.h @@ -95,6 +95,6 @@ enum ObjectRequestType { extern int ObjectStatusLocal; extern int ObjectStatusRemote; -} // namespace plasma +} // namespace plasma #endif // PLASMA_COMMON_H diff --git a/cpp/src/plasma/events.cc b/cpp/src/plasma/events.cc index 675424d5c2f1c..f98ced2faf180 100644 --- a/cpp/src/plasma/events.cc +++ b/cpp/src/plasma/events.cc @@ -21,8 +21,8 @@ namespace plasma { -void EventLoop::file_event_callback( - aeEventLoop* loop, int fd, void* context, int events) { +void EventLoop::file_event_callback(aeEventLoop* loop, int fd, void* context, + int events) { FileCallback* callback = reinterpret_cast(context); (*callback)(events); } @@ -34,12 +34,12 @@ int EventLoop::timer_event_callback(aeEventLoop* loop, TimerID timer_id, void* c constexpr int kInitialEventLoopSize = 1024; -EventLoop::EventLoop() { - loop_ = aeCreateEventLoop(kInitialEventLoopSize); -} +EventLoop::EventLoop() { loop_ = aeCreateEventLoop(kInitialEventLoopSize); } bool EventLoop::add_file_event(int fd, int events, const FileCallback& callback) { - if (file_callbacks_.find(fd) != file_callbacks_.end()) { return false; } + if (file_callbacks_.find(fd) != file_callbacks_.end()) { + return false; + } auto data = std::unique_ptr(new FileCallback(callback)); void* context = reinterpret_cast(data.get()); // Try to add the file descriptor. @@ -47,7 +47,9 @@ bool EventLoop::add_file_event(int fd, int events, const FileCallback& callback) // If it cannot be added, increase the size of the event loop. if (err == AE_ERR && errno == ERANGE) { err = aeResizeSetSize(loop_, 3 * aeGetSetSize(loop_) / 2); - if (err != AE_OK) { return false; } + if (err != AE_OK) { + return false; + } err = aeCreateFileEvent(loop_, fd, events, EventLoop::file_event_callback, context); } // In any case, test if there were errors. 
@@ -63,9 +65,7 @@ void EventLoop::remove_file_event(int fd) { file_callbacks_.erase(fd); } -void EventLoop::run() { - aeMain(loop_); -} +void EventLoop::run() { aeMain(loop_); } int64_t EventLoop::add_timer(int64_t timeout, const TimerCallback& callback) { auto data = std::unique_ptr(new TimerCallback(callback)); @@ -82,4 +82,4 @@ int EventLoop::remove_timer(int64_t timer_id) { return err; } -} // namespace plasma +} // namespace plasma diff --git a/cpp/src/plasma/events.h b/cpp/src/plasma/events.h index b989b7fac2476..6cb5b73fe94eb 100644 --- a/cpp/src/plasma/events.h +++ b/cpp/src/plasma/events.h @@ -98,6 +98,6 @@ class EventLoop { std::unordered_map> timer_callbacks_; }; -} // namespace plasma +} // namespace plasma #endif // PLASMA_EVENTS diff --git a/cpp/src/plasma/eviction_policy.cc b/cpp/src/plasma/eviction_policy.cc index ef18e33372998..6c2309f1709d2 100644 --- a/cpp/src/plasma/eviction_policy.cc +++ b/cpp/src/plasma/eviction_policy.cc @@ -36,8 +36,8 @@ void LRUCache::remove(const ObjectID& key) { item_map_.erase(it); } -int64_t LRUCache::choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict) { +int64_t LRUCache::choose_objects_to_evict(int64_t num_bytes_required, + std::vector* objects_to_evict) { int64_t bytes_evicted = 0; auto it = item_list_.end(); while (bytes_evicted < num_bytes_required && it != item_list_.begin()) { @@ -51,8 +51,8 @@ int64_t LRUCache::choose_objects_to_evict( EvictionPolicy::EvictionPolicy(PlasmaStoreInfo* store_info) : memory_used_(0), store_info_(store_info) {} -int64_t EvictionPolicy::choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict) { +int64_t EvictionPolicy::choose_objects_to_evict(int64_t num_bytes_required, + std::vector* objects_to_evict) { int64_t bytes_evicted = cache_.choose_objects_to_evict(num_bytes_required, objects_to_evict); /* Update the LRU cache. */ @@ -69,8 +69,8 @@ void EvictionPolicy::object_created(const ObjectID& object_id) { cache_.add(object_id, entry->info.data_size + entry->info.metadata_size); } -bool EvictionPolicy::require_space( - int64_t size, std::vector* objects_to_evict) { +bool EvictionPolicy::require_space(int64_t size, + std::vector* objects_to_evict) { /* Check if there is enough space to create the object. */ int64_t required_space = memory_used_ + size - store_info_->memory_capacity; int64_t num_bytes_evicted; @@ -95,17 +95,17 @@ bool EvictionPolicy::require_space( return num_bytes_evicted >= required_space; } -void EvictionPolicy::begin_object_access( - const ObjectID& object_id, std::vector* objects_to_evict) { +void EvictionPolicy::begin_object_access(const ObjectID& object_id, + std::vector* objects_to_evict) { /* If the object is in the LRU cache, remove it. 
*/ cache_.remove(object_id); } -void EvictionPolicy::end_object_access( - const ObjectID& object_id, std::vector* objects_to_evict) { +void EvictionPolicy::end_object_access(const ObjectID& object_id, + std::vector* objects_to_evict) { auto entry = store_info_->objects[object_id].get(); /* Add the object to the LRU cache.*/ cache_.add(object_id, entry->info.data_size + entry->info.metadata_size); } -} // namespace plasma +} // namespace plasma diff --git a/cpp/src/plasma/eviction_policy.h b/cpp/src/plasma/eviction_policy.h index c4f218328312d..dd1c873466ec9 100644 --- a/cpp/src/plasma/eviction_policy.h +++ b/cpp/src/plasma/eviction_policy.h @@ -42,8 +42,8 @@ class LRUCache { void remove(const ObjectID& key); - int64_t choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict); + int64_t choose_objects_to_evict(int64_t num_bytes_required, + std::vector* objects_to_evict); private: /// A doubly-linked list containing the items in the cache and @@ -95,8 +95,8 @@ class EvictionPolicy { /// @param objects_to_evict The object IDs that were chosen for eviction will /// be stored into this vector. /// @return Void. - void begin_object_access( - const ObjectID& object_id, std::vector* objects_to_evict); + void begin_object_access(const ObjectID& object_id, + std::vector* objects_to_evict); /// This method will be called whenever an object in the Plasma store that was /// being used is no longer being used. When this method is called, the @@ -107,8 +107,8 @@ class EvictionPolicy { /// @param objects_to_evict The object IDs that were chosen for eviction will /// be stored into this vector. /// @return Void. - void end_object_access( - const ObjectID& object_id, std::vector* objects_to_evict); + void end_object_access(const ObjectID& object_id, + std::vector* objects_to_evict); /// Choose some objects to evict from the Plasma store. When this method is /// called, the eviction policy will assume that the objects chosen to be @@ -121,8 +121,8 @@ class EvictionPolicy { /// @param objects_to_evict The object IDs that were chosen for eviction will /// be stored into this vector. /// @return The total number of bytes of space chosen to be evicted. - int64_t choose_objects_to_evict( - int64_t num_bytes_required, std::vector* objects_to_evict); + int64_t choose_objects_to_evict(int64_t num_bytes_required, + std::vector* objects_to_evict); private: /// The amount of memory (in bytes) currently being used. @@ -133,6 +133,6 @@ class EvictionPolicy { LRUCache cache_; }; -} // namespace plasma +} // namespace plasma #endif // PLASMA_EVICTION_POLICY_H diff --git a/cpp/src/plasma/io.cc b/cpp/src/plasma/io.cc index 5875ebb7ae611..e3b6b617fbc06 100644 --- a/cpp/src/plasma/io.cc +++ b/cpp/src/plasma/io.cc @@ -38,7 +38,9 @@ Status WriteBytes(int fd, uint8_t* cursor, size_t length) { * advance the cursor, and decrease the amount left to write. 
*/ nbytes = write(fd, cursor + offset, bytesleft); if (nbytes < 0) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; } + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { + continue; + } return Status::IOError(std::string(strerror(errno))); } else if (nbytes == 0) { return Status::IOError("Encountered unexpected EOF"); @@ -67,7 +69,9 @@ Status ReadBytes(int fd, uint8_t* cursor, size_t length) { while (bytesleft > 0) { nbytes = read(fd, cursor + offset, bytesleft); if (nbytes < 0) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { continue; } + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { + continue; + } return Status::IOError(std::string(strerror(errno))); } else if (0 == nbytes) { return Status::IOError("Encountered unexpected EOF"); @@ -83,14 +87,16 @@ Status ReadBytes(int fd, uint8_t* cursor, size_t length) { Status ReadMessage(int fd, int64_t* type, std::vector* buffer) { int64_t version; RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(&version), sizeof(version)), - *type = DISCONNECT_CLIENT); + *type = DISCONNECT_CLIENT); ARROW_CHECK(version == PLASMA_PROTOCOL_VERSION) << "version = " << version; size_t length; RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(type), sizeof(*type)), - *type = DISCONNECT_CLIENT); + *type = DISCONNECT_CLIENT); RETURN_NOT_OK_ELSE(ReadBytes(fd, reinterpret_cast(&length), sizeof(length)), - *type = DISCONNECT_CLIENT); - if (length > buffer->size()) { buffer->resize(length); } + *type = DISCONNECT_CLIENT); + if (length > buffer->size()) { + buffer->resize(length); + } RETURN_NOT_OK_ELSE(ReadBytes(fd, buffer->data(), length), *type = DISCONNECT_CLIENT); return Status::OK(); } @@ -105,7 +111,7 @@ int bind_ipc_sock(const std::string& pathname, bool shall_listen) { /* Tell the system to allow the port to be reused. */ int on = 1; if (setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&on), - sizeof(on)) < 0) { + sizeof(on)) < 0) { ARROW_LOG(ERROR) << "setsockopt failed for pathname " << pathname; close(socket_fd); return -1; @@ -134,16 +140,22 @@ int bind_ipc_sock(const std::string& pathname, bool shall_listen) { return socket_fd; } -int connect_ipc_sock_retry( - const std::string& pathname, int num_retries, int64_t timeout) { +int connect_ipc_sock_retry(const std::string& pathname, int num_retries, + int64_t timeout) { /* Pick the default values if the user did not specify. */ - if (num_retries < 0) { num_retries = NUM_CONNECT_ATTEMPTS; } - if (timeout < 0) { timeout = CONNECT_TIMEOUT_MS; } + if (num_retries < 0) { + num_retries = NUM_CONNECT_ATTEMPTS; + } + if (timeout < 0) { + timeout = CONNECT_TIMEOUT_MS; + } int fd = -1; for (int num_attempts = 0; num_attempts < num_retries; ++num_attempts) { fd = connect_ipc_sock(pathname); - if (fd >= 0) { break; } + if (fd >= 0) { + break; + } if (num_attempts == 0) { ARROW_LOG(ERROR) << "Connection to socket failed for pathname " << pathname; } @@ -151,7 +163,9 @@ int connect_ipc_sock_retry( usleep(static_cast(timeout * 1000)); } /* If we could not connect to the socket, exit. 
*/ - if (fd == -1) { ARROW_LOG(FATAL) << "Could not connect to socket " << pathname; } + if (fd == -1) { + ARROW_LOG(FATAL) << "Could not connect to socket " << pathname; + } return fd; } diff --git a/cpp/src/plasma/malloc.cc b/cpp/src/plasma/malloc.cc index 97c9a16c0c0bd..77a8afea75424 100644 --- a/cpp/src/plasma/malloc.cc +++ b/cpp/src/plasma/malloc.cc @@ -69,13 +69,9 @@ std::unordered_map mmap_records; constexpr int GRANULARITY_MULTIPLIER = 2; -static void* pointer_advance(void* p, ptrdiff_t n) { - return (unsigned char*)p + n; -} +static void* pointer_advance(void* p, ptrdiff_t n) { return (unsigned char*)p + n; } -static void* pointer_retreat(void* p, ptrdiff_t n) { - return (unsigned char*)p - n; -} +static void* pointer_retreat(void* p, ptrdiff_t n) { return (unsigned char*)p - n; } static ptrdiff_t pointer_distance(void const* pfrom, void const* pto) { return (unsigned char const*)pto - (unsigned char const*)pfrom; @@ -87,8 +83,8 @@ int create_buffer(int64_t size) { int fd; #ifdef _WIN32 if (!CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, - (DWORD)((uint64_t)size >> (CHAR_BIT * sizeof(DWORD))), (DWORD)(uint64_t)size, - NULL)) { + (DWORD)((uint64_t)size >> (CHAR_BIT * sizeof(DWORD))), + (DWORD)(uint64_t)size, NULL)) { fd = -1; } #else @@ -127,7 +123,9 @@ void* fake_mmap(size_t size) { int fd = create_buffer(size); ARROW_CHECK(fd >= 0) << "Failed to create buffer during mmap"; void* pointer = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - if (pointer == MAP_FAILED) { return pointer; } + if (pointer == MAP_FAILED) { + return pointer; + } /* Increase dlmalloc's allocation granularity directly. */ mparams.granularity *= GRANULARITY_MULTIPLIER; @@ -156,7 +154,9 @@ int fake_munmap(void* addr, int64_t size) { } int r = munmap(addr, size); - if (r == 0) { close(entry->second.fd); } + if (r == 0) { + close(entry->second.fd); + } mmap_records.erase(entry); return r; diff --git a/cpp/src/plasma/plasma.cc b/cpp/src/plasma/plasma.cc index bfed5009b6157..87082817f12e9 100644 --- a/cpp/src/plasma/plasma.cc +++ b/cpp/src/plasma/plasma.cc @@ -27,7 +27,9 @@ namespace plasma { int warn_if_sigpipe(int status, int client_sock) { - if (status >= 0) { return 0; } + if (status >= 0) { + return 0; + } if (errno == EPIPE || errno == EBADF || errno == ECONNRESET) { ARROW_LOG(WARNING) << "Received SIGPIPE, BAD FILE DESCRIPTOR, or ECONNRESET when " "sending a message to client on fd " @@ -58,11 +60,13 @@ uint8_t* create_object_info_buffer(ObjectInfoT* object_info) { return notification; } -ObjectTableEntry* get_object_table_entry( - PlasmaStoreInfo* store_info, const ObjectID& object_id) { +ObjectTableEntry* get_object_table_entry(PlasmaStoreInfo* store_info, + const ObjectID& object_id) { auto it = store_info->objects.find(object_id); - if (it == store_info->objects.end()) { return NULL; } + if (it == store_info->objects.end()) { + return NULL; + } return it->second.get(); } -} // namespace plasma +} // namespace plasma diff --git a/cpp/src/plasma/plasma.h b/cpp/src/plasma/plasma.h index db8669ff0ddc2..d60e5a8363035 100644 --- a/cpp/src/plasma/plasma.h +++ b/cpp/src/plasma/plasma.h @@ -138,8 +138,8 @@ struct PlasmaStoreInfo { /// @param object_id The object_id of the entry we are looking for. /// @return The entry associated with the object_id or NULL if the object_id /// is not present. 
-ObjectTableEntry* get_object_table_entry( - PlasmaStoreInfo* store_info, const ObjectID& object_id); +ObjectTableEntry* get_object_table_entry(PlasmaStoreInfo* store_info, + const ObjectID& object_id); /// Print a warning if the status is less than zero. This should be used to check /// the success of messages sent to plasma clients. We print a warning instead of @@ -159,6 +159,6 @@ int warn_if_sigpipe(int status, int client_sock); uint8_t* create_object_info_buffer(ObjectInfoT* object_info); -} // namespace plasma +} // namespace plasma #endif // PLASMA_PLASMA_H diff --git a/cpp/src/plasma/protocol.cc b/cpp/src/plasma/protocol.cc index 2998c68b82785..19240bb4b8a1f 100644 --- a/cpp/src/plasma/protocol.cc +++ b/cpp/src/plasma/protocol.cc @@ -29,7 +29,7 @@ using flatbuffers::uoffset_t; flatbuffers::Offset>> to_flatbuffer(flatbuffers::FlatBufferBuilder* fbb, const ObjectID* object_ids, - int64_t num_objects) { + int64_t num_objects) { std::vector> results; for (int64_t i = 0; i < num_objects; i++) { results.push_back(fbb->CreateString(object_ids[i].binary())); @@ -47,23 +47,23 @@ Status PlasmaReceive(int sock, int64_t message_type, std::vector* buffe template Status PlasmaSend(int sock, int64_t message_type, flatbuffers::FlatBufferBuilder* fbb, - const Message& message) { + const Message& message) { fbb->Finish(message); return WriteMessage(sock, message_type, fbb->GetSize(), fbb->GetBufferPointer()); } // Create messages. -Status SendCreateRequest( - int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size) { +Status SendCreateRequest(int sock, ObjectID object_id, int64_t data_size, + int64_t metadata_size) { flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaCreateRequest( - fbb, fbb.CreateString(object_id.binary()), data_size, metadata_size); + auto message = CreatePlasmaCreateRequest(fbb, fbb.CreateString(object_id.binary()), + data_size, metadata_size); return PlasmaSend(sock, MessageType_PlasmaCreateRequest, &fbb, message); } -Status ReadCreateRequest( - uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size) { +Status ReadCreateRequest(uint8_t* data, ObjectID* object_id, int64_t* data_size, + int64_t* metadata_size) { DCHECK(data); auto message = flatbuffers::GetRoot(data); *data_size = message->data_size(); @@ -72,14 +72,14 @@ Status ReadCreateRequest( return Status::OK(); } -Status SendCreateReply( - int sock, ObjectID object_id, PlasmaObject* object, int error_code) { +Status SendCreateReply(int sock, ObjectID object_id, PlasmaObject* object, + int error_code) { flatbuffers::FlatBufferBuilder fbb; PlasmaObjectSpec plasma_object(object->handle.store_fd, object->handle.mmap_size, - object->data_offset, object->data_size, object->metadata_offset, - object->metadata_size); - auto message = CreatePlasmaCreateReply( - fbb, fbb.CreateString(object_id.binary()), &plasma_object, (PlasmaError)error_code); + object->data_offset, object->data_size, + object->metadata_offset, object->metadata_size); + auto message = CreatePlasmaCreateReply(fbb, fbb.CreateString(object_id.binary()), + &plasma_object, (PlasmaError)error_code); return PlasmaSend(sock, MessageType_PlasmaCreateReply, &fbb, message); } @@ -117,8 +117,8 @@ Status ReadSealRequest(uint8_t* data, ObjectID* object_id, unsigned char* digest Status SendSealReply(int sock, ObjectID object_id, int error) { flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaSealReply( - fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); + auto message = CreatePlasmaSealReply(fbb, 
fbb.CreateString(object_id.binary()), + (PlasmaError)error); return PlasmaSend(sock, MessageType_PlasmaSealReply, &fbb, message); } @@ -146,8 +146,8 @@ Status ReadReleaseRequest(uint8_t* data, ObjectID* object_id) { Status SendReleaseReply(int sock, ObjectID object_id, int error) { flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaReleaseReply( - fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); + auto message = CreatePlasmaReleaseReply(fbb, fbb.CreateString(object_id.binary()), + (PlasmaError)error); return PlasmaSend(sock, MessageType_PlasmaReleaseReply, &fbb, message); } @@ -175,8 +175,8 @@ Status ReadDeleteRequest(uint8_t* data, ObjectID* object_id) { Status SendDeleteReply(int sock, ObjectID object_id, int error) { flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaDeleteReply( - fbb, fbb.CreateString(object_id.binary()), (PlasmaError)error); + auto message = CreatePlasmaDeleteReply(fbb, fbb.CreateString(object_id.binary()), + (PlasmaError)error); return PlasmaSend(sock, MessageType_PlasmaDeleteReply, &fbb, message); } @@ -205,12 +205,12 @@ Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objec return Status::OK(); } -Status SendStatusReply( - int sock, ObjectID object_ids[], int object_status[], int64_t num_objects) { +Status SendStatusReply(int sock, ObjectID object_ids[], int object_status[], + int64_t num_objects) { flatbuffers::FlatBufferBuilder fbb; auto message = CreatePlasmaStatusReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), - fbb.CreateVector(object_status, num_objects)); + fbb.CreateVector(object_status, num_objects)); return PlasmaSend(sock, MessageType_PlasmaStatusReply, &fbb, message); } @@ -220,8 +220,8 @@ int64_t ReadStatusReply_num_objects(uint8_t* data) { return message->object_ids()->size(); } -Status ReadStatusReply( - uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects) { +Status ReadStatusReply(uint8_t* data, ObjectID object_ids[], int object_status[], + int64_t num_objects) { DCHECK(data); auto message = flatbuffers::GetRoot(data); for (uoffset_t i = 0; i < num_objects; ++i) { @@ -271,9 +271,7 @@ Status SendConnectRequest(int sock) { return PlasmaSend(sock, MessageType_PlasmaConnectRequest, &fbb, message); } -Status ReadConnectRequest(uint8_t* data) { - return Status::OK(); -} +Status ReadConnectRequest(uint8_t* data) { return Status::OK(); } Status SendConnectReply(int sock, int64_t memory_capacity) { flatbuffers::FlatBufferBuilder fbb; @@ -318,16 +316,16 @@ Status ReadEvictReply(uint8_t* data, int64_t& num_bytes) { // Get messages. 
-Status SendGetRequest( - int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms) { +Status SendGetRequest(int sock, const ObjectID* object_ids, int64_t num_objects, + int64_t timeout_ms) { flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaGetRequest( - fbb, to_flatbuffer(&fbb, object_ids, num_objects), timeout_ms); + auto message = CreatePlasmaGetRequest(fbb, to_flatbuffer(&fbb, object_ids, num_objects), + timeout_ms); return PlasmaSend(sock, MessageType_PlasmaGetRequest, &fbb, message); } -Status ReadGetRequest( - uint8_t* data, std::vector& object_ids, int64_t* timeout_ms) { +Status ReadGetRequest(uint8_t* data, std::vector& object_ids, + int64_t* timeout_ms) { DCHECK(data); auto message = flatbuffers::GetRoot(data); for (uoffset_t i = 0; i < message->object_ids()->size(); ++i) { @@ -338,7 +336,8 @@ Status ReadGetRequest( return Status::OK(); } -Status SendGetReply(int sock, ObjectID object_ids[], +Status SendGetReply( + int sock, ObjectID object_ids[], std::unordered_map& plasma_objects, int64_t num_objects) { flatbuffers::FlatBufferBuilder fbb; @@ -347,16 +346,17 @@ Status SendGetReply(int sock, ObjectID object_ids[], for (int i = 0; i < num_objects; ++i) { const PlasmaObject& object = plasma_objects[object_ids[i]]; objects.push_back(PlasmaObjectSpec(object.handle.store_fd, object.handle.mmap_size, - object.data_offset, object.data_size, object.metadata_offset, - object.metadata_size)); + object.data_offset, object.data_size, + object.metadata_offset, object.metadata_size)); } - auto message = CreatePlasmaGetReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), - fbb.CreateVectorOfStructs(objects.data(), num_objects)); + auto message = + CreatePlasmaGetReply(fbb, to_flatbuffer(&fbb, object_ids, num_objects), + fbb.CreateVectorOfStructs(objects.data(), num_objects)); return PlasmaSend(sock, MessageType_PlasmaGetReply, &fbb, message); } Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[], - int64_t num_objects) { + int64_t num_objects) { DCHECK(data); auto message = flatbuffers::GetRoot(data); for (uoffset_t i = 0; i < num_objects; ++i) { @@ -395,23 +395,23 @@ Status ReadFetchRequest(uint8_t* data, std::vector& object_ids) { // Wait messages. 
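The Get message helpers above follow the same receive-then-decode pattern as the rest of protocol.cc; a minimal server-side sketch, assuming the plasma:: qualification of the generated MessageType constant and reducing error handling to an early return:

    #include <cstdint>
    #include <vector>
    #include "plasma/protocol.h"

    // Illustrative only: read one PlasmaGetRequest from a client socket and
    // decode the requested object IDs and timeout.
    arrow::Status HandleGetRequestExample(int client_sock) {
      std::vector<uint8_t> buffer;
      RETURN_NOT_OK(plasma::PlasmaReceive(client_sock,
                                          plasma::MessageType_PlasmaGetRequest, &buffer));
      std::vector<plasma::ObjectID> object_ids;
      int64_t timeout_ms = -1;
      return plasma::ReadGetRequest(buffer.data(), object_ids, &timeout_ms);
    }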
Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests, - int num_ready_objects, int64_t timeout_ms) { + int num_ready_objects, int64_t timeout_ms) { flatbuffers::FlatBufferBuilder fbb; std::vector> object_request_specs; for (int i = 0; i < num_requests; i++) { - object_request_specs.push_back(CreateObjectRequestSpec(fbb, - fbb.CreateString(object_requests[i].object_id.binary()), + object_request_specs.push_back(CreateObjectRequestSpec( + fbb, fbb.CreateString(object_requests[i].object_id.binary()), object_requests[i].type)); } - auto message = CreatePlasmaWaitRequest( - fbb, fbb.CreateVector(object_request_specs), num_ready_objects, timeout_ms); + auto message = CreatePlasmaWaitRequest(fbb, fbb.CreateVector(object_request_specs), + num_ready_objects, timeout_ms); return PlasmaSend(sock, MessageType_PlasmaWaitRequest, &fbb, message); } Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests, - int64_t* timeout_ms, int* num_ready_objects) { + int64_t* timeout_ms, int* num_ready_objects) { DCHECK(data); auto message = flatbuffers::GetRoot(data); *num_ready_objects = message->num_ready_objects(); @@ -421,14 +421,14 @@ Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests, ObjectID object_id = ObjectID::from_binary(message->object_requests()->Get(i)->object_id()->str()); ObjectRequest object_request({object_id, message->object_requests()->Get(i)->type(), - ObjectStatus_Nonexistent}); + ObjectStatus_Nonexistent}); object_requests[object_id] = object_request; } return Status::OK(); } -Status SendWaitReply( - int sock, const ObjectRequestMap& object_requests, int num_ready_objects) { +Status SendWaitReply(int sock, const ObjectRequestMap& object_requests, + int num_ready_objects) { flatbuffers::FlatBufferBuilder fbb; std::vector> object_replies; @@ -443,8 +443,8 @@ Status SendWaitReply( return PlasmaSend(sock, MessageType_PlasmaWaitReply, &fbb, message); } -Status ReadWaitReply( - uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects) { +Status ReadWaitReply(uint8_t* data, ObjectRequest object_requests[], + int* num_ready_objects) { DCHECK(data); auto message = flatbuffers::GetRoot(data); @@ -485,16 +485,16 @@ Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* return Status::OK(); } -Status SendDataReply( - int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size) { +Status SendDataReply(int sock, ObjectID object_id, int64_t object_size, + int64_t metadata_size) { flatbuffers::FlatBufferBuilder fbb; - auto message = CreatePlasmaDataReply( - fbb, fbb.CreateString(object_id.binary()), object_size, metadata_size); + auto message = CreatePlasmaDataReply(fbb, fbb.CreateString(object_id.binary()), + object_size, metadata_size); return PlasmaSend(sock, MessageType_PlasmaDataReply, &fbb, message); } -Status ReadDataReply( - uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size) { +Status ReadDataReply(uint8_t* data, ObjectID* object_id, int64_t* object_size, + int64_t* metadata_size) { DCHECK(data); auto message = flatbuffers::GetRoot(data); *object_id = ObjectID::from_binary(message->object_id()->str()); @@ -503,4 +503,4 @@ Status ReadDataReply( return Status::OK(); } -} // namespace plasma +} // namespace plasma diff --git a/cpp/src/plasma/protocol.h b/cpp/src/plasma/protocol.h index 835c5a0b58978..bab08b6cbd8ef 100644 --- a/cpp/src/plasma/protocol.h +++ b/cpp/src/plasma/protocol.h @@ -21,8 +21,8 @@ #include #include "arrow/status.h" -#include 
"plasma/plasma_generated.h" #include "plasma/plasma.h" +#include "plasma/plasma_generated.h" namespace plasma { @@ -34,11 +34,11 @@ Status PlasmaReceive(int sock, int64_t message_type, std::vector* buffe /* Plasma Create message functions. */ -Status SendCreateRequest( - int sock, ObjectID object_id, int64_t data_size, int64_t metadata_size); +Status SendCreateRequest(int sock, ObjectID object_id, int64_t data_size, + int64_t metadata_size); -Status ReadCreateRequest( - uint8_t* data, ObjectID* object_id, int64_t* data_size, int64_t* metadata_size); +Status ReadCreateRequest(uint8_t* data, ObjectID* object_id, int64_t* data_size, + int64_t* metadata_size); Status SendCreateReply(int sock, ObjectID object_id, PlasmaObject* object, int error); @@ -56,18 +56,19 @@ Status ReadSealReply(uint8_t* data, ObjectID* object_id); /* Plasma Get message functions. */ -Status SendGetRequest( - int sock, const ObjectID* object_ids, int64_t num_objects, int64_t timeout_ms); +Status SendGetRequest(int sock, const ObjectID* object_ids, int64_t num_objects, + int64_t timeout_ms); -Status ReadGetRequest( - uint8_t* data, std::vector& object_ids, int64_t* timeout_ms); +Status ReadGetRequest(uint8_t* data, std::vector& object_ids, + int64_t* timeout_ms); -Status SendGetReply(int sock, ObjectID object_ids[], +Status SendGetReply( + int sock, ObjectID object_ids[], std::unordered_map& plasma_objects, int64_t num_objects); Status ReadGetReply(uint8_t* data, ObjectID object_ids[], PlasmaObject plasma_objects[], - int64_t num_objects); + int64_t num_objects); /* Plasma Release message functions. */ @@ -95,13 +96,13 @@ Status SendStatusRequest(int sock, const ObjectID* object_ids, int64_t num_objec Status ReadStatusRequest(uint8_t* data, ObjectID object_ids[], int64_t num_objects); -Status SendStatusReply( - int sock, ObjectID object_ids[], int object_status[], int64_t num_objects); +Status SendStatusReply(int sock, ObjectID object_ids[], int object_status[], + int64_t num_objects); int64_t ReadStatusReply_num_objects(uint8_t* data); -Status ReadStatusReply( - uint8_t* data, ObjectID object_ids[], int object_status[], int64_t num_objects); +Status ReadStatusReply(uint8_t* data, ObjectID object_ids[], int object_status[], + int64_t num_objects); /* Plasma Constains message functions. */ @@ -142,16 +143,16 @@ Status ReadFetchRequest(uint8_t* data, std::vector& object_ids); /* Plasma Wait message functions. */ Status SendWaitRequest(int sock, ObjectRequest object_requests[], int64_t num_requests, - int num_ready_objects, int64_t timeout_ms); + int num_ready_objects, int64_t timeout_ms); Status ReadWaitRequest(uint8_t* data, ObjectRequestMap& object_requests, - int64_t* timeout_ms, int* num_ready_objects); + int64_t* timeout_ms, int* num_ready_objects); -Status SendWaitReply( - int sock, const ObjectRequestMap& object_requests, int num_ready_objects); +Status SendWaitReply(int sock, const ObjectRequestMap& object_requests, + int num_ready_objects); -Status ReadWaitReply( - uint8_t* data, ObjectRequest object_requests[], int* num_ready_objects); +Status ReadWaitReply(uint8_t* data, ObjectRequest object_requests[], + int* num_ready_objects); /* Plasma Subscribe message functions. 
 /* Plasma Subscribe message functions. */
@@ -163,12 +164,12 @@ Status SendDataRequest(int sock, ObjectID object_id, const char* address, int po
 
 Status ReadDataRequest(uint8_t* data, ObjectID* object_id, char** address, int* port);
 
-Status SendDataReply(
-    int sock, ObjectID object_id, int64_t object_size, int64_t metadata_size);
+Status SendDataReply(int sock, ObjectID object_id, int64_t object_size,
+                     int64_t metadata_size);
 
-Status ReadDataReply(
-    uint8_t* data, ObjectID* object_id, int64_t* object_size, int64_t* metadata_size);
+Status ReadDataReply(uint8_t* data, ObjectID* object_id, int64_t* object_size,
+                     int64_t* metadata_size);
 
-} // namespace plasma
+}  // namespace plasma
 
 #endif /* PLASMA_PROTOCOL */
diff --git a/cpp/src/plasma/store.cc b/cpp/src/plasma/store.cc
index 8d4fb106f5367..9ceecdceadc5c 100644
--- a/cpp/src/plasma/store.cc
+++ b/cpp/src/plasma/store.cc
@@ -49,8 +49,8 @@
 #include 
 #include 
 
-#include "plasma/common_generated.h"
 #include "plasma/common.h"
+#include "plasma/common_generated.h"
 #include "plasma/fling.h"
 #include "plasma/io.h"
 #include "plasma/malloc.h"
@@ -89,8 +89,8 @@ GetRequest::GetRequest(Client* client, const std::vector<ObjectID>& object_ids)
       object_ids(object_ids.begin(), object_ids.end()),
       objects(object_ids.size()),
       num_satisfied(0) {
-  std::unordered_set<ObjectID> unique_ids(
-      object_ids.begin(), object_ids.end());
+  std::unordered_set<ObjectID> unique_ids(object_ids.begin(),
+                                          object_ids.end());
   num_objects_to_wait_for = unique_ids.size();
 }
 
@@ -118,7 +118,9 @@ PlasmaStore::~PlasmaStore() {
 // object's list of clients, otherwise do nothing.
 void PlasmaStore::add_client_to_object_clients(ObjectTableEntry* entry, Client* client) {
   // Check if this client is already using the object.
-  if (entry->clients.find(client) != entry->clients.end()) { return; }
+  if (entry->clients.find(client) != entry->clients.end()) {
+    return;
+  }
   // If there are no other clients using this object, notify the eviction policy
   // that the object is being used.
   if (entry->clients.size() == 0) {
@@ -133,7 +135,8 @@ void PlasmaStore::add_client_to_object_clients(ObjectTableEntry* entry, Client*
 
 // Create a new object buffer in the hash table.
 int PlasmaStore::create_object(const ObjectID& object_id, int64_t data_size,
-    int64_t metadata_size, Client* client, PlasmaObject* result) {
+                               int64_t metadata_size, Client* client,
+                               PlasmaObject* result) {
   ARROW_LOG(DEBUG) << "creating object " << object_id.hex();
   if (store_info_.objects.count(object_id) != 0) {
     // There is already an object with the same ID in the Plasma Store, so
@@ -160,7 +163,9 @@ int PlasmaStore::create_object(const ObjectID& object_id, int64_t data_size,
       delete_objects(objects_to_evict);
       // Return an error to the client if not enough space could be freed to
      // create the object.
-      if (!success) { return PlasmaError_OutOfMemory; }
+      if (!success) {
+        return PlasmaError_OutOfMemory;
+      }
     }
   } while (pointer == NULL);
   int fd;
@@ -212,7 +217,7 @@ void PlasmaObject_init(PlasmaObject* object, ObjectTableEntry* entry) {
 void PlasmaStore::return_from_get(GetRequest* get_req) {
   // Send the get reply to the client.
   Status s = SendGetReply(get_req->client->fd, &get_req->object_ids[0], get_req->objects,
-      get_req->object_ids.size());
+                          get_req->object_ids.size());
   warn_if_sigpipe(s.ok() ? 0 : -1, get_req->client->fd);
   // If we successfully sent the get reply message to the client, then also send
   // the file descriptors.
@@ -249,10 +254,14 @@ void PlasmaStore::return_from_get(GetRequest* get_req) {
     auto& get_requests = object_get_requests_[object_id];
     // Erase get_req from the vector.
     auto it = std::find(get_requests.begin(), get_requests.end(), get_req);
-    if (it != get_requests.end()) { get_requests.erase(it); }
+    if (it != get_requests.end()) {
+      get_requests.erase(it);
+    }
   }
   // Remove the get request.
-  if (get_req->timer != -1) { ARROW_CHECK(loop_->remove_timer(get_req->timer) == AE_OK); }
+  if (get_req->timer != -1) {
+    ARROW_CHECK(loop_->remove_timer(get_req->timer) == AE_OK);
+  }
   delete get_req;
 }
 
@@ -287,8 +296,8 @@ void PlasmaStore::update_object_get_requests(const ObjectID& object_id) {
   object_get_requests_.erase(object_id);
 }
 
-void PlasmaStore::process_get_request(
-    Client* client, const std::vector<ObjectID>& object_ids, int64_t timeout_ms) {
+void PlasmaStore::process_get_request(Client* client,
+                                      const std::vector<ObjectID>& object_ids,
+                                      int64_t timeout_ms) {
   // Create a get request for this object.
   GetRequest* get_req = new GetRequest(client, object_ids);
 
@@ -327,8 +337,8 @@ void PlasmaStore::process_get_request(
   }
 }
 
-int PlasmaStore::remove_client_from_object_clients(
-    ObjectTableEntry* entry, Client* client) {
+int PlasmaStore::remove_client_from_object_clients(ObjectTableEntry* entry,
+                                                   Client* client) {
   auto it = entry->clients.find(client);
   if (it != entry->clients.end()) {
     entry->clients.erase(it);
@@ -408,7 +418,9 @@ void PlasmaStore::connect_client(int listener_sock) {
   // TODO(pcm): Check return value.
   loop_->add_file_event(client_fd, kEventLoopRead, [this, client](int events) {
     Status s = process_message(client);
-    if (!s.ok()) { ARROW_LOG(FATAL) << "Failed to process file event: " << s; }
+    if (!s.ok()) {
+      ARROW_LOG(FATAL) << "Failed to process file event: " << s;
+    }
   });
   ARROW_LOG(DEBUG) << "New connection with fd " << client_fd;
 }
@@ -466,8 +478,9 @@ void PlasmaStore::send_notifications(int client_fd) {
       // at the end of the method.
      // TODO(pcm): Introduce status codes and check in case the file descriptor
      // is added twice.
-      loop_->add_file_event(client_fd, kEventLoopWrite,
-          [this, client_fd](int events) { send_notifications(client_fd); });
+      loop_->add_file_event(client_fd, kEventLoopWrite, [this, client_fd](int events) {
+        send_notifications(client_fd);
+      });
       break;
     } else {
       ARROW_LOG(WARNING) << "Failed to send notification to client on fd " << client_fd;
@@ -482,7 +495,8 @@ void PlasmaStore::send_notifications(int client_fd) {
     delete[] notification;
   }
   // Remove the sent notifications from the array.
-  it->second.object_notifications.erase(it->second.object_notifications.begin(),
+  it->second.object_notifications.erase(
+      it->second.object_notifications.begin(),
       it->second.object_notifications.begin() + num_processed);
 
   // Stop sending notifications if the pipe was broken.
@@ -492,7 +506,9 @@ void PlasmaStore::send_notifications(int client_fd) {
   }
 
   // If we have sent all notifications, remove the fd from the event loop.
-  if (it->second.object_notifications.empty()) { loop_->remove_file_event(client_fd); }
+  if (it->second.object_notifications.empty()) {
+    loop_->remove_file_event(client_fd);
+  }
 }
 
 void PlasmaStore::push_notification(ObjectInfoT* object_info) {
@@ -550,8 +566,8 @@ Status PlasmaStore::process_message(Client* client) {
       RETURN_NOT_OK(ReadCreateRequest(input, &object_id, &data_size, &metadata_size));
       int error_code =
           create_object(object_id, data_size, metadata_size, client, &object);
-      HANDLE_SIGPIPE(
-          SendCreateReply(client->fd, object_id, &object, error_code), client->fd);
+      HANDLE_SIGPIPE(SendCreateReply(client->fd, object_id, &object, error_code),
+                     client->fd);
       if (error_code == PlasmaError_OK) {
         warn_if_sigpipe(send_fd(client->fd, object.handle.store_fd), client->fd);
       }
@@ -593,8 +609,8 @@ Status PlasmaStore::process_message(Client* client) {
       subscribe_to_updates(client);
       break;
     case MessageType_PlasmaConnectRequest: {
-      HANDLE_SIGPIPE(
-          SendConnectReply(client->fd, store_info_.memory_capacity), client->fd);
+      HANDLE_SIGPIPE(SendConnectReply(client->fd, store_info_.memory_capacity),
+                     client->fd);
     } break;
     case DISCONNECT_CLIENT:
       ARROW_LOG(DEBUG) << "Disconnecting client on fd " << client->fd;
@@ -609,7 +625,9 @@ Status PlasmaStore::process_message(Client* client) {
 
 // Report "success" to valgrind.
 void signal_handler(int signal) {
-  if (signal == SIGTERM) { exit(0); }
+  if (signal == SIGTERM) {
+    exit(0);
+  }
 }
 
 void start_server(char* socket_name, int64_t system_memory) {
@@ -623,11 +641,11 @@ void start_server(char* socket_name, int64_t system_memory) {
   ARROW_CHECK(socket >= 0);
   // TODO(pcm): Check return value.
   loop.add_file_event(socket, kEventLoopRead,
-      [&store, socket](int events) { store.connect_client(socket); });
+                      [&store, socket](int events) { store.connect_client(socket); });
   loop.run();
 }
 
-} // namespace plasma
+}  // namespace plasma
 
 int main(int argc, char* argv[]) {
   signal(SIGTERM, plasma::signal_handler);
diff --git a/cpp/src/plasma/store.h b/cpp/src/plasma/store.h
index 27c3813da8e7a..fec25c133ce17 100644
--- a/cpp/src/plasma/store.h
+++ b/cpp/src/plasma/store.h
@@ -66,7 +66,7 @@ class PlasmaStore {
   /// cannot create the object. In this case, the client should not call
   /// plasma_release.
   int create_object(const ObjectID& object_id, int64_t data_size, int64_t metadata_size,
-      Client* client, PlasmaObject* result);
+                    Client* client, PlasmaObject* result);
 
   /// Delete objects that have been created in the hash table. This should only
   /// be called on objects that are returned by the eviction policy to evict.
@@ -87,8 +87,8 @@ class PlasmaStore {
   /// @param object_ids Object IDs of the objects to be gotten.
   /// @param timeout_ms The timeout for the get request in milliseconds.
   /// @return Void.
-  void process_get_request(
-      Client* client, const std::vector<ObjectID>& object_ids, int64_t timeout_ms);
+  void process_get_request(Client* client, const std::vector<ObjectID>& object_ids,
+                           int64_t timeout_ms);
 
   /// Seal an object. The object is now immutable and can be accessed with get.
   ///
@@ -168,6 +168,6 @@ class PlasmaStore {
   std::unordered_map<int, NotificationQueue> pending_notifications_;
 };
 
-} // namespace plasma
+}  // namespace plasma
 
 #endif // PLASMA_STORE_H
diff --git a/cpp/src/plasma/test/client_tests.cc b/cpp/src/plasma/test/client_tests.cc
index 6dc558e77078b..02b3832145186 100644
--- a/cpp/src/plasma/test/client_tests.cc
+++ b/cpp/src/plasma/test/client_tests.cc
@@ -127,7 +127,7 @@ TEST_F(TestPlasmaStore, MultipleGetTest) {
   ASSERT_EQ(object_buffer[1].data[0], 2);
 }
 
-} // namespace plasma
+}  // namespace plasma
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
diff --git a/cpp/src/plasma/test/serialization_tests.cc b/cpp/src/plasma/test/serialization_tests.cc
index 13938cd6fb042..aca47d3d6f911 100644
--- a/cpp/src/plasma/test/serialization_tests.cc
+++ b/cpp/src/plasma/test/serialization_tests.cc
@@ -167,11 +167,11 @@ TEST(PlasmaSerialization, GetReply) {
   ASSERT_EQ(object_ids[0], object_ids_return[0]);
   ASSERT_EQ(object_ids[1], object_ids_return[1]);
   ASSERT_EQ(memcmp(&plasma_objects[object_ids[0]], &plasma_objects_return[0],
-               sizeof(PlasmaObject)),
-      0);
+                   sizeof(PlasmaObject)),
+            0);
   ASSERT_EQ(memcmp(&plasma_objects[object_ids[1]], &plasma_objects_return[1],
-               sizeof(PlasmaObject)),
-      0);
+                   sizeof(PlasmaObject)),
+            0);
   close(fd);
 }
 
@@ -303,15 +303,15 @@ TEST(PlasmaSerialization, WaitRequest) {
   const int num_ready_objects_in = 1;
   int64_t timeout_ms = 1000;
 
-  ARROW_CHECK_OK(SendWaitRequest(
-      fd, &object_requests_in[0], num_objects_in, num_ready_objects_in, timeout_ms));
+  ARROW_CHECK_OK(SendWaitRequest(fd, &object_requests_in[0], num_objects_in,
+                                 num_ready_objects_in, timeout_ms));
   /* Read message back. */
   std::vector<uint8_t> data = read_message_from_file(fd, MessageType_PlasmaWaitRequest);
   int num_ready_objects_out;
   int64_t timeout_ms_read;
   ObjectRequestMap object_requests_out;
-  ARROW_CHECK_OK(ReadWaitRequest(
-      data.data(), object_requests_out, &timeout_ms_read, &num_ready_objects_out));
+  ARROW_CHECK_OK(ReadWaitRequest(data.data(), object_requests_out, &timeout_ms_read,
+                                 &num_ready_objects_out));
   ASSERT_EQ(num_objects_in, object_requests_out.size());
   ASSERT_EQ(num_ready_objects_out, num_ready_objects_in);
   for (int i = 0; i < num_objects_in; i++) {
@@ -389,4 +389,4 @@ TEST(PlasmaSerialization, DataReply) {
   ASSERT_EQ(metadata_size1, metadata_size2);
 }
 
-} // namespace plasma
+}  // namespace plasma