| #include "managed_store.hpp" |
| |
| #include <sys/types.h> |
| |
| #include <algorithm> |
| #include <chrono> |
| #include <cstddef> |
| #include <cstdint> |
| #include <exception> |
| #include <fstream> |
| #include <functional> |
| #include <ios> |
| #include <list> |
| #include <memory> |
| #include <optional> |
| #include <queue> |
| #include <span> |
| #include <string> |
| #include <string_view> |
| #include <thread> |
| #include <utility> |
| #include <vector> |
| |
| #include "absl/functional/any_invocable.h" |
| #include "absl/log/log.h" |
| #include "absl/status/status.h" |
| #include "absl/strings/string_view.h" |
| #include "absl/strings/substitute.h" |
| #include "absl/synchronization/mutex.h" |
| #include "boost/asio/io_context.hpp" |
| #include "boost/asio/steady_timer.hpp" |
| #include "boost/callable_traits/return_type.hpp" |
| #include "boost/system/detail/errc.hpp" |
| #include "boost/system/detail/error_code.hpp" |
| #include "boost/system/error_code.hpp" |
| #include "subscription.h" |
| #include "nlohmann/json.hpp" |
| #include "dbus_utility.hpp" // NOLINT |
| #include "async_resp.hpp" // NOLINT |
| #include "http_request.hpp" // NOLINT |
| #include "managed_store_clock.hpp" |
| #include "managed_store_types.hpp" |
| #include "sdbusplus/asio/connection.hpp" |
| #include "sdbusplus/bus/match.hpp" |
| #include "sdbusplus/message.hpp" |
| #include "sdbusplus/message/native_types.hpp" |
| |
| #ifdef UNIT_TEST_BUILD |
| #include "test/g3/mock_managed_store.hpp" // NOLINT |
| #endif |
| |
| // TODO:: move these to a separate file (future CL when we have more stores): |
| namespace managedStore { |
| namespace { |
| |
| using ::ecclesia::EventSourceId; |
| |
| void AppendWithSep(std::string& out, const char sep, const std::string& arg) { |
| if (!out.empty()) out += sep; |
| out += arg; |
| } |
| |
| // Joins the 'arg'/'args' strings with 'sep' and appends the result to 'out' |
| // without creating any intermediate string objects. |
| template <typename... Args> |
| void AppendWithSep(std::string& out, const char sep, const std::string& arg, |
| const Args&... args) { |
| AppendWithSep(out, sep, arg); |
| AppendWithSep(out, sep, args...); |
| } |
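| // For example, starting from an empty 'out', |
| // AppendWithSep(out, ',', "a", "b", "c") leaves out == "a,b,c". |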
| |
| } // namespace |
| |
| // TODO:: can ^ be a std::shared_ptr on `static ManagedObjectStore::instance()` |
| // when we implement graceful shutdowns |
| ManagedStore* InitializeManagedStore( |
| const ManagedObjectStoreConfig* cfg, |
| boost::asio::io_context* io_main_thread, |
| const std::shared_ptr<boost::asio::io_context>& io_context_worker_threads, |
| ecclesia::SubscriptionService* subscription_service, |
| sdbusplus::asio::connection* systemBus) { |
| LOG(INFO) << "InitializeManagedStore with systembus"; |
| |
| static bool initialize = false; |
| |
| // First initialization should only happen if cfg, io, and systemBus are not |
| // null |
| if (cfg != nullptr && io_main_thread != nullptr && systemBus != nullptr) { |
| // Once this is set to true, every future call will always return the |
| // managed store instance |
| initialize = true; |
| } |
| |
| if (!initialize) { |
| return nullptr; |
| } |
| |
| // NOLINTNEXTLINE(google3-runtime-global-variables) |
| static ManagedStore instance( |
| *cfg, *io_main_thread, std::this_thread::get_id(), |
| io_context_worker_threads, subscription_service, systemBus); |
| return &instance; |
| } |
| |
| ManagedStore* GetManagedObjectStore() { |
| return InitializeManagedStore(nullptr, nullptr, nullptr, nullptr, nullptr); |
| } |
| |
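| // Inserts `value` into the store under `key_id`, or replaces the existing |
| // entry while carrying over its usage statistics (lastUsed, lastUsedAges, |
| // numTimesUsed) and refresh counters. |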
| void ManagedObjectStore::UpdateOrInsertManagedObjects( |
| const std::string& key_id, const std::shared_ptr<ValueType>& value) { |
| absl::MutexLock lock(&managed_objects_mutex_); |
| // Initialize numTimesUsed: |
| value->numTimesUsed = 1; |
| auto current_object_iter = managedObjects.find(key_id); |
| // Update managed object if being tracked already. |
| if (current_object_iter != managedObjects.end()) { |
| // preserve the lastUsed: |
| value->lastUsed = current_object_iter->second->lastUsed; |
| // preserve the lastUsedAges: |
| value->lastUsedAges = current_object_iter->second->lastUsedAges; |
| // preserve the numTimesUsed: |
| value->numTimesUsed = current_object_iter->second->numTimesUsed; |
| // count the completed refreshes: |
| value->numRefreshesDone = current_object_iter->second->numRefreshesDone + 1; |
| // count the scheduled refreshes: |
| value->numRefreshesScheduled = |
| current_object_iter->second->numRefreshesScheduled; |
| |
| BMCWEB_LOG_STATEFUL_DEBUG << "Update existing entry:" << " Key: " << key_id |
| << " Value: " << value->toString(); |
| } |
| // upsert the key: |
| managedObjects.insert_or_assign(key_id, value); |
| |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "Updated Object! Key: " << key_id |
| << " New: " << managedObjects.find(key_id)->second->toString(); |
| } |
| |
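| // Handles a completed dbus read for `key`: stores the fresh value, drains |
| // the callbacks queued for this key, updates the pending-response counter, |
| // and queues/schedules the next refresh for the object. |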
| void ManagedObjectStore::processDbusResponse( |
| const KeyType& key, const std::shared_ptr<ValueType>& value, |
| bool isReadThroughRequest, std::optional<uint64_t> refresh_interval) { |
| std::string key_id = key.GetId(); |
| |
| BMCWEB_LOG_STATEFUL_DEBUG << "Updating store with fresh dbus response " |
| << " Key: " << key_id |
| << " Value: " << value->toString() |
| << " ec: " << value->errorCode.message(); |
| |
| UpdateOrInsertManagedObjects(key_id, value); |
| |
| // Callbacks to invoke after processing dbus response. |
| std::queue<ManagedStoreCb> cb_queue = GetCallbacksFromRequestCbMap(key_id); |
| |
| // Update pending dbus response count on receiving response. |
| { |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| this->managedStoreTracker.decrementPendingDbusResponses( |
| isReadThroughRequest); |
| } |
| |
| while (!cb_queue.empty()) { |
| auto& callback = cb_queue.front(); |
| std::move(callback)(value); |
| cb_queue.pop(); |
| } |
| |
| bool has_refresh_interval = refresh_interval.has_value(); |
| |
| // A refresh operation is queued into the priority queue if the object has |
| // no previously queued refresh operation, or if there is a custom refresh |
| // interval that needs to take priority over a previously queued refresh |
| // operation. |
| size_t& pq_entry_count = key_to_pq_entry_[key_id]; |
| if (pq_entry_count == 0 || has_refresh_interval) { |
| pq_entry_count++; |
| // Queue refresh operation. |
| pq.push( |
| PriorityQueueEntry(key, value->nextRefreshAt, value->lastRefreshAt)); |
| |
| // Cancel the currently scheduled refresh operation if the key's refresh |
| // interval is shorter than the remaining timer wait duration. |
| auto timer_wait_duration = timer.expiry() - clockNow(); |
| if (isSchedulerActive && has_refresh_interval && |
| timer_wait_duration > std::chrono::milliseconds(*refresh_interval)) { |
| timer.cancel(); |
| return; |
| } |
| } |
| |
| // Finally, call scheduler to queue up next refresh operations. |
| scheduleRefresh(); |
| } |
| |
| bool ManagedObjectStore::DetectChange(const ValueType& valueA, |
| const ValueType& valueB) { |
| return (valueA != valueB); |
| } |
| |
| void ManagedObjectStore::UpdateNumRefreshesScheduled(std::string& key_id) { |
| absl::MutexLock lock(&managed_objects_mutex_); |
| auto managed_object_iter = this->managedObjects.find(key_id); |
| if (managed_object_iter != this->managedObjects.end()) { |
| managed_object_iter->second->numRefreshesScheduled += 1; |
| |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "refreshObject: Key: " << managed_object_iter->first |
| << " Value before refresh: " << managed_object_iter->second->toString(); |
| } |
| } |
| |
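| // Issues the dbus call that matches the key's ManagedType and routes the |
| // response through processDbusResponse. `isReadThroughRequest` marks |
| // refreshes triggered by a cache read-through so the pending-response |
| // counters can distinguish them from scheduler-driven refreshes. |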
| void ManagedObjectStore::refreshObject( |
| const KeyType& key, bool isReadThroughRequest, |
| const std::shared_ptr<boost::asio::io_context::strand>& strand) { |
| std::string key_id = key.GetId(); |
| |
| UpdateNumRefreshesScheduled(key_id); |
| |
| std::optional<uint64_t> refresh_interval = GetRefreshInterval(key_id); |
| |
| switch (key.getManagedType()) { |
| case ManagedType::kManagedObject: { |
| getManagedObjectsFromDbusService( |
| strand, key.serviceName, key.objectPath, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedPropertyMap: { |
| getManagedPropertiesMapFromDbusService( |
| strand, key.serviceName, key.objectPath, key.interface, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedProperty: { |
| getManagedPropertiesFromDbusService( |
| strand, key.serviceName, key.objectPath, key.interface, key.property, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedSubtree: { |
| getManagedSubtreeFromDbusService( |
| strand, key.objectPath, key.treeDepth, key.interfaceList, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedSubtreePaths: { |
| getManagedSubtreePathsFromDbusService( |
| strand, key.objectPath, key.treeDepth, key.interfaceList, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedAssociatedSubtree: { |
| getManagedAssociatedSubtreeFromDbusService( |
| strand, key.associatedPath, key.objectPath, key.treeDepth, |
| key.interfaceList, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedAssociatedSubtreePaths: { |
| getManagedAssociatedSubtreePathsFromDbusService( |
| strand, key.associatedPath, key.objectPath, key.treeDepth, |
| key.interfaceList, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedMapperObject: { |
| getManagedMapperObjectFromDbusService( |
| strand, key.objectPath, key.interfaceList, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| case ManagedType::kManagedSystemdListUnits: { |
| getSystemdUnitsFromDbusService( |
| strand, |
| [this, keyType = key, isReadThroughRequest, refresh_interval]( |
| const std::shared_ptr<ValueType>& managedValue) mutable { |
| processDbusResponse(keyType, managedValue, isReadThroughRequest, |
| refresh_interval); |
| }, |
| refresh_interval); |
| break; |
| } |
| } |
| |
| // Record a pending refresh operation. |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| managedStoreTracker.incrementPendingDbusResponses(isReadThroughRequest); |
| } |
| |
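| // Drives the background refresh loop: skims the priority queue to drop |
| // evicted, LRU-expired, or redundant entries, then arms the timer for the |
| // next schedulable entry. A no-op while the scheduler is already active, in |
| // snapshot mode, or when the pending dbus response threshold is reached. |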
| void ManagedObjectStore::scheduleRefresh() { |
| if (this->isSchedulerActive || this->config.snap_shot_mode) { |
| // If the scheduler is already active, its timer callback will schedule the |
| // next refresh; in snapshot mode refreshes are never scheduled. |
| return; |
| } |
| |
| // Cannot schedule refresh if pending dbus responses reach threshold. |
| uint64_t count_pending_dbus_responses = 0; |
| { |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| count_pending_dbus_responses = |
| managedStoreTracker.countPendingDbusResponses; |
| if (count_pending_dbus_responses >= this->config.pendingDbusResponsesMax) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "scheduleRefresh: Pending io " |
| << "threshold reached."; |
| return; |
| } |
| } |
| |
| // Scan the priority queue to remove the entries that are no longer used and |
| // get to a schedulable entry. |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| bool can_schedule = false; |
| while (!can_schedule && !this->pq.empty()) { |
| const PriorityQueueEntry& pqEntry = pq.top(); |
| |
| // Get managedStore key to queue refresh of corresponding object in |
| // the managedStore. |
| const KeyType& key = pqEntry.managedStoreKey; |
| const std::string key_id = key.GetId(); |
| |
| // The object is no longer tracked: |
| if (!IsManagedObjectTracked(key_id)) { |
| pq.pop(); |
| key_to_pq_entry_.erase(key_id); |
| continue; |
| } |
| |
| std::chrono::steady_clock::time_point managed_store_entry_last_used; |
| std::chrono::steady_clock::time_point managed_store_entry_next_refresh_at; |
| std::chrono::steady_clock::time_point managed_store_entry_last_refresh_at; |
| { |
| absl::MutexLock lock(&managed_objects_mutex_); |
| auto managed_object_iter = managedObjects.find(key_id); |
| if (managed_object_iter == managedObjects.end()) continue; |
| |
| // Copy the managed_store_entry timepoints so that we don't need locks later. |
| // Note: these times may become outdated right after the following lines |
| // because of a concurrent object insertion or usage. This is ok: even if |
| // the priority queue entry is removed, the object will be enqueued again |
| // once it is found to be stale. Outdated `last_refresh_at` or |
| // `next_refresh_at` values likewise cannot cause fatal issues, because |
| // freshness is strictly checked. |
| managed_store_entry_last_used = managed_object_iter->second->lastUsed; |
| managed_store_entry_next_refresh_at = |
| managed_object_iter->second->nextRefreshAt; |
| managed_store_entry_last_refresh_at = |
| managed_object_iter->second->lastRefreshAt; |
| } |
| |
| const auto duration_since_used = |
| clockSince(managed_store_entry_last_used, timeNow); |
| bool is_recently_used = duration_since_used < this->config.tLRUThreshold; |
| |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "SinceUsed:" << " Key: " << key.toString() |
| << " duration_since_used: " |
| << clockSinceMilliseconds(managed_store_entry_last_used, timeNow) |
| << " is_recently_used: " << is_recently_used; |
| |
| // LRU Eviction: |
| if (!is_recently_used) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "Since Used: Evict:" << " Key: " << key_id; |
| EvictManagedObject(key_id); |
| pq.pop(); |
| key_to_pq_entry_.erase(key_id); |
| continue; |
| } |
| |
| // Handle recently refreshed object. |
| if (managed_store_entry_last_refresh_at > pqEntry.lastRefreshAt) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "PQ entry recently refreshed: " << key_id; |
| |
| // Get current refresh interval from the managedStore entry. |
| auto current_refresh_interval = managed_store_entry_next_refresh_at - |
| managed_store_entry_last_refresh_at; |
| |
| // Get refresh interval from the priority queue entry. |
| auto pq_entry_refresh_interval = |
| pqEntry.nextRefreshAt - pqEntry.lastRefreshAt; |
| size_t* pq_entry_count = nullptr; |
| if (auto find_key_count = key_to_pq_entry_.find(key_id); |
| find_key_count != key_to_pq_entry_.end()) { |
| pq_entry_count = &find_key_count->second; |
| } |
| |
| // The pqEntry is redundant and needs to be removed if the corresponding |
| // managed store entry was refreshed recently and the refresh interval |
| // configured in the managedStore entry is less than the refresh interval |
| // configured in the pqEntry. |
| if (pq_entry_count != nullptr && |
| (current_refresh_interval < pq_entry_refresh_interval || |
| (current_refresh_interval == pq_entry_refresh_interval && |
| *pq_entry_count > 1))) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "Removing redundant pqEntry: " |
| << pqEntry.managedStoreKey.toString(); |
| pq.pop(); |
| if (--(*pq_entry_count) == 0) { |
| key_to_pq_entry_.erase(key_id); |
| } |
| continue; |
| } |
| |
| // Re-queue the entry because the object was last refreshed due to a forced |
| // cache read-through. |
| BMCWEB_LOG_STATEFUL_DEBUG << "Re-queue pqEntry: " |
| << pqEntry.managedStoreKey.toString(); |
| const KeyType key_local = key; |
| pq.pop(); |
| pq.push(PriorityQueueEntry(key_local, managed_store_entry_next_refresh_at, |
| managed_store_entry_last_refresh_at)); |
| continue; |
| } |
| can_schedule = true; |
| } |
| |
| if (!can_schedule) { |
| return; |
| } |
| |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "scheduleRefresh: " << pq.size() |
| << " Pending dbus responses: " << count_pending_dbus_responses; |
| |
| PriorityQueueEntry pqEntry = pq.top(); |
| pq.pop(); |
| |
| // Get service name and object path to schedule refresh of the corresponding |
| // object in store. |
| const KeyType& managedStoreKey = pqEntry.managedStoreKey; |
| std::string key_id = managedStoreKey.GetId(); |
| |
| // Stop tracking the pq entry once it is pop'd from the queue. |
| if (auto iter = key_to_pq_entry_.find(key_id); |
| iter != key_to_pq_entry_.end()) { |
| if (--iter->second == 0) { |
| key_to_pq_entry_.erase(iter); |
| } |
| } |
| |
| // Set scheduler status to active before scheduling the refresh op to ensure |
| // an immediate callback keeps the system in the correct state. |
| isSchedulerActive = true; |
| |
| // If the refresh needs to be scheduled later in time, we set the timeout |
| // based on the time difference between the expected refresh time and |
| // current time. |
| // NOTE: the expected refresh time is set based on t_ttl. |
| const auto timerTimeout = |
| std::chrono::duration_cast<std::chrono::milliseconds>( |
| pqEntry.nextRefreshAt - timeNow); |
| timer.expires_after(timerTimeout); |
| |
| BMCWEB_LOG_STATEFUL_DEBUG << "Scheduling Refresh after " |
| << timerTimeout.count() << " milliseconds. " |
| << "Key: " << managedStoreKey.GetId(); |
| |
| timer.async_wait( |
| [this, managedStoreKey, pqEntry](boost::system::error_code ec) { |
| isSchedulerActive = false; |
| if (ec == boost::asio::error::operation_aborted) { |
| BMCWEB_LOG_STATEFUL_INFO << "Timer operation aborted for key: " |
| << managedStoreKey.GetId(); |
| // Another object needs to be refreshed earlier than the currently |
| // scheduled one. Push the key for the current object back into the PQ |
| // so it can be scheduled at a later point in time. |
| pq.push(PriorityQueueEntry(managedStoreKey, pqEntry.nextRefreshAt, |
| pqEntry.lastRefreshAt)); |
| scheduleRefresh(); |
| return; |
| } |
| |
| if (ec) { |
| BMCWEB_LOG_STATEFUL_ERROR << "Async_wait failed: " << ec; |
| return; |
| } |
| BMCWEB_LOG_STATEFUL_DEBUG << "Refreshing managed store entry. " |
| << "Key: " << managedStoreKey.GetId(); |
| refreshObject(managedStoreKey); |
| scheduleRefresh(); |
| }); |
| } |
| |
| // TODO - testing |
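| // Returns the systemd ListUnits result, serving from the managed store when |
| // it is enabled and reading through to dbus otherwise. |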
| void ManagedObjectStore::listUnits( |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSystemdListUnitsCb callback) { |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::SystemdListUnits& listUnits) mutable { |
| std::move(callback)(ec, listUnits); |
| }, |
| "org.freedesktop.systemd1", "/org/freedesktop/systemd1", |
| "org.freedesktop.systemd1.Manager", "ListUnits"); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedSystemdListUnits); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->SystemdListUnits.has_value()) { |
| std::move(callback)(valueType->errorCode, {}); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, |
| valueType->SystemdListUnits.value()); |
| }); |
| } |
| |
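| // Returns a single dbus property together with the age (in milliseconds) of |
| // the cached value; reads through to dbus when the store is disabled. |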
| void ManagedObjectStore::getProperty( |
| const KeyType& keyType, const ManagedObjectStoreContext& requestContext, |
| GetManagedPropertyCb callback) { |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::DbusVariantType& value) mutable { |
| std::move(callback)(ec, value, 0); |
| }, |
| keyType.serviceName, keyType.objectPath.str, |
| "org.freedesktop.DBus.Properties", "Get", keyType.interface, |
| keyType.property); |
| return; |
| } |
| |
| getStoredOrReadThrough( |
| keyType, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->managedProperty.has_value()) { |
| callback(valueType->errorCode, {}, 0); |
| return; |
| } |
| uint64_t age_in_ms = static_cast<uint64_t>( |
| std::chrono::duration_cast<std::chrono::milliseconds>( |
| clockNow() - valueType->lastRefreshAt) |
| .count()); |
| LOG(INFO) << "MAX AGE AT THIS POINT: " << age_in_ms; |
| std::move(callback)(valueType->errorCode, |
| valueType->managedProperty.value(), age_in_ms); |
| }); |
| } |
| |
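| // Returns all properties of `interface` on `objectPath`, serving from the |
| // managed store when enabled and reading through to dbus otherwise. |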
| void ManagedObjectStore::getAllProperties( |
| const std::string& service, const std::string& objectPath, |
| const std::string& interface, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedPropertyMapCb callback) { |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const ::dbus::utility::DBusPropertiesMap& properties) mutable { |
| std::move(callback)(ec, properties); |
| }, |
| service, objectPath, "org.freedesktop.DBus.Properties", "GetAll", |
| interface); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedPropertyMap, service, objectPath, |
| interface); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->managedPropertyMap.has_value()) { |
| std::move(callback)(valueType->errorCode, {}); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, |
| valueType->managedPropertyMap.value()); |
| }); |
| } |
| |
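| // Creates (or, for kCancelSubscription requests, removes) a dbus signal |
| // monitor for `match_expression` and records `event_source_id` on the |
| // request context so the subscription service can be notified when the |
| // signal fires. |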
| void ManagedObjectStore::subscribe( |
| const ManagedObjectStoreContext& requestContext, |
| const std::string& match_expression, |
| ecclesia::EventSourceId event_source_id) { |
| if (match_expression.empty()) { |
| LOG(WARNING) |
| << "D-Bus match expression is empty. Invalid call to subscribe!"; |
| return; |
| } |
| |
| if (requestContext.eventSourceIds == nullptr || |
| requestContext.request_type == |
| ManagedObjectStoreContext::RequestType::kQuery) { |
| LOG(WARNING) << "eventSourceIds is null. Invalid call to subscribe!"; |
| return; |
| } |
| |
| // Check if monitor already exists. |
| { |
| absl::MutexLock lock(&signal_monitor_mutex_); |
| if (auto find_monitor = signal_monitors_.find(event_source_id); |
| find_monitor != signal_monitors_.end()) { |
| if (requestContext.request_type == |
| ManagedObjectStoreContext::RequestType::kCancelSubscription) { |
| // Erase the monitor |
| signal_monitors_.erase(find_monitor); |
| LOG(WARNING) << "Erased D-Bus monitor for id: " |
| << event_source_id.ToString(); |
| return; |
| } |
| |
| LOG(WARNING) << "D-Bus monitor already created for id: " |
| << event_source_id.ToString(); |
| |
| // Record the monitor id even when a subscription already exists, since |
| // multiple Redfish resources can depend on the same dbus signal. |
| requestContext.eventSourceIds->push_back(std::move(event_source_id)); |
| return; |
| } |
| } |
| |
| boost::asio::post(io_context_main_thread_, [this, event_source_id, |
| match_expression] { |
| absl::MutexLock lock(&signal_monitor_mutex_); |
| signal_monitors_[event_source_id] = std::make_unique< |
| sdbusplus::bus::match_t>( |
| *system_bus_, match_expression, |
| [event_source_id, this](sdbusplus::message_t&) { |
| LOG(INFO) |
| << "Received dbus signal. Notifying Subscription service now!"; |
| auto status = |
| subscription_service_->Notify(event_source_id, absl::OkStatus()); |
| if (status.ok()) { |
| LOG(INFO) << "Notified successfully!"; |
| return; |
| } |
| LOG(WARNING) << "Error while sending notification: " << status; |
| if (status.code() == absl::StatusCode::kNotFound) { |
| absl::MutexLock lock(&signal_monitor_mutex_); |
| signal_monitors_.erase(event_source_id); |
| LOG(INFO) << "Erased D-Bus monitor for id: " |
| << event_source_id.ToString(); |
| } |
| }); |
| LOG(INFO) << "D-Bus monitor created. Event Source id: " |
| << event_source_id.ToString(); |
| }); |
| |
| // Record event source ids. |
| requestContext.eventSourceIds->push_back(std::move(event_source_id)); |
| } |
| |
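| // Subscribes to ObjectManager InterfacesAdded signals under `object_path`; |
| // the derived monitor id has the form "<object_path>|InterfacesAdded|EVENT". |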
| void ManagedObjectStore::subscribeToInterfacesAdded( |
| const std::string& object_path, |
| const ManagedObjectStoreContext& request_context) { |
| std::string match_expression = absl::Substitute( |
| "type='signal',interface='org.freedesktop.DBus.ObjectManager',path_" |
| "namespace='$0'," |
| "member='InterfacesAdded'", |
| object_path); |
| |
| std::string monitor_id; |
| AppendWithSep(monitor_id, '|', object_path, "InterfacesAdded", "EVENT"); |
| |
| ecclesia::EventSourceId event_source_id( |
| monitor_id, ecclesia::EventSourceId::Type::kDbusObjects); |
| |
| subscribe(request_context, match_expression, std::move(event_source_id)); |
| } |
| |
| void ManagedObjectStore::getSubTree( |
| const std::string& path, int32_t depth, |
| const std::vector<std::string>& interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSubtreeCb callback) { |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreeResponse& subtree) mutable { |
| std::move(callback)(ec, subtree); |
| }, |
| "xyz.openbmc_project.ObjectMapper", |
| "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetSubTree", path, depth, |
| interfaces); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedSubtree, path, depth, interfaces); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->managedSubtree.has_value()) { |
| std::move(callback)(valueType->errorCode, {}); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, |
| valueType->managedSubtree.value()); |
| }); |
| } |
| |
| void ManagedObjectStore::getSubTree( |
| const std::string& path, int32_t depth, |
| std::span<const std::string_view> interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSubtreeCb callback) { |
| std::vector<std::string> interfaces2; |
| for (const auto& s : interfaces) { |
| interfaces2.emplace_back(s); |
| } |
| |
| this->getSubTree(path, depth, interfaces2, requestContext, |
| std::move(callback)); |
| } |
| |
| void ManagedObjectStore::getSubTreePaths( |
| const std::string& path, int32_t depth, |
| std::span<const std::string_view> interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSubtreePathsCb callback) { |
| std::vector<std::string> interfaces2; |
| for (const auto& s : interfaces) { |
| interfaces2.emplace_back(s); |
| } |
| |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreePathsResponse& |
| subtreePaths) mutable { |
| std::move(callback)(ec, subtreePaths); |
| }, |
| "xyz.openbmc_project.ObjectMapper", |
| "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetSubTreePaths", path, depth, |
| interfaces2); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedSubtreePaths, path, depth, interfaces2); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->managedSubtreePaths.has_value()) { |
| std::move(callback)(valueType->errorCode, {}); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, |
| valueType->managedSubtreePaths.value()); |
| }); |
| } |
| |
| void ManagedObjectStore::getAssociatedSubTree( |
| const sdbusplus::message::object_path& associatedPath, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| std::span<const std::string_view> interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSubtreeCb callback) { |
| std::vector<std::string> interfaces2; |
| for (const auto& s : interfaces) { |
| interfaces2.emplace_back(s); |
| } |
| this->getAssociatedSubTree(associatedPath, path, depth, interfaces2, |
| requestContext, std::move(callback)); |
| } |
| |
| void ManagedObjectStore::getAssociatedSubTree( |
| const sdbusplus::message::object_path& associatedPath, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| const std::vector<std::string>& interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSubtreeCb callback) { |
| std::vector<std::string> interfaces2; |
| interfaces2.reserve(interfaces.size()); |
| for (const auto& s : interfaces) { |
| interfaces2.emplace_back(s); |
| } |
| |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreeResponse& subtree) mutable { |
| std::move(callback)(ec, subtree); |
| }, |
| "xyz.openbmc_project.ObjectMapper", |
| "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetAssociatedSubTree", |
| associatedPath, path, depth, interfaces2); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedAssociatedSubtree, associatedPath.str, |
| path.str, depth, interfaces2); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->managedSubtree.has_value()) { |
| std::move(callback)(valueType->errorCode, {}); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, |
| valueType->managedSubtree.value()); |
| }); |
| } |
| |
| void ManagedObjectStore::getAssociatedSubTreePaths( |
| const sdbusplus::message::object_path& associatedPath, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| std::span<const std::string_view> interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedSubtreePathsCb callback) { |
| std::vector<std::string> interfaces2; |
| for (const auto& s : interfaces) { |
| interfaces2.emplace_back(s); |
| } |
| |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreePathsResponse& |
| subtreePaths) mutable { |
| std::move(callback)(ec, subtreePaths); |
| }, |
| "xyz.openbmc_project.ObjectMapper", |
| "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetAssociatedSubTreePaths", |
| associatedPath, path, depth, interfaces2); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedAssociatedSubtreePaths, |
| associatedPath.str, path.str, depth, interfaces2); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (!valueType->managedSubtreePaths.has_value()) { |
| std::move(callback)(valueType->errorCode, {}); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, |
| valueType->managedSubtreePaths.value()); |
| }); |
| } |
| |
| void ManagedObjectStore::getDbusObject( |
| const std::string& path, std::span<const std::string_view> interfaces, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedMapperObjectCb callback) { |
| std::vector<std::string> interfaces2; |
| for (const auto& s : interfaces) { |
| interfaces2.emplace_back(s); |
| } |
| |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetObject& object) mutable { |
| std::move(callback)(ec, object); |
| }, |
| "xyz.openbmc_project.ObjectMapper", |
| "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetObject", path, interfaces2); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedMapperObject, path, interfaces2); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback{std::move(callback)}]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (valueType->mapperGetObject.has_value()) { |
| std::move(callback)(valueType->errorCode, |
| valueType->mapperGetObject.value()); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, {}); |
| }); |
| } |
| |
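| // GetManagedObjects entry point: counts the call for tracking, reads |
| // through to dbus when the store is disabled, and otherwise serves the |
| // cached managed object map via getStoredOrReadThrough. |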
| void ManagedObjectStore::getManagedObjectsWithContext( |
| const std::string& service, const sdbusplus::message::object_path& path, |
| const ManagedObjectStoreContext& requestContext, |
| GetManagedObjectsCb callback) { |
| { |
| uint64_t count_get_managed_objects = 0; |
| // total counter of calls: |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| count_get_managed_objects = |
| ++this->managedStoreTracker.countGetManagedObjects; |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "getManagedObjectsWithContext:" << " service: " << service |
| << " path: " << path.str |
| << " requestContext: " << requestContext.toString() |
| << " count: " << count_get_managed_objects; |
| } |
| |
| // When Object Store is disabled, the requests read through. |
| if (!this->isEnabled()) { |
| // TODO:: do we want to count this when the cache is actually disabled? |
| // We probably should, since the counter helps validate whether the store |
| // is enabled. |
| { |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| this->managedStoreTracker.countGetManagedObjectsCacheMiss += 1; |
| } |
| PostDbusCallToIoContextThreadSafe( |
| requestContext.GetStrand(), |
| [callback{std::move(callback)}]( |
| const boost::system::error_code& ec, |
| const dbus::utility::ManagedObjectType& objects) mutable { |
| std::move(callback)(ec, objects); |
| }, |
| service, path, "org.freedesktop.DBus.ObjectManager", |
| "GetManagedObjects"); |
| return; |
| } |
| |
| KeyType key_type(ManagedType::kManagedObject, service, path.str); |
| getStoredOrReadThrough( |
| key_type, requestContext, |
| [callback = std::move(callback)]( |
| const std::shared_ptr<ValueType>& valueType) mutable { |
| if (valueType->managedObject.has_value()) { |
| std::move(callback)(valueType->errorCode, |
| valueType->managedObject.value()); |
| return; |
| } |
| std::move(callback)(valueType->errorCode, {}); |
| }); |
| } |
| |
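| // Looks up `key_id` in the store and updates its usage statistics. Returns |
| // the cached value only if it can be served as-is; returns nullptr when the |
| // object is missing, stale, older than hintMaxAge, requested on behalf of |
| // an event (kOnEvent), or when a new custom refresh interval forces a |
| // read-through. |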
| std::shared_ptr<ValueType> ManagedObjectStore::IsManagedObjectCurrentAndTracked( |
| const std::string& key_id, const KeyType& keyType, |
| const ManagedObjectStoreContext& requestContext) { |
| absl::MutexLock lock(&managed_objects_mutex_); |
| |
| std::shared_ptr<ValueType> cached_object = nullptr; |
| bool is_object_current = false; |
| |
| auto managed_object_iter = managedObjects.find(key_id); |
| // The object is cached in the store: update its usage statistics and |
| // decide whether it can be served as-is. |
| if (managed_object_iter != managedObjects.end()) { |
| cached_object = managed_object_iter->second; |
| const std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration current_age = |
| clockSince(cached_object->lastRefreshAt, timeNow); |
| |
| // update lastUsed: |
| // NOMUTANTS -- TODO later |
| cached_object->lastUsed = timeNow; |
| // count number of times used: |
| cached_object->numTimesUsed += 1; |
| // update lastUsedAges: |
| if (!cached_object->lastUsedAges) { |
| cached_object->lastUsedAges = |
| std::make_shared<std::list<std::chrono::steady_clock::duration>>(); |
| } |
| cached_object->lastUsedAges->push_back(current_age); |
| while (cached_object->lastUsedAges->size() >= |
| managedStore::ValueType::kLastUsedAgesMaxSize) { |
| cached_object->lastUsedAges->pop_front(); |
| } |
| |
| LOG(INFO) << "Object found in store. " << " Key: " << key_id |
| << " CurrentAge: " |
| << clockDurationMilliseconds(current_age) |
| << " Context: " << requestContext.toString() |
| << " Value: " << cached_object->toString(); |
| |
| // check if the current object is stale: |
| bool is_object_stale = current_age >= (this->config.tfixedThreshold + |
| this->config.tgraceThreshold); |
| |
| // Always use the cache if snapshot mode is enabled. |
| if (config.snap_shot_mode) { |
| is_object_stale = false; |
| } |
| |
| // Evict the object from store as it has become stale. |
| if (is_object_stale) { |
| LOG(INFO) << "Object has become stale! " |
| << " service: " << keyType.serviceName |
| << " path: " << keyType.objectPath.str |
| << " interface: " << keyType.interface << " property: " |
| << keyType.property << " current_age: " |
| << clockDurationMilliseconds(current_age) |
| << " Context: " << requestContext.toString() |
| << " Value: " << cached_object->toString(); |
| managedObjects.erase(managed_object_iter->first); |
| } |
| |
| // can we use it as is: |
| is_object_current = !is_object_stale; |
| |
| if (requestContext.hintMaxAge > std::chrono::seconds(0)) { |
| // Honor hintMaxAge: the cached object is only current if it is not |
| // older than the hint; otherwise it must be refreshed. |
| is_object_current = (current_age <= requestContext.hintMaxAge); |
| { |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| this->managedStoreTracker.countGetManagedObjectsCacheMissMaxAge += 1; |
| } |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "apply:" << " hintMaxAgeMS: " |
| << clockDurationMilliseconds(requestContext.hintMaxAge); |
| } |
| |
| // Force a refresh if the request is associated with subscription. |
| if (requestContext.request_type == |
| ManagedObjectStoreContext::RequestType::kOnEvent) { |
| // TODO(rahulkpr): Need to log how many read throughs were due to |
| // events. |
| is_object_current = false; |
| } |
| } |
| |
| if (requestContext.HasCustomRefreshInterval()) { |
| std::optional<uint64_t> current_refresh_interval = |
| GetRefreshInterval(key_id); |
| // NO_CDC: HasCustomRefreshInterval checks for optional. |
| const uint64_t& refresh_interval_ms = *requestContext.refresh_interval_ms; |
| if (std::chrono::milliseconds(refresh_interval_ms) >= |
| this->config.tfixedThreshold) { |
| // Requested interval is not shorter than the global refresh interval. |
| // This is a no-op: just read from cache without changing intervals. |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "Ignoring custom refresh interval.\nRequested " |
| "refresh_interval_ms: " |
| << refresh_interval_ms << " is greater than global refresh interval: " |
| << config.tfixedThreshold << " for key: " << key_id; |
| } else if (!current_refresh_interval.has_value() || |
| (current_refresh_interval.has_value() && |
| refresh_interval_ms < *current_refresh_interval)) { |
| // Requested interval is less than the global refresh interval, and either |
| // no custom interval is set or the requested interval is shorter than the |
| // existing custom one. |
| // We need to do a cache read-through and update the refresh interval. |
| SetRefreshInterval(key_id, refresh_interval_ms); |
| // Read through is required to set custom interval. |
| is_object_current = false; |
| } |
| } |
| |
| if (!is_object_current) { |
| return nullptr; |
| } |
| |
| return cached_object; |
| } |
| |
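| // Core lookup path: serves the callback from the cache when the object is |
| // current, otherwise registers the callback for the key and triggers a |
| // read-through refresh (or fails immediately in snapshot mode). |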
| void ManagedObjectStore::getStoredOrReadThrough( |
| const KeyType& keyType, const ManagedObjectStoreContext& requestContext, |
| ManagedStoreCb callback) { |
| std::string key_id = keyType.GetId(); |
| |
| BMCWEB_LOG_DEBUG << __func__ << " Key: " << key_id |
| << " requestContext: " << requestContext.toString(); |
| |
| // Object is served from cache except in following scenarios: |
| // 1. Cache is cold. |
| // 2. Cached object is stale. |
| // 3. Custom refresh interval needs to be configured. |
| // 4. Request is associated with redfish event which requires a read |
| // through to create origin of condition. |
| |
| std::shared_ptr<ValueType> cached_object = |
| IsManagedObjectCurrentAndTracked(key_id, keyType, requestContext); |
| |
| bool is_object_current = cached_object != nullptr; |
| |
| // Serve from the store if the object is still current. |
| if (is_object_current) { |
| // Count the cache hit before invoking the user callback. |
| { |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| this->managedStoreTracker.countGetManagedObjectsCacheHit += 1; |
| } |
| BMCWEB_LOG_STATEFUL_DEBUG << "Cache hit: calling the user callback for " |
| << key_id; |
| // If we are in snapshot mode, we want to simulate asynchronous callback |
| // behavior to catch dangling references into the callback. |
| if (config.snap_shot_mode) { |
| boost::asio::post(io_context_main_thread_, |
| [callback{std::move(callback)}, |
| cached_object{std::move(cached_object)}]() mutable { |
| // already in the strand |
| std::move(callback)(cached_object); |
| }); |
| return; |
| } |
| callback(cached_object); |
| return; |
| } |
| |
| { |
| absl::MutexLock lock(&managed_store_tracker_mutex_); |
| this->managedStoreTracker.countGetManagedObjectsCacheMiss += 1; |
| } |
| |
| // If snapshot mode is enabled, we fail the request and return an error code. |
| if (this->config.snap_shot_mode) { |
| LOG(WARNING) << key_id << " not found in cache."; |
| LOG(WARNING) << "Cache miss in snapshot mode; no way to update the cache."; |
| callback(std::make_shared<ValueType>(boost::system::errc::make_error_code( |
| boost::system::errc::no_such_file_or_directory))); |
| return; |
| } |
| |
| bool need_refresh = true; |
| if (requestContext.GetStrand()) { |
| // From here on, the callback must only be invoked on the strand. |
| absl::AnyInvocable<void(const std::shared_ptr<ValueType>&)> |
| callback_in_strand = |
| [callback = std::move(callback), |
| strand = requestContext.GetStrand()]( |
| const std::shared_ptr<ValueType>& value) mutable { |
| std::function<void()> callback_wrapper = internal::CallAtMostOnce( |
| [callback(std::move(callback)), value]() mutable { |
| std::move(callback)(value); |
| }); |
| strand->post(std::move(callback_wrapper)); |
| }; |
| |
| // Handle concurrent requests for the same object by storing the callback |
| // for this request. |
| // There might be cases where a callback is enqueued but dequeued shortly |
| // afterwards, which makes `need_refresh` stale. This is fine: the |
| // following `refreshObject` will simply trigger a refresh earlier than |
| // strictly necessary. |
| need_refresh = InsertCallbackIntoRequestCbMapAndReturnTrueIfNew( |
| key_id, std::move(callback_in_strand)); |
| } else { |
| need_refresh = InsertCallbackIntoRequestCbMapAndReturnTrueIfNew( |
| key_id, std::move(callback)); |
| } |
| |
| if (!need_refresh) { |
| return; |
| } |
| |
| BMCWEB_LOG_DEBUG << "Fetching fresh object over dbus. " << " Key: " << key_id |
| << " requestContext: " << requestContext.toString(); |
| |
| // Note: This refresh operation is not rate limited, i.e. all incoming |
| // requests with a cache miss will continue to read through without a cap |
| // on pending i/o, since we don't want to block on the scheduler or send a |
| // failure response. |
| BMCWEB_LOG_DEBUG << "strand is " |
| << (requestContext.GetStrand() ? "set" : "null"); |
| refreshObject(keyType, true, requestContext.GetStrand()); |
| } |
| |
| void ManagedObjectStore::clearAllObjects() { |
| { |
| absl::MutexLock lock(&managed_objects_mutex_); |
| this->managedObjects.clear(); |
| } |
| clearTimeTrace(); |
| // std::priority_queue has no clear(), so replace it with a fresh instance; |
| // post to the main io context, matching the other posts in this file. |
| boost::asio::post(io_context_main_thread_, |
| [this]() { pq = ManagedStorePriorityQueue(); }); |
| } |
| |
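| // Calls into dbus service to get managed objects. |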
| void ManagedObjectStore::getManagedObjectsFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const std::string& service, const sdbusplus::message::object_path& path, |
| ManagedStoreCb callback, std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback{std::move(callback)}, refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::ManagedObjectType& objects) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(objects, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| service, path.str, "org.freedesktop.DBus.ObjectManager", |
| "GetManagedObjects"); |
| } |
| |
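| // Calls into dbus service (the object mapper) to get the mapper object. |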
| void ManagedObjectStore::getManagedMapperObjectFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const sdbusplus::message::object_path& path, |
| const std::vector<std::string>& interfaces, ManagedStoreCb callback, |
| std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback{std::move(callback)}, refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetObject& object) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(object, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| "xyz.openbmc_project.ObjectMapper", "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetObject", path.str, interfaces); |
| } |
| |
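| // Calls into dbus service to list systemd units. |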
| void ManagedObjectStore::getSystemdUnitsFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| ManagedStoreCb callback, std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback{std::move(callback)}, refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::SystemdListUnits& listUnits) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(listUnits, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| "org.freedesktop.systemd1", "/org/freedesktop/systemd1", |
| "org.freedesktop.systemd1.Manager", "ListUnits"); |
| } |
| |
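| // Calls into dbus service to get a single property. |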
| void ManagedObjectStore::getManagedPropertiesFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const std::string& service, const sdbusplus::message::object_path& path, |
| const std::string& interface, const std::string& property, |
| ManagedStoreCb callback, std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback{std::move(callback)}, refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::DbusVariantType& propertyValue) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(propertyValue, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| service, path.str, "org.freedesktop.DBus.Properties", "Get", interface, |
| property); |
| } |
| |
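| // Calls into dbus service to get all properties of an interface. |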
| void ManagedObjectStore::getManagedPropertiesMapFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const std::string& service, const sdbusplus::message::object_path& path, |
| const std::string& interface, ManagedStoreCb callback, |
| std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback{std::move(callback)}, refresh_interval]( |
| const boost::system::error_code ec, |
| const ::dbus::utility::DBusPropertiesMap& properties) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(properties, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| service, path, "org.freedesktop.DBus.Properties", "GetAll", interface); |
| } |
| |
| // Calls into dbus service to get subtree. |
| void ManagedObjectStore::getManagedSubtreeFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| const std::vector<std::string>& interfaces, ManagedStoreCb callback, |
| std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback(std::move(callback)), refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreeResponse& subtree) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(subtree, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| "xyz.openbmc_project.ObjectMapper", "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetSubTree", path.str, depth, |
| interfaces); |
| } |
| |
| // Calls into dbus service to get subtree paths. |
| void ManagedObjectStore::getManagedSubtreePathsFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| const std::vector<std::string>& interfaces, ManagedStoreCb callback, |
| std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback(std::move(callback)), refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreePathsResponse& |
| subtreePaths) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(subtreePaths, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| "xyz.openbmc_project.ObjectMapper", "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetSubTreePaths", path.str, depth, |
| interfaces); |
| } |
| |
| // Calls into dbus service to get subtree. |
| void ManagedObjectStore::getManagedAssociatedSubtreeFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const sdbusplus::message::object_path& associatedPath, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| const std::vector<std::string>& interfaces, ManagedStoreCb callback, |
| std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback(std::move(callback)), refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreeResponse& subtree) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(subtree, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| "xyz.openbmc_project.ObjectMapper", "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetAssociatedSubTree", |
| associatedPath, path, depth, interfaces); |
| } |
| |
| // Calls into dbus service to get subtree paths. |
| void ManagedObjectStore::getManagedAssociatedSubtreePathsFromDbusService( |
| const std::shared_ptr<boost::asio::io_context::strand>& strand, |
| const sdbusplus::message::object_path& associatedPath, |
| const sdbusplus::message::object_path& path, int32_t depth, |
| const std::vector<std::string>& interfaces, ManagedStoreCb callback, |
| std::optional<uint64_t> refresh_interval) { |
| PostDbusCallToIoContextThreadSafe( |
| strand, |
| [this, callback(std::move(callback)), refresh_interval]( |
| const boost::system::error_code& ec, |
| const dbus::utility::MapperGetSubTreePathsResponse& |
| subtreePaths) mutable { |
| std::chrono::steady_clock::time_point timeNow = clockNow(); |
| std::chrono::steady_clock::duration interval = |
| refresh_interval.has_value() |
| ? std::chrono::milliseconds(*refresh_interval) |
| : this->config.tfixedThreshold; |
| auto value = std::make_shared<ValueType>(subtreePaths, ec, timeNow, |
| timeNow + interval); |
| std::move(callback)(value); |
| }, |
| "xyz.openbmc_project.ObjectMapper", "/xyz/openbmc_project/object_mapper", |
| "xyz.openbmc_project.ObjectMapper", "GetAssociatedSubTreePaths", |
| associatedPath, path, depth, interfaces); |
| } |
| |
| std::string ManagedObjectStoreConfig::toString() const { |
| return this->toJson().dump(); |
| } |
| |
| void ManagedObjectStore::storeTimeTrace(const nlohmann::json& tTrace) { |
| absl::MutexLock lock(&time_trace_array_mutex_); |
| if (time_trace_array_.size() >= this->config.timetrace_size_max) { |
| time_trace_array_.erase(0); |
| } |
| time_trace_array_.push_back(tTrace); |
| } |
| |
| void ManagedObjectStore::clearTimeTrace() { |
| absl::MutexLock lock(&time_trace_array_mutex_); |
| time_trace_array_.clear(); |
| } |
| |
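| // Serializes the store contents to json and writes them to `filePath`. |
| // Returns false if serialization or the file write fails. |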
| bool ManagedObjectStore::serialize(std::string_view filePath) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "starting Serialized"; |
| nlohmann::json obj; |
| if (!toJson(obj)) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "unable to serialize data ManagedObjectStore"; |
| return false; |
| } |
| try { |
| // std::string_view is not guaranteed to be null-terminated, so copy it |
| // into a std::string before opening the file. |
| std::ofstream serializeFile(std::string(filePath), std::ios::out); |
| serializeFile << obj.dump(); |
| } catch (const std::exception& e) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "unable write serialzed data to file"; |
| BMCWEB_LOG_STATEFUL_DEBUG << "error message: " << e.what(); |
| return false; |
| } |
| return true; |
| } |
| |
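| // Reads `filePath`, parses it as json, and repopulates the store from it. |
| // Returns false if the file cannot be read or parsed. |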
| bool ManagedObjectStore::deserialize(std::string_view filePath) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "starting Deserialized"; |
| nlohmann::json obj; |
| std::optional<std::string> file_string = |
| serialize::ReadBinaryFileToString(filePath); |
| if (!file_string.has_value()) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "unable read the deserialzed file"; |
| return false; |
| } |
| // Parse without exceptions so that a corrupt file results in a clean |
| // failure instead of an uncaught throw. |
| obj = nlohmann::json::parse(file_string.value(), nullptr, |
| /*allow_exceptions=*/false); |
| if (obj.is_discarded()) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "unable to parse the serialized json file"; |
| return false; |
| } |
| BMCWEB_LOG_STATEFUL_DEBUG << "json file parsed"; |
| if (!fromJson(obj)) { |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "unable to deserialize ManagedObjectStore from json"; |
| return false; |
| } |
| BMCWEB_LOG_STATEFUL_DEBUG << "ManagedObjectStore deserialized from json"; |
| return true; |
| } |
| |
| bool ManagedObjectStore::toJson(nlohmann::json& obj) { |
| // Serialize every ManagedObjectsMap entry into a json array. |
| nlohmann::json jsonManagedObjects = nlohmann::json::array(); |
| absl::MutexLock lock(&managed_objects_mutex_); |
| for (const auto& [key, value] : managedObjects) { |
| nlohmann::json managedObject; |
| managedObject["key"] = key; |
| managedObject["value"] = value->serialize(); |
| jsonManagedObjects.push_back(managedObject); |
| } |
| obj["managedObject"] = jsonManagedObjects; |
| |
| return true; |
| } |
| |
| bool ManagedObjectStore::fromJson(const nlohmann::json& obj) { |
| // Guard against a malformed payload: operator[] on a const json with a |
| // missing key is undefined behavior. |
| if (!obj.contains("managedObject")) { |
| BMCWEB_LOG_STATEFUL_DEBUG << "missing 'managedObject' array in json"; |
| return false; |
| } |
| nlohmann::json jsonManagedObjects = obj["managedObject"]; |
| for (nlohmann::json& jmanagedObject : jsonManagedObjects) { |
| try { |
| nlohmann::json::reference ref = jmanagedObject.at("value"); |
| std::shared_ptr<ValueType> val = std::make_shared<ValueType>(ref); |
| std::string key = jmanagedObject.at("key"); |
| absl::MutexLock lock(&managed_objects_mutex_); |
| managedObjects.insert({key, val}); |
| } catch (const std::exception& e) { |
| BMCWEB_LOG_STATEFUL_DEBUG |
| << "unable to deserialize, error building managedObjects"; |
| BMCWEB_LOG_STATEFUL_DEBUG << "error message: " << e.what(); |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| nlohmann::json ManagedObjectStore::GetSubscriptionsToJSON() { |
| nlohmann::json json; |
| if (subscription_service_ == nullptr) { |
| json["Error"] = "SubscriptionService is null"; |
| return json; |
| } |
| return subscription_service_->GetSubscriptionsToJSON(); |
| } |
| |
| nlohmann::json ManagedObjectStore::GetEventsToJSON() { |
| nlohmann::json json; |
| if (subscription_service_ == nullptr) { |
| json["Error"] = "SubscriptionService is null"; |
| return json; |
| } |
| return subscription_service_->GetEventsToJSON(); |
| } |
| |
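| // Reports the EventSourceId of every active D-Bus signal monitor under the |
| // "DBusMonitors" array. |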
| nlohmann::json ManagedObjectStore::GetDBusMonitorsToJSON() { |
| nlohmann::json json; |
| nlohmann::json& monitors_json = json["DBusMonitors"]; |
| monitors_json = nlohmann::json::array(); |
| absl::MutexLock lock(&signal_monitor_mutex_); |
| for (const auto& [event_source_id, match] : signal_monitors_) { |
| nlohmann::json m_json; |
| m_json["EventSourceId"] = event_source_id.ToJSON(); |
| monitors_json.push_back(m_json); |
| } |
| return json; |
| } |
| |
| nlohmann::json ManagedObjectStore::GetEventsBySubscriptionIdToJSON( |
| size_t subscription_id) { |
| nlohmann::json json; |
| if (subscription_service_ == nullptr) { |
| json["Error"] = "SubscriptionService is null"; |
| return json; |
| } |
| return subscription_service_->GetEventsBySubscriptionIdToJSON( |
| subscription_id); |
| } |
| |
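| // Clears the event store and returns its (now empty) JSON representation, |
| // or an error object if the SubscriptionService is unavailable. |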
| nlohmann::json ManagedObjectStore::ClearEventStore() { |
| nlohmann::json json; |
| if (subscription_service_ == nullptr) { |
| json["Error"] = "SubscriptionService is null"; |
| return json; |
| } |
| subscription_service_->ClearEventStore(); |
| return subscription_service_->GetEventsToJSON(); |
| } |
| |
| nlohmann::json ManagedObjectStoreConfig::toJson() const { |
| nlohmann::json obj; |
| obj["is_enabled"] = this->isEnabled; |
| obj["pendingDbusResponsesMax"] = this->pendingDbusResponsesMax; |
| obj["tfixedThresholdMilliseconds"] = |
| clockDurationMilliseconds(this->tfixedThreshold); |
| obj["tgraceThresholdMilliseconds"] = |
| clockDurationMilliseconds(this->tgraceThreshold); |
| obj["tLRUThresholdMilliseconds"] = |
| clockDurationMilliseconds(this->tLRUThreshold); |
| obj["is_timetrace_enabled"] = this->timetrace; |
| obj["timetrace_size_max"] = this->timetrace_size_max; |
| return obj; |
| } |
| |
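| // Builds the unique cache key for this element by joining a type tag and |
| // the identifying fields with kSep (the interface list is itself joined |
| // with ','). For illustration only -- if kSep were '/', a property key |
| // would look roughly like |
| //   kManagedProperty/<serviceName>/<objectPath>/<interface>/<property> |
| // (the real separator is whatever kSep is defined as). |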
| std::string KeyType::GetId() const { |
| // Convert interface list to string |
| std::string interface_list_to_str; |
| for (const std::string& interface_str : interfaceList) { |
| AppendWithSep(interface_list_to_str, ',', interface_str); |
| } |
| |
| std::string ret; |
| switch (managedElementType) { |
| case ManagedType::kManagedObject: { |
| AppendWithSep(ret, kSep, "kManagedObject", serviceName, objectPath.str); |
| break; |
| } |
| case ManagedType::kManagedPropertyMap: { |
| AppendWithSep(ret, kSep, "kManagedPropertyMap", serviceName, |
| objectPath.str, interface); |
| break; |
| } |
| case ManagedType::kManagedProperty: { |
| AppendWithSep(ret, kSep, "kManagedProperty", serviceName, objectPath.str, |
| interface, property); |
| break; |
| } |
| case ManagedType::kManagedMapperObject: { |
| AppendWithSep(ret, kSep, "kManagedMapperObject", objectPath.str, |
| interface_list_to_str); |
| break; |
| } |
| case ManagedType::kManagedSystemdListUnits: { |
| AppendWithSep(ret, kSep, "kManagedSystemdListUnits"); |
| break; |
| } |
| case ManagedType::kManagedSubtree: { |
| AppendWithSep(ret, kSep, "kManagedSubtree", objectPath.str, |
| std::to_string(treeDepth), interface_list_to_str); |
| break; |
| } |
| case ManagedType::kManagedSubtreePaths: { |
| AppendWithSep(ret, kSep, "kManagedSubtreePaths", objectPath.str, |
| std::to_string(treeDepth), interface_list_to_str); |
| break; |
| } |
| case ManagedType::kManagedAssociatedSubtree: { |
| AppendWithSep(ret, kSep, "kManagedAssociatedSubtree", objectPath.str, |
| std::to_string(treeDepth), interface_list_to_str, |
| associatedPath.str); |
| break; |
| } |
| case ManagedType::kManagedAssociatedSubtreePaths: { |
| AppendWithSep(ret, kSep, "kManagedAssociatedSubtreePaths", objectPath.str, |
| std::to_string(treeDepth), interface_list_to_str, |
| associatedPath.str); |
| break; |
| } |
| } |
| return ret; |
| } |
| |
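| // Recursively estimates the payload size of a JSON value in bytes: 8 per |
| // number, 1 per boolean, the character/byte count for strings and binary |
| // values, and the sum over elements (plus key lengths) for arrays and |
| // objects. Structural overhead (braces, quotes, commas) is ignored. |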
| inline std::size_t GetEstimatedJsonSize(const nlohmann::json& root) { |
| if (root.is_null()) { |
| return 0; |
| } |
| if (root.is_number()) { |
| return 8; |
| } |
| if (root.is_boolean()) { |
| return 1; |
| } |
| if (root.is_string()) { |
| // Use get_ref to avoid copying the string just to read its length. |
| return root.get_ref<const nlohmann::json::string_t&>().size(); |
| } |
| if (root.is_binary()) { |
| return root.get_binary().size(); |
| } |
| const nlohmann::json::array_t* arr = |
| root.get_ptr<const nlohmann::json::array_t*>(); |
| if (arr != nullptr) { |
| std::size_t sum = 0; |
| for (const auto& element : *arr) { |
| sum += GetEstimatedJsonSize(element); |
| } |
| return sum; |
| } |
| const nlohmann::json::object_t* object = |
| root.get_ptr<const nlohmann::json::object_t*>(); |
| if (object != nullptr) { |
| std::size_t sum = 0; |
| for (const auto& [k, v] : *object) { |
| sum += k.size() + GetEstimatedJsonSize(v); |
| } |
| return sum; |
| } |
| return 0; |
| } |
| |
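| // Rough size of the current cache contents: the length of every key plus |
| // the estimated serialized size of every value. |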
| std::size_t ManagedObjectStore::GetSnapshotSizeInBytes() const { |
| std::size_t bytes = 0; |
| absl::MutexLock lock(&managed_objects_mutex_); |
| for (const auto& [key, value] : managedObjects) { |
| bytes += key.size(); // Not using capacity() here, for simplicity. |
| |
| // This may over-estimate, but it is the simplest way to get a quick number |
| // without writing complicated code that visits every stored type. |
| bytes += GetEstimatedJsonSize(value->serialize()); |
| } |
| return bytes; |
| } |
| |
| // Returns a json object with metrics about the managed objects: per-object |
| // ages and refresh/usage counters, an age histogram (kNumBuckets buckets of |
| // kBucketSize milliseconds each, the last bucket catching everything older), |
| // and aggregate counters from the store tracker. |
| nlohmann::json ManagedObjectStore::GetManagedObjectsMetrics( |
| std::chrono::steady_clock::time_point now) const { |
| constexpr int64_t kNumBuckets = 20; |
| constexpr int64_t kBucketSize = 1000; // histogram bucket width in milliseconds |
| nlohmann::json managed_objects_age_histograms = nlohmann::json::array(); |
| for (size_t i = 0; i < kNumBuckets; i++) { |
| nlohmann::json bucket; |
| bucket["Start"] = i * kBucketSize; |
| bucket["Count"] = 0; |
| managed_objects_age_histograms.push_back(bucket); |
| } |
| |
| nlohmann::json managed_objects = nlohmann::json::array(); |
| int64_t max_object_age_ms = 0; |
| int64_t max_last_served_age_ms = 0; |
| int64_t count_last_served = 0; |
| uint64_t sum_total_object_refreshes = 0; |
| uint64_t sum_total_object_refresh_scheduled = 0; |
| uint64_t sum_total_object_used = 0; |
| |
| int key_index = 0; |
| |
| { |
| absl::MutexLock lock(&managed_objects_mutex_); |
| for (const auto& iter : managedObjects) { |
| const std::string& key = iter.first; |
| const ValueType& value = *iter.second; |
| |
| nlohmann::json obj; |
| obj["@odata.id"] = |
| "/redfish/v1/Managers/bmc/ManagerDiagnosticData/Oem/Google/" |
| "GoogleManagedObjectStoreMetrics#/ManagedObjects/" + |
| std::to_string(key_index); |
| obj["@odata.type"] = |
| "#GoogleManagedObjectMetrics.v1_0_0.GoogleManagedObjectMetrics"; |
| |
| // Split the key: |
| std::string service_name; |
| std::string object_path; |
| std::string interface; |
| std::string property; |
| KeyType::fromKey(key, service_name, object_path, interface, property); |
| |
| obj["Id"] = key_index; |
| obj["ServiceName"] = service_name; |
| obj["ObjectPath"] = object_path; |
| obj["Interface"] = interface; |
| obj["Property"] = property; |
| obj["Age"] = clockSinceMilliseconds(value.lastRefreshAt, now); |
| obj["RefreshCountDone"] = value.numRefreshesDone; |
| obj["RefreshCountScheduled"] = value.numRefreshesScheduled; |
| obj["NumTimesUsed"] = value.numTimesUsed; |
| |
| key_index++; |
| |
| // sum up the counts: |
| sum_total_object_refreshes += value.numRefreshesDone; |
| sum_total_object_refresh_scheduled += value.numRefreshesScheduled; |
| sum_total_object_used += value.numTimesUsed; |
| |
| nlohmann::json last_used_ages_obj = nlohmann::json::array(); |
| if (value.lastUsedAges) { |
| std::for_each( |
| value.lastUsedAges->begin(), value.lastUsedAges->end(), |
| [&last_used_ages_obj, &count_last_served, &max_last_served_age_ms]( |
| const std::chrono::steady_clock::duration& dur) { |
| const int64_t lastUsedDurationMs = clockDurationMilliseconds(dur); |
| // report the last served ages: |
| last_used_ages_obj.push_back(lastUsedDurationMs); |
| // total count: |
| count_last_served += 1; |
| // track the max: |
| max_last_served_age_ms = |
| std::max(max_last_served_age_ms, lastUsedDurationMs); |
| }); |
| } |
| obj["AgeLastServed"] = last_used_ages_obj; |
| |
| // Object age histogram; the last bucket catches all older objects: |
| const int64_t object_age_ms = |
| clockSinceMilliseconds(value.lastRefreshAt, now); |
| if (object_age_ms >= 0) { |
| const auto i = static_cast<size_t>( |
| std::min(object_age_ms / kBucketSize, kNumBuckets - 1)); |
| auto count = managed_objects_age_histograms[i]["Count"] |
| .get_ref<nlohmann::json::number_integer_t&>(); |
| managed_objects_age_histograms[i]["Count"] = count + 1; |
| // max age: |
| max_object_age_ms = std::max(max_object_age_ms, object_age_ms); |
| } |
| managed_objects.push_back(obj); |
| } |
| } |
| |
| nlohmann::json google; |
| google["@odata.id"] = |
| "/redfish/v1/Managers/bmc/ManagerDiagnosticData/Oem/Google/" |
| "GoogleManagedObjectStoreMetrics"; |
| google["@odata.type"] = |
| "#GoogleManagedObjectStoreMetrics.v1_0_0." |
| "GoogleManagedObjectStoreMetrics"; |
| |
| // Age Buckets: |
| google["ManagedObjectsAgeHistograms"]["AgeBuckets"] = |
| managed_objects_age_histograms; |
| // Age Max: |
| google["ManagedObjectsAgeHistograms"]["AgeMax"] = max_object_age_ms; |
| |
| // Last Served Count: |
| google["ManagedObjectsAgeHistograms"]["LastServedCount"] = count_last_served; |
| google["ManagedObjectsAgeHistograms"]["LastServedMax"] = |
| max_last_served_age_ms; |
| |
| // Total num of refreshes: |
| google["NumRefreshesDone"] = sum_sotal_object_refreshes; |
| google["NumRefreshesScheduled"] = sum_total_object_refresh_scheduled; |
| |
| // Total num used: |
| google["NumTimesUsed"] = sum_total_object_used; |
| |
| // ManagedStore counters from managedStoreTracker |
| const ManagedStoreTracker store_tracker = getStoreTracker(); |
| google["NumGetManagedObjects"] = store_tracker.countGetManagedObjects; |
| google["NumGetManagedObjectsCacheMiss"] = |
| store_tracker.countGetManagedObjectsCacheMiss; |
| google["NumGetManagedObjectsCacheHit"] = |
| store_tracker.countGetManagedObjectsCacheHit; |
| google["NumGetManagedObjectsCacheMissMaxAge"] = |
| store_tracker.countGetManagedObjectsCacheMissMaxAge; |
| |
| // Export max pending and the threshold: |
| google["PendingResponsesMax"] = store_tracker.countMaxPendingDbusResponses; |
| google["PendingResponsesThreshold"] = |
| store_tracker.countThresholdPendingResponses; |
| |
| // Objects (will probably be removed from non-debug builds): |
| google["ManagedObjects"] = managed_objects; |
| google["ManagedObjectsSize"] = managed_objects.size(); |
| // final object: |
| return google; |
| } |
| |
| } // namespace managedStore |