Nvmed: Add in-band command/feature lockdown D-Bus method
Implement the NVMe Lockdown admin command (opcode 0x24) and expose it
through a new LockDownInband D-Bus method so that selected NVMe admin
command opcodes and Feature Identifiers can be prohibited (or
re-allowed) in band. This supports E1.S SSD left-shift development.
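
The method is exposed per controller on a new
xyz.openbmc_project.NVMe.Lockdown interface as
LockDownInband(y prohibit, ay adminCmds, ay features), replying with
miStatus, adminStatus, completionDw0, failureScope and failureId
(uuusu). As a rough usage sketch (the service name and object path are
placeholders, and the opcode/FID values are examples only):

  busctl call <service> <controller-object-path> \
      xyz.openbmc_project.NVMe.Lockdown LockDownInband yayay \
      1 1 128 1 2

i.e. prohibit (PRHBT=1) admin opcode 0x80 (Format NVM) and Feature
Identifier 0x02 (Power Management) in band.
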
Tested: https://paste.googleplex.com/5424008134787072
Google-Bug-Id: 439951442
Change-Id: I01fd32b8e7744dfd83d972dbcf27bb430855dac8
Signed-off-by: Agrim Bharat <agrimbharat@google.com>
diff --git a/src/NVMeController.cpp b/src/NVMeController.cpp
index 881f225..6c9e88f 100644
--- a/src/NVMeController.cpp
+++ b/src/NVMeController.cpp
@@ -8,15 +8,21 @@
#include "NVMePlugin.hpp"
#include "NVMeSubsys.hpp"
+#include <boost/asio/async_result.hpp>
+#include <boost/asio/spawn.hpp>
#include <phosphor-logging/lg2.hpp>
#include <sdbusplus/exception.hpp>
#include <sdbusplus/message/native_types.hpp>
#include <xyz/openbmc_project/Common/File/error.hpp>
#include <xyz/openbmc_project/Common/error.hpp>
+#include <cstdint>
#include <cstdio>
#include <filesystem>
#include <fstream>
+#include <system_error>
+#include <tuple>
+#include <vector>
// using sdbusplus::xyz::openbmc_project::Inventory::Item::server::
// StorageController;
@@ -136,6 +142,35 @@
protoSpecific, transferLength);
});
+ lockdownInterface =
+ objServer.add_interface(path, "xyz.openbmc_project.NVMe.Lockdown");
+
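+ // LockDownInband(prohibit, adminCmds, features) ->
+ //     (miStatus, adminStatus, completionDw0, failureScope, failureId)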
+ lockdownInterface->register_method(
+ "LockDownInband",
+ [selfWeak{weak_from_this()}](boost::asio::yield_context yield,
+ uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds,
+ const std::vector<uint8_t>& features) {
+ auto self = selfWeak.lock();
+ if (!self)
+ {
+ checkLibNVMeError(std::make_error_code(std::errc::no_such_device),
+ -1, "LockDownInband");
+ return std::tuple<uint32_t, uint32_t, uint32_t, std::string,
+ uint32_t>{0, 0, 0, "", 0};
+ }
+
+ if (self->status != Status::Enabled)
+ {
+ lg2::error("Controller has been disabled");
+ throw sdbusplus::xyz::openbmc_project::Common::Error::Unavailable();
+ }
+
+ return self->lockDownInbandMethod(std::move(yield), prohibit, adminCmds,
+ features);
+ });
+ lockdownInterface->initialize();
+
// StorageController interface is implemented manually to allow
// async methods
ctrlInterface = objServer.add_interface(
@@ -631,6 +666,7 @@
{
objServer.remove_interface(securityInterface);
objServer.remove_interface(passthruInterface);
+ objServer.remove_interface(lockdownInterface);
SoftwareVersion::emit_removed();
SoftwareExtVersion::emit_removed();
NVMeAdmin::emit_removed();
@@ -792,6 +828,61 @@
return {miStatus, adminStatus, completionDw0};
}
+std::tuple<uint32_t, uint32_t, uint32_t, std::string, uint32_t>
+ NVMeControllerEnabled::lockDownInbandMethod(
+ boost::asio::yield_context yield, uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds,
+ const std::vector<uint8_t>& features)
+{
+ using callback_t =
+ void(std::tuple<std::error_code, int, uint32_t, std::string, uint32_t>);
+
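+ // Suspend this coroutine until NVMeMi finishes the lockdown sequence and
+ // invokes the completion handler with the packed result tuple.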
+ auto [err, nvmeStatus, completionDw0, failureScope, failureId] =
+ boost::asio::async_initiate<boost::asio::yield_context, callback_t>(
+ [intf{nvmeIntf}, ctrl{nvmeCtrl}, prohibit, adminCmds,
+ features](auto&& handler) {
+ auto h = asio_helper::CopyableCallback(
+ std::forward<decltype(handler)>(handler));
+
+ intf->adminLockdownInband(ctrl, prohibit, adminCmds, features,
+ [h](const std::error_code& err,
+ int nvmeStatus, uint32_t completionDw0,
+ const std::string& scope,
+ uint32_t id) mutable {
+ h(std::make_tuple(err, nvmeStatus, completionDw0, scope, id));
+ });
+ },
+ yield);
+
+ lg2::debug("NVMe command result: status={STATUS}, dw0={DW0}", "STATUS",
+ nvmeStatus, "DW0", completionDw0);
+ if (nvmeStatus < 0)
+ {
+ throw sdbusplus::exception::SdBusError(err.value(),
+ "lockDownInbandMethod");
+ }
+
+ uint32_t miStatus = 0;
+ uint32_t adminStatus = 0;
+ if (nvme_status_get_type(nvmeStatus) == NVME_STATUS_TYPE_MI)
+ {
+ // If it's an MI status (e.g., protocol failure), Admin status is 0.
+ miStatus = nvme_status_get_value(nvmeStatus);
+ adminStatus = 0;
+ completionDw0 = 0;
+ }
+ else
+ {
+ // If it's an Admin status (e.g., command failed on the drive), MI
+ // status is 0.
+ miStatus = 0;
+ adminStatus = nvme_status_get_value(nvmeStatus);
+ }
+
+ // Returns the standardized tuple for the D-Bus client
+ return {miStatus, adminStatus, completionDw0, failureScope, failureId};
+}
+
void NVMeControllerEnabled::attachVolume(
boost::asio::yield_context yield,
const sdbusplus::message::object_path& volumePath)
diff --git a/src/NVMeController.hpp b/src/NVMeController.hpp
index 14597d3..afdc470 100644
--- a/src/NVMeController.hpp
+++ b/src/NVMeController.hpp
@@ -107,6 +107,7 @@
std::shared_ptr<sdbusplus::asio::dbus_interface> ctrlInterface;
std::shared_ptr<sdbusplus::asio::dbus_interface> securityInterface;
std::shared_ptr<sdbusplus::asio::dbus_interface> passthruInterface;
+ std::shared_ptr<sdbusplus::asio::dbus_interface> lockdownInterface;
std::shared_ptr<NVMeMiIntf> nvmeIntf;
nvme_mi_ctrl_t nvmeCtrl;
@@ -266,6 +267,11 @@
uint32_t cdw10, uint32_t cdw11, uint32_t cdw12,
uint32_t cdw13, uint32_t cdw14, uint32_t cdw15);
+ std::tuple<uint32_t, uint32_t, uint32_t, std::string, uint32_t>
+ lockDownInbandMethod(boost::asio::yield_context yield, uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds,
+ const std::vector<uint8_t>& features);
+
void attachVolume(boost::asio::yield_context yield,
const sdbusplus::message::object_path& volumePath);
diff --git a/src/NVMeIntf.hpp b/src/NVMeIntf.hpp
index 5434c72..f9992c5 100644
--- a/src/NVMeIntf.hpp
+++ b/src/NVMeIntf.hpp
@@ -280,6 +280,14 @@
std::function<void(const std::error_code& ec,
const nvme_mi_admin_resp_hdr& adminResp,
std::span<uint8_t> respData)>&& cb) = 0;
+
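+ // Prohibit (prohibit = 1) or re-allow (prohibit = 0) the given admin
+ // command opcodes and Feature Identifiers in band via the NVMe Lockdown
+ // command. The callback reports the final status and, on failure, the
+ // scope and identifier of the item that failed.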
+ virtual void adminLockdownInband(
+ nvme_mi_ctrl_t ctrl, uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds,
+ const std::vector<uint8_t>& features,
+ std::function<void(
+ const std::error_code&, int nvmeStatus, uint32_t comptionDw0,
+ const std::string& failureScope, uint32_t failureId)>&& cb) = 0;
};
/* A subset of Namespace Identify details of interest */
diff --git a/src/NVMeMi.cpp b/src/NVMeMi.cpp
index 46cbd94..b2c581c 100644
--- a/src/NVMeMi.cpp
+++ b/src/NVMeMi.cpp
@@ -10,11 +10,46 @@
#include <cassert>
#include <cerrno>
+#include <deque>
#include <fstream>
#include <stdexcept>
namespace common_err = sdbusplus::xyz::openbmc_project::Common::Error;
+// NVMe Admin Opcode for Lockdown (Opcode 0x24 in NVMe Base Spec 2.0a)
+constexpr uint8_t nvmeAdminOpcLockdown = 0x24;
+
+// NVMe Lockdown Scope values (CDW10, bits [3:0])
+constexpr uint8_t lockdownScopeAdminCmd = 0x0;
+constexpr uint8_t lockdownScopeFeatureId = 0x2;
+
+// CDW10 Bit Masks and Shifts
+constexpr uint8_t lockdownPrhbtMask = 0x1;
+constexpr uint8_t lockdownPrhbtShift = 4;
+
+constexpr uint8_t lockdownIfcMask = 0x3;
+constexpr uint8_t lockdownIfcShift = 5;
+
+constexpr uint8_t lockdownScopeMask = 0xF;
+constexpr uint8_t lockdownScopeShift = 0;
+
+constexpr uint8_t lockdownOfiShift = 8;
+
+// Default Interface Field (IFC) value: 00b = Admin Submission Queue (ASQ) only
+constexpr uint8_t lockdownIfcAsq = 0x00;
+
+static inline std::string scopeToString(uint8_t scope)
+{
+ switch (scope)
+ {
+ case lockdownScopeAdminCmd:
+ return "AdminCmd";
+ case lockdownScopeFeatureId:
+ return "FeatureId";
+ default:
+ return "Unknown";
+ }
+}
// libnvme-mi root service
nvme_root_t NVMeMi::nvmeRoot = nvme_mi_create_root(stderr, DEFAULT_LOGLEVEL);
@@ -915,6 +950,104 @@
});
}
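+// Issues the queued Lockdown commands one at a time, retrying a failed
+// command once before reporting that failure through finalCb and stopping
+// the sequence.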
+void NVMeMi::processNextLockdownCommand(
+ nvme_mi_ctrl_t ctrl, uint8_t prohibit,
+ std::deque<std::pair<uint8_t, uint8_t>> lockdownQueue,
+ const std::function<void(const std::error_code&, int, uint32_t,
+ const std::string&, uint32_t)>& finalCb,
+ int lastNvmeStatus, uint32_t lastCompletionDw0, int attempt,
+ const std::string& lastFailureScope, uint32_t lastFailureId)
+{
+ if (lockdownQueue.empty())
+ {
+ // Queue drained: report the last command's status along with any carried
+ // failure context.
+ this->io.post([finalCb, lastNvmeStatus, lastCompletionDw0,
+ lastFailureScope, lastFailureId]() {
+ finalCb(std::error_code(), lastNvmeStatus, lastCompletionDw0,
+ lastFailureScope, lastFailureId);
+ });
+ return;
+ }
+
+ // Local variables to hold the result of the synchronous hardware call.
+ uint32_t completionDw0 = 0;
+ int nvmeStatus = 0;
+ int currentErrno = 0;
+
+ const auto& currentCmd = lockdownQueue.front();
+ uint8_t itemCode = currentCmd.first;
+ uint8_t scope = currentCmd.second;
+
+ // CDW10: OFI (bits 15:8) | IFC (bits 6:5) | PRHBT (bit 4) | SCP (bits 3:0)
+ uint32_t cdw10 = (((uint32_t)itemCode)
+ << lockdownOfiShift) | // OFI (Bits 15:8)
+ (((uint32_t)(lockdownIfcAsq & lockdownIfcMask))
+ << lockdownIfcShift) | // IFC (Bits 6:5)
+ (((uint32_t)(prohibit & lockdownPrhbtMask))
+ << lockdownPrhbtShift) | // PRHBT (Bit 4)
+ (((uint32_t)(scope & lockdownScopeMask))
+ << lockdownScopeShift); // SCP (Bits 3:0)
+
+ constexpr uint32_t lockdownTimeoutMs = 5 * 1000;
+
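+ // Issue the Lockdown admin command synchronously over MI. A negative
+ // return indicates a transport/library error (errno is set); a positive
+ // return is an NVMe status value; completionDw0 receives CQE dword 0.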
+ nvmeStatus = nvme_mi_admin_admin_passthru(
+ ctrl, nvmeAdminOpcLockdown, 0, 0, 0, 0, 0, cdw10, 0, 0, 0, 0, 0, 0,
+ nullptr, 0, nullptr, lockdownTimeoutMs, &completionDw0);
+
+ if (nvmeStatus < 0)
+ {
+ currentErrno = errno;
+ }
+ else
+ {
+ currentErrno = 0;
+ }
+
+ // Success condition
+ if (nvmeStatus == 0)
+ {
+ lockdownQueue.pop_front();
+
+ this->post([self = shared_from_this(), ctrl, prohibit,
+ lockdownQueue = std::move(lockdownQueue), finalCb,
+ nvmeStatus, completionDw0, lastFailureScope,
+ lastFailureId]() mutable {
+ self->processNextLockdownCommand(
+ ctrl, prohibit, std::move(lockdownQueue), finalCb, nvmeStatus,
+ completionDw0, 0, lastFailureScope, lastFailureId);
+ });
+ return;
+ }
+
+ // Failure: retry the same command once before reporting the error.
+ if (attempt == 0)
+ {
+ this->post([self = shared_from_this(), ctrl, prohibit,
+ lockdownQueue = std::move(lockdownQueue), finalCb,
+ nvmeStatus, completionDw0, lastFailureScope,
+ lastFailureId]() mutable {
+ self->processNextLockdownCommand(
+ ctrl, prohibit, std::move(lockdownQueue), finalCb, nvmeStatus,
+ completionDw0, 1, lastFailureScope, lastFailureId);
+ });
+ return;
+ }
+
+ // The device failed this command, so its details become the final error
+ // payload.
+ std::string failureScope = scopeToString(currentCmd.second);
+ uint32_t failureId = currentCmd.first;
+
+ // Report the failing command's status and context through the final D-Bus
+ // callback.
+ this->io.post([finalCb, currentErrno, nvmeStatus, completionDw0,
+ failureScope, failureId]() {
+ finalCb(std::make_error_code(static_cast<std::errc>(currentErrno)),
+ nvmeStatus, completionDw0, failureScope, failureId);
+ });
+}
+
void NVMeMi::adminGetLogPage(
nvme_mi_ctrl_t ctrl, nvme_cmd_get_log_lid lid, uint32_t nsid, uint8_t lsp,
uint16_t /*lsi*/,
@@ -1170,6 +1303,41 @@
}
}
+void NVMeMi::adminLockdownInband(
+ nvme_mi_ctrl_t ctrl, uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds, const std::vector<uint8_t>& features,
+ std::function<void(const std::error_code&, int nvmeStatus,
+ uint32_t completionDw0, const std::string& failureScope,
+ uint32_t failureId)>&& cb)
+{
+ // Flatten all command sets into a deque of {itemCode, scope} pairs
+ std::deque<std::pair<uint8_t, uint8_t>> lockdownQueue;
+
+ for (uint8_t cmd : adminCmds)
+ {
+ lockdownQueue.emplace_back(cmd, lockdownScopeAdminCmd);
+ }
+ for (uint8_t feature : features)
+ {
+ lockdownQueue.emplace_back(feature, lockdownScopeFeatureId);
+ }
+
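+ // Queue sequential processing of the lockdown commands; if the work
+ // cannot be posted, report the error asynchronously so the caller still
+ // receives a callback.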
+ std::error_code postErr =
+ tryPost([self{shared_from_this()}, ctrl, prohibit,
+ lockdownQueue = std::move(lockdownQueue), cb{cb}]() mutable {
+ self->processNextLockdownCommand(
+ ctrl, prohibit, std::move(lockdownQueue), cb, 0, 0, 0, "", 0);
+ });
+
+ if (postErr)
+ {
+ lg2::error("adminLockdownInband post failed: {ERROR}", "DEVICE",
+ device->describe(), "ERROR", postErr.message());
+
+ io.post([cb{std::move(cb)}, postErr]() { cb(postErr, -1, 0, "", 0); });
+ }
+}
+
void NVMeMi::adminXfer(
nvme_mi_ctrl_t ctrl, const nvme_mi_admin_req_hdr& adminReq,
std::span<uint8_t> data, unsigned int timeoutMs,
diff --git a/src/NVMeMi.hpp b/src/NVMeMi.hpp
index ecc4aff..4fbc29a 100644
--- a/src/NVMeMi.hpp
+++ b/src/NVMeMi.hpp
@@ -95,6 +95,15 @@
const nvme_mi_admin_resp_hdr&,
std::span<uint8_t>)>&& cb) override;
+ void adminLockdownInband(
+ nvme_mi_ctrl_t ctrl, uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds,
+ const std::vector<uint8_t>& features,
+ std::function<
+ void(const std::error_code&, int nvmeStatus, uint32_t comptionDw0,
+ const std::string& failureScope, uint32_t failureId)>&& cb)
+ override;
+
void adminSecuritySend(nvme_mi_ctrl_t ctrl, uint8_t proto,
uint16_t protoSpecific, std::span<uint8_t> data,
std::function<void(const std::error_code&,
@@ -245,5 +254,13 @@
std::vector<uint8_t>&& data,
std::function<void(const std::error_code&, std::span<uint8_t>)>&& cb);
+ void processNextLockdownCommand(
+ nvme_mi_ctrl_t ctrl, uint8_t prohibit,
+ std::deque<std::pair<uint8_t, uint8_t>> lockdownQueue,
+ const std::function<void(const std::error_code&, int, uint32_t,
+ const std::string&, uint32_t)>& finalCb,
+ int lastNvmeStatus, uint32_t lastCompletionDw0, int attempt,
+ const std::string& lastFailureScope, uint32_t lastFailureId);
+
static size_t getBlockSize(nvme_mi_ctrl_t ctrl, size_t lbaFormat);
};
diff --git a/src/NVMeMiFake.hpp b/src/NVMeMiFake.hpp
index ce22761..ccd9f67 100644
--- a/src/NVMeMiFake.hpp
+++ b/src/NVMeMiFake.hpp
@@ -431,7 +431,18 @@
return;
}
}
-
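+ // The fake backend does not implement Lockdown; always report
+ // std::errc::not_supported.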
+ void adminLockdownInband(
+ [[maybe_unused]] nvme_mi_ctrl_t ctrl, [[maybe_unused]] uint8_t prohibit,
+ [[maybe_unused]] const std::vector<uint8_t>& adminCmds,
+ [[maybe_unused]] const std::vector<uint8_t>& features,
+ std::function<void(
+ const std::error_code&, int nvmeStatus, uint32_t comptionDw0,
+ const std::string& failureScope, uint32_t failureId)>&& cb) override
+ {
+ io.post([cb = std::move(cb)]() mutable {
+ cb(std::make_error_code(std::errc::not_supported), -1, 0, "", 0);
+ });
+ }
void adminSecuritySend(
[[maybe_unused]] nvme_mi_ctrl_t ctrl, [[maybe_unused]] uint8_t proto,
[[maybe_unused]] uint16_t protoSpecific,
diff --git a/tests/test_nvme_mi.cpp b/tests/test_nvme_mi.cpp
index 65640aa..e8a231e 100644
--- a/tests/test_nvme_mi.cpp
+++ b/tests/test_nvme_mi.cpp
@@ -126,7 +126,15 @@
const nvme_mi_admin_resp_hdr& adminResp,
std::span<uint8_t> respData)>&& cb),
(override));
-
+ MOCK_METHOD(
+ void, adminLockdownInband,
+ (nvme_mi_ctrl_t ctrl, uint8_t prohibit,
+ const std::vector<uint8_t>& adminCmds,
+ const std::vector<uint8_t>& features,
+ std::function<
+ void(const std::error_code&, int nvmeStatus, uint32_t completionDw0,
+ const std::string& failureScope, uint32_t failureId)>&& cb),
+ (override));
MOCK_METHOD(
void, adminSecuritySend,
(nvme_mi_ctrl_t ctrl, uint8_t proto, uint16_t protoSpecific,