Qualcomm driver updates for v5.7
This adds a new library for subscribing to notifications about protection
domains being started and stopped and the integration of this with the APR
driver. It also contains fixes and cleanups for the AOSS driver, socinfo and
rpmh.

-----BEGIN PGP SIGNATURE-----
iQJPBAABCAA5FiEEBd4DzF816k8JZtUlCx85Pw2ZrcUFAl5xplIbHGJqb3JuLmFu
ZGVyc3NvbkBsaW5hcm8ub3JnAAoJEAsfOT8Nma3FgPQQAOF2RxwpvpVwRvIN0emM
fMB9FOFhaLWx0ETkY1LXaRdNLOVTB6TgD7Ji9jeXOtiXR3H0NjG0Q6Q8nwaxkKP2
hMaJki5ojbTI2eTqHlkctVs9OsLAd4ldpjqMxOhlWWH0Q7Cn56mNpBpZiHBuFnAL
tV+hbtnZDNmPdyh4FZq7DoxJmAf3yrp7nNK/jRmFx0LIT1WJu9lpfwWOUNe2ljuK
BMHBcROi1Ar226zXl0wNQZMtMAbDc+4z7Mn0aVx1TES2RR8iP4GwBDvrBvvZGmdD
Hb3QGnk0qYnwFlqPfw0gsRGv9kivw2O18GK5QPYrj9PJji562T6Tk2xFFF+Tdhg8
QeMlLHzz+xyFcQHZzYpXHQTDI8TjVw84p9MmGMlLUerC+V9GfWRB+fx6giD3zwWC
tyHR7gzs0tCx56zz1ndRDR0CaWMdwbfct0hwhKjap72KUTSGBCATCdvesuZVlClc
emh+kNcoPgO9iTau/mDijVLDmnZwHrOd/9fuXxJO+Pl0qi3eqfwDZcHH3CBrSRym
XITmZe430o+uv+rHj+hINyCLlVXL5mSv/u7uc6flsS9L02vh0rrKwzasH6/klRje
SBSeh8I+Qh5f91Y2lxnPUrV7yMdqHaGDapbzMzDqId/aFEQenkObMEJ/trzoHb3N
5U4dOSAANFJMz5jFgDPMxmxK
=qXj9
-----END PGP SIGNATURE-----

Merge tag 'qcom-drivers-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux into arm/drivers

* tag 'qcom-drivers-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux:
  soc: qcom: Fix QCOM_APR dependencies
  soc: qcom: pdr: Avoid uninitialized use of found in pdr_indication_cb
  soc: qcom: apr: Add avs/audio tracking functionality
  dt-bindings: soc: qcom: apr: Add protection domain bindings
  soc: qcom: Introduce Protection Domain Restart helpers
  devicetree: bindings: firmware: add ipq806x to qcom_scm
  soc: qcom: socinfo: Use seq_putc() if possible
  drivers: qcom: rpmh-rsc: Use rcuidle tracepoints for rpmh
  soc: qcom: Do not depend on ARCH_QCOM for QMI helpers
  soc: qcom: aoss: Read back before triggering the IRQ
  soc: qcom: aoss: Use wake_up_all() instead of wake_up_interruptible_all()
  drivers: qcom: rpmh: remove rpmh_flush export
  drivers: qcom: rpmh: fix macro to accept NULL argument

Link: https://lore.kernel.org/r/20200318044236.GD470201@yoga
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 8f10e1ab53

@@ -10,6 +10,7 @@ Required properties:
* "qcom,scm-apq8064"
* "qcom,scm-apq8084"
* "qcom,scm-ipq4019"
* "qcom,scm-ipq806x"
* "qcom,scm-msm8660"
* "qcom,scm-msm8916"
* "qcom,scm-msm8960"

@@ -45,6 +45,18 @@ by the individual bindings for the specific service
12 - Ultrasound stream manager.
13 - Listen stream manager.

- qcom,protection-domain
Usage: optional
Value type: <stringlist>
Definition: Must list the protection domain service name and path
that the particular apr service has a dependency on.
Possible values are :
"avs/audio", "msm/adsp/audio_pd".
"kernel/elf_loader", "msm/modem/wlan_pd".
"tms/servreg", "msm/adsp/audio_pd".
"tms/servreg", "msm/modem/wlan_pd".
"tms/servreg", "msm/slpi/sensor_pd".

= EXAMPLE
The following example represents a QDSP based sound card on a MSM8996 device
which uses apr as communication between Apps and QDSP.

@@ -82,3 +94,41 @@ which uses apr as communication between Apps and QDSP.
...
};
};

= EXAMPLE 2
The following example represents a QDSP based sound card with protection domain
dependencies specified. Here some of the apr services are dependent on services
running on protection domain hosted on ADSP/SLPI remote processors while others
have no such dependency.

apr {
compatible = "qcom,apr-v2";
qcom,glink-channels = "apr_audio_svc";
qcom,apr-domain = <APR_DOMAIN_ADSP>;

q6core {
compatible = "qcom,q6core";
reg = <APR_SVC_ADSP_CORE>;
};

q6afe: q6afe {
compatible = "qcom,q6afe";
reg = <APR_SVC_AFE>;
qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
...
};

q6asm: q6asm {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd";
...
};

q6adm: q6adm {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
...
};
};

@@ -76,6 +76,10 @@ config QCOM_OCMEM
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.

config QCOM_PDR_HELPERS
tristate
select QCOM_QMI_HELPERS

config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64

@@ -88,7 +92,6 @@ config QCOM_PM

config QCOM_QMI_HELPERS
tristate
depends on ARCH_QCOM || COMPILE_TEST
depends on NET

config QCOM_RMTFS_MEM

@@ -197,6 +200,8 @@ config QCOM_APR
tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
depends on NET
select QCOM_PDR_HELPERS
help
Enable APR IPC protocol support between
application processor and QDSP6. APR is

@@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o

@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/rpmsg.h>
#include <linux/of.h>

@@ -21,6 +22,7 @@ struct apr {
spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
struct pdr_handle *pdr;
struct workqueue_struct *rxwq;
struct work_struct rx_work;
struct list_head rx_list;

@@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np,
id->svc_id + 1, GFP_ATOMIC);
spin_unlock(&apr->svcs_lock);

of_property_read_string_index(np, "qcom,protection-domain",
1, &adev->service_path);

dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));

ret = device_register(&adev->dev);

@@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np,
return ret;
}

static void of_register_apr_devices(struct device *dev)
static int of_apr_add_pd_lookups(struct device *dev)
{
const char *service_name, *service_path;
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
struct pdr_service *pds;
int ret;

for_each_child_of_node(dev->of_node, node) {
ret = of_property_read_string_index(node, "qcom,protection-domain",
0, &service_name);
if (ret < 0)
continue;

ret = of_property_read_string_index(node, "qcom,protection-domain",
1, &service_path);
if (ret < 0) {
dev_err(dev, "pdr service path missing: %d\n", ret);
return ret;
}

pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
dev_err(dev, "pdr add lookup failed: %d\n", ret);
return PTR_ERR(pds);
}
}

return 0;
}

static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
const char *service_path;
int ret;

for_each_child_of_node(dev->of_node, node) {
struct apr_device_id id = { {0} };

/*
* This function is called with svc_path NULL during
* apr_probe(), in which case we register any apr devices
* without a qcom,protection-domain specified.
*
* Then as the protection domains becomes available
* (if applicable) this function is again called, but with
* svc_path representing the service becoming available. In
* this case we register any apr devices with a matching
* qcom,protection-domain.
*/

ret = of_property_read_string_index(node, "qcom,protection-domain",
1, &service_path);
if (svc_path) {
/* skip APR services that are PD independent */
if (ret)
continue;

/* skip APR services whose PD paths don't match */
if (strcmp(service_path, svc_path))
continue;
} else {
/* skip APR services whose PD lookups are registered */
if (ret == 0)
continue;
}

if (of_property_read_u32(node, "reg", &id.svc_id))
continue;

@@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev)
}
}

static int apr_remove_device(struct device *dev, void *svc_path)
{
struct apr_device *adev = to_apr_device(dev);

if (svc_path && adev->service_path) {
if (!strcmp(adev->service_path, (char *)svc_path))
device_unregister(&adev->dev);
} else {
device_unregister(&adev->dev);
}

return 0;
}

static void apr_pd_status(int state, char *svc_path, void *priv)
{
struct apr *apr = (struct apr *)priv;

switch (state) {
case SERVREG_SERVICE_STATE_UP:
of_register_apr_devices(apr->dev, svc_path);
break;
case SERVREG_SERVICE_STATE_DOWN:
device_for_each_child(apr->dev, svc_path, apr_remove_device);
break;
}
}

static int apr_probe(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;

@@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
}
INIT_WORK(&apr->rx_work, apr_rxwq);

apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
if (IS_ERR(apr->pdr)) {
dev_err(dev, "Failed to init PDR handle\n");
ret = PTR_ERR(apr->pdr);
goto destroy_wq;
}

INIT_LIST_HEAD(&apr->rx_list);
spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
of_register_apr_devices(dev);

ret = of_apr_add_pd_lookups(dev);
if (ret)
goto handle_release;

of_register_apr_devices(dev, NULL);

return 0;
}

static int apr_remove_device(struct device *dev, void *null)
{
struct apr_device *adev = to_apr_device(dev);

device_unregister(&adev->dev);

return 0;
handle_release:
pdr_handle_release(apr->pdr);
destroy_wq:
destroy_workqueue(apr->rxwq);
return ret;
}

static void apr_remove(struct rpmsg_device *rpdev)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);

pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
flush_workqueue(apr->rxwq);
destroy_workqueue(apr->rxwq);

@@ -0,0 +1,757 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 The Linux Foundation. All rights reserved.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "pdr_internal.h"

struct pdr_service {
char service_name[SERVREG_NAME_LENGTH + 1];
char service_path[SERVREG_NAME_LENGTH + 1];

struct sockaddr_qrtr addr;

unsigned int instance;
unsigned int service;
u8 service_data_valid;
u32 service_data;
int state;

bool need_notifier_register;
bool need_notifier_remove;
bool need_locator_lookup;
bool service_connected;

struct list_head node;
};

struct pdr_handle {
struct qmi_handle locator_hdl;
struct qmi_handle notifier_hdl;

struct sockaddr_qrtr locator_addr;

struct list_head lookups;
struct list_head indack_list;

/* control access to pdr lookup/indack lists */
struct mutex list_lock;

/* serialize pd status invocation */
struct mutex status_lock;

/* control access to the locator state */
struct mutex lock;

bool locator_init_complete;

struct work_struct locator_work;
struct work_struct notifier_work;
struct work_struct indack_work;

struct workqueue_struct *notifier_wq;
struct workqueue_struct *indack_wq;

void (*status)(int state, char *service_path, void *priv);
void *priv;
};

struct pdr_list_node {
enum servreg_service_state curr_state;
u16 transaction_id;
struct pdr_service *pds;
struct list_head node;
};

static int pdr_locator_new_server(struct qmi_handle *qmi,
struct qmi_service *svc)
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);
struct pdr_service *pds;

/* Create a local client port for QMI communication */
pdr->locator_addr.sq_family = AF_QIPCRTR;
pdr->locator_addr.sq_node = svc->node;
pdr->locator_addr.sq_port = svc->port;

mutex_lock(&pdr->lock);
pdr->locator_init_complete = true;
mutex_unlock(&pdr->lock);

/* Service pending lookup requests */
mutex_lock(&pdr->list_lock);
list_for_each_entry(pds, &pdr->lookups, node) {
if (pds->need_locator_lookup)
schedule_work(&pdr->locator_work);
}
mutex_unlock(&pdr->list_lock);

return 0;
}

static void pdr_locator_del_server(struct qmi_handle *qmi,
struct qmi_service *svc)
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);

mutex_lock(&pdr->lock);
pdr->locator_init_complete = false;
mutex_unlock(&pdr->lock);

pdr->locator_addr.sq_node = 0;
pdr->locator_addr.sq_port = 0;
}

static struct qmi_ops pdr_locator_ops = {
.new_server = pdr_locator_new_server,
.del_server = pdr_locator_del_server,
};

static int pdr_register_listener(struct pdr_handle *pdr,
struct pdr_service *pds,
bool enable)
{
struct servreg_register_listener_resp resp;
struct servreg_register_listener_req req;
struct qmi_txn txn;
int ret;

ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
servreg_register_listener_resp_ei,
&resp);
if (ret < 0)
return ret;

req.enable = enable;
strcpy(req.service_path, pds->service_path);

ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
&txn, SERVREG_REGISTER_LISTENER_REQ,
SERVREG_REGISTER_LISTENER_REQ_LEN,
servreg_register_listener_req_ei,
&req);
if (ret < 0) {
qmi_txn_cancel(&txn);
return ret;
}

ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0) {
pr_err("PDR: %s register listener txn wait failed: %d\n",
pds->service_path, ret);
return ret;
}

if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
pr_err("PDR: %s register listener failed: 0x%x\n",
pds->service_path, resp.resp.error);
return ret;
}

if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX)
pr_err("PDR: %s notification state invalid: 0x%x\n",
pds->service_path, resp.curr_state);

pds->state = resp.curr_state;

return 0;
}

static void pdr_notifier_work(struct work_struct *work)
{
struct pdr_handle *pdr = container_of(work, struct pdr_handle,
notifier_work);
struct pdr_service *pds;
int ret;

mutex_lock(&pdr->list_lock);
list_for_each_entry(pds, &pdr->lookups, node) {
if (pds->service_connected) {
if (!pds->need_notifier_register)
continue;

pds->need_notifier_register = false;
ret = pdr_register_listener(pdr, pds, true);
if (ret < 0)
pds->state = SERVREG_SERVICE_STATE_DOWN;
} else {
if (!pds->need_notifier_remove)
continue;

pds->need_notifier_remove = false;
pds->state = SERVREG_SERVICE_STATE_DOWN;
}

mutex_lock(&pdr->status_lock);
pdr->status(pds->state, pds->service_path, pdr->priv);
mutex_unlock(&pdr->status_lock);
}
mutex_unlock(&pdr->list_lock);
}

static int pdr_notifier_new_server(struct qmi_handle *qmi,
struct qmi_service *svc)
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
notifier_hdl);
struct pdr_service *pds;

mutex_lock(&pdr->list_lock);
list_for_each_entry(pds, &pdr->lookups, node) {
if (pds->service == svc->service &&
pds->instance == svc->instance) {
pds->service_connected = true;
pds->need_notifier_register = true;
pds->addr.sq_family = AF_QIPCRTR;
pds->addr.sq_node = svc->node;
pds->addr.sq_port = svc->port;
queue_work(pdr->notifier_wq, &pdr->notifier_work);
}
}
mutex_unlock(&pdr->list_lock);

return 0;
}

static void pdr_notifier_del_server(struct qmi_handle *qmi,
struct qmi_service *svc)
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
notifier_hdl);
struct pdr_service *pds;

mutex_lock(&pdr->list_lock);
list_for_each_entry(pds, &pdr->lookups, node) {
if (pds->service == svc->service &&
pds->instance == svc->instance) {
pds->service_connected = false;
pds->need_notifier_remove = true;
pds->addr.sq_node = 0;
pds->addr.sq_port = 0;
queue_work(pdr->notifier_wq, &pdr->notifier_work);
}
}
mutex_unlock(&pdr->list_lock);
}

static struct qmi_ops pdr_notifier_ops = {
.new_server = pdr_notifier_new_server,
.del_server = pdr_notifier_del_server,
};

static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
u16 tid)
{
struct servreg_set_ack_resp resp;
struct servreg_set_ack_req req;
struct qmi_txn txn;
int ret;

ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
&resp);
if (ret < 0)
return ret;

req.transaction_id = tid;
strcpy(req.service_path, pds->service_path);

ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
&txn, SERVREG_SET_ACK_REQ,
SERVREG_SET_ACK_REQ_LEN,
servreg_set_ack_req_ei,
&req);

/* Skip waiting for response */
qmi_txn_cancel(&txn);
return ret;
}

static void pdr_indack_work(struct work_struct *work)
{
struct pdr_handle *pdr = container_of(work, struct pdr_handle,
indack_work);
struct pdr_list_node *ind, *tmp;
struct pdr_service *pds;

list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
pds = ind->pds;
pdr_send_indack_msg(pdr, pds, ind->transaction_id);

mutex_lock(&pdr->status_lock);
pds->state = ind->curr_state;
pdr->status(pds->state, pds->service_path, pdr->priv);
mutex_unlock(&pdr->status_lock);

mutex_lock(&pdr->list_lock);
list_del(&ind->node);
mutex_unlock(&pdr->list_lock);

kfree(ind);
}
}

static void pdr_indication_cb(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn, const void *data)
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
notifier_hdl);
const struct servreg_state_updated_ind *ind_msg = data;
struct pdr_list_node *ind;
struct pdr_service *pds;
bool found = false;

if (!ind_msg || !ind_msg->service_path[0] ||
strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
return;

mutex_lock(&pdr->list_lock);
list_for_each_entry(pds, &pdr->lookups, node) {
if (strcmp(pds->service_path, ind_msg->service_path))
continue;

found = true;
break;
}
mutex_unlock(&pdr->list_lock);

if (!found)
return;

pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
ind_msg->service_path, ind_msg->curr_state,
ind_msg->transaction_id);

ind = kzalloc(sizeof(*ind), GFP_KERNEL);
if (!ind)
return;

ind->transaction_id = ind_msg->transaction_id;
ind->curr_state = ind_msg->curr_state;
ind->pds = pds;

mutex_lock(&pdr->list_lock);
list_add_tail(&ind->node, &pdr->indack_list);
mutex_unlock(&pdr->list_lock);

queue_work(pdr->indack_wq, &pdr->indack_work);
}

static struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SERVREG_STATE_UPDATED_IND_ID,
.ei = servreg_state_updated_ind_ei,
.decoded_size = sizeof(struct servreg_state_updated_ind),
.fn = pdr_indication_cb,
},
{}
};

static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
struct servreg_get_domain_list_resp *resp,
struct pdr_handle *pdr)
{
struct qmi_txn txn;
int ret;

ret = qmi_txn_init(&pdr->locator_hdl, &txn,
servreg_get_domain_list_resp_ei, resp);
if (ret < 0)
return ret;

ret = qmi_send_request(&pdr->locator_hdl,
&pdr->locator_addr,
&txn, SERVREG_GET_DOMAIN_LIST_REQ,
SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
servreg_get_domain_list_req_ei,
req);
if (ret < 0) {
qmi_txn_cancel(&txn);
return ret;
}

ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0) {
pr_err("PDR: %s get domain list txn wait failed: %d\n",
req->service_name, ret);
return ret;
}

if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
pr_err("PDR: %s get domain list failed: 0x%x\n",
req->service_name, resp->resp.error);
return -EREMOTEIO;
}

return 0;
}

static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
{
struct servreg_get_domain_list_resp *resp;
struct servreg_get_domain_list_req req;
struct servreg_location_entry *entry;
int domains_read = 0;
int ret, i;

resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp)
return -ENOMEM;

/* Prepare req message */
strcpy(req.service_name, pds->service_name);
req.domain_offset_valid = true;
req.domain_offset = 0;

do {
req.domain_offset = domains_read;
ret = pdr_get_domain_list(&req, resp, pdr);
if (ret < 0)
goto out;

for (i = domains_read; i < resp->domain_list_len; i++) {
entry = &resp->domain_list[i];

if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
continue;

if (!strcmp(entry->name, pds->service_path)) {
pds->service_data_valid = entry->service_data_valid;
pds->service_data = entry->service_data;
pds->instance = entry->instance;
goto out;
}
}

/* Update ret to indicate that the service is not yet found */
ret = -ENXIO;

/* Always read total_domains from the response msg */
if (resp->domain_list_len > resp->total_domains)
resp->domain_list_len = resp->total_domains;

domains_read += resp->domain_list_len;
} while (domains_read < resp->total_domains);
out:
kfree(resp);
return ret;
}

static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
struct pdr_service *pds,
int err)
{
pr_err("PDR: service lookup for %s failed: %d\n",
pds->service_name, err);

if (err == -ENXIO)
return;

list_del(&pds->node);
pds->state = SERVREG_LOCATOR_ERR;
mutex_lock(&pdr->status_lock);
pdr->status(pds->state, pds->service_path, pdr->priv);
mutex_unlock(&pdr->status_lock);
kfree(pds);
}

static void pdr_locator_work(struct work_struct *work)
{
struct pdr_handle *pdr = container_of(work, struct pdr_handle,
locator_work);
struct pdr_service *pds, *tmp;
int ret = 0;

/* Bail out early if the SERVREG LOCATOR QMI service is not up */
mutex_lock(&pdr->lock);
if (!pdr->locator_init_complete) {
mutex_unlock(&pdr->lock);
pr_debug("PDR: SERVICE LOCATOR service not available\n");
return;
}
mutex_unlock(&pdr->lock);

mutex_lock(&pdr->list_lock);
list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
if (!pds->need_locator_lookup)
continue;

ret = pdr_locate_service(pdr, pds);
if (ret < 0) {
pdr_notify_lookup_failure(pdr, pds, ret);
continue;
}

ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
pds->instance);
if (ret < 0) {
pdr_notify_lookup_failure(pdr, pds, ret);
continue;
}

pds->need_locator_lookup = false;
}
mutex_unlock(&pdr->list_lock);
}

/**
* pdr_add_lookup() - register a tracking request for a PD
* @pdr: PDR client handle
* @service_name: service name of the tracking request
* @service_path: service path of the tracking request
*
* Registering a pdr lookup allows for tracking the life cycle of the PD.
*
* Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
* returned if a lookup is already in progress for the given service path.
*/
struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
const char *service_name,
const char *service_path)
{
struct pdr_service *pds, *tmp;
int ret;

if (IS_ERR_OR_NULL(pdr))
return ERR_PTR(-EINVAL);

if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
!service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
return ERR_PTR(-EINVAL);

pds = kzalloc(sizeof(*pds), GFP_KERNEL);
if (!pds)
return ERR_PTR(-ENOMEM);

pds->service = SERVREG_NOTIFIER_SERVICE;
strcpy(pds->service_name, service_name);
strcpy(pds->service_path, service_path);
pds->need_locator_lookup = true;

mutex_lock(&pdr->list_lock);
list_for_each_entry(tmp, &pdr->lookups, node) {
if (strcmp(tmp->service_path, service_path))
continue;

mutex_unlock(&pdr->list_lock);
ret = -EALREADY;
goto err;
}

list_add(&pds->node, &pdr->lookups);
mutex_unlock(&pdr->list_lock);

schedule_work(&pdr->locator_work);

return pds;
err:
kfree(pds);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(pdr_add_lookup);

/**
* pdr_restart_pd() - restart PD
* @pdr: PDR client handle
* @pds: PD service handle
*
* Restarts the PD tracked by the PDR client handle for a given service path.
*
* Return: 0 on success, negative errno on failure.
*/
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
{
struct servreg_restart_pd_resp resp;
struct servreg_restart_pd_req req;
struct sockaddr_qrtr addr;
struct pdr_service *tmp;
struct qmi_txn txn;
int ret;

if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
return -EINVAL;

mutex_lock(&pdr->list_lock);
list_for_each_entry(tmp, &pdr->lookups, node) {
if (tmp != pds)
continue;

if (!pds->service_connected)
break;

/* Prepare req message */
strcpy(req.service_path, pds->service_path);
addr = pds->addr;
break;
}
mutex_unlock(&pdr->list_lock);

if (!req.service_path[0])
return -EINVAL;

ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
servreg_restart_pd_resp_ei,
&resp);
if (ret < 0)
return ret;

ret = qmi_send_request(&pdr->notifier_hdl, &addr,
&txn, SERVREG_RESTART_PD_REQ,
SERVREG_RESTART_PD_REQ_MAX_LEN,
servreg_restart_pd_req_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
return ret;
}

ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0) {
pr_err("PDR: %s PD restart txn wait failed: %d\n",
req.service_path, ret);
return ret;
}

/* Check response if PDR is disabled */
if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
resp.resp.error == QMI_ERR_DISABLED_V01) {
pr_err("PDR: %s PD restart is disabled: 0x%x\n",
req.service_path, resp.resp.error);
return -EOPNOTSUPP;
}

/* Check the response for other error case*/
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
pr_err("PDR: %s request for PD restart failed: 0x%x\n",
req.service_path, resp.resp.error);
return -EREMOTEIO;
}

return 0;
}
EXPORT_SYMBOL(pdr_restart_pd);

/**
* pdr_handle_alloc() - initialize the PDR client handle
* @status: function to be called on PD state change
* @priv: handle for client's use
*
* Initializes the PDR client handle to allow for tracking/restart of PDs.
*
* Return: pdr_handle object on success, ERR_PTR on failure.
*/
struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
char *service_path,
void *priv), void *priv)
{
struct pdr_handle *pdr;
int ret;

if (!status)
return ERR_PTR(-EINVAL);

pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
if (!pdr)
return ERR_PTR(-ENOMEM);

pdr->status = status;
pdr->priv = priv;

mutex_init(&pdr->status_lock);
mutex_init(&pdr->list_lock);
mutex_init(&pdr->lock);

INIT_LIST_HEAD(&pdr->lookups);
INIT_LIST_HEAD(&pdr->indack_list);

INIT_WORK(&pdr->locator_work, pdr_locator_work);
INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
INIT_WORK(&pdr->indack_work, pdr_indack_work);

pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
if (!pdr->notifier_wq) {
ret = -ENOMEM;
goto free_pdr_handle;
}

pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
if (!pdr->indack_wq) {
ret = -ENOMEM;
goto destroy_notifier;
}

ret = qmi_handle_init(&pdr->locator_hdl,
SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
&pdr_locator_ops, NULL);
if (ret < 0)
goto destroy_indack;

ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
if (ret < 0)
goto release_qmi_handle;

ret = qmi_handle_init(&pdr->notifier_hdl,
SERVREG_STATE_UPDATED_IND_MAX_LEN,
&pdr_notifier_ops,
qmi_indication_handler);
if (ret < 0)
goto release_qmi_handle;

return pdr;

release_qmi_handle:
qmi_handle_release(&pdr->locator_hdl);
destroy_indack:
destroy_workqueue(pdr->indack_wq);
destroy_notifier:
destroy_workqueue(pdr->notifier_wq);
free_pdr_handle:
kfree(pdr);

return ERR_PTR(ret);
}
EXPORT_SYMBOL(pdr_handle_alloc);

/**
* pdr_handle_release() - release the PDR client handle
* @pdr: PDR client handle
*
* Cleans up pending tracking requests and releases the underlying qmi handles.
*/
void pdr_handle_release(struct pdr_handle *pdr)
{
struct pdr_service *pds, *tmp;

if (IS_ERR_OR_NULL(pdr))
return;

mutex_lock(&pdr->list_lock);
list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
list_del(&pds->node);
kfree(pds);
}
mutex_unlock(&pdr->list_lock);

cancel_work_sync(&pdr->locator_work);
cancel_work_sync(&pdr->notifier_work);
cancel_work_sync(&pdr->indack_work);

destroy_workqueue(pdr->notifier_wq);
destroy_workqueue(pdr->indack_wq);

qmi_handle_release(&pdr->locator_hdl);
qmi_handle_release(&pdr->notifier_hdl);

kfree(pdr);
}
EXPORT_SYMBOL(pdr_handle_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");

@@ -0,0 +1,379 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_PDR_HELPER_INTERNAL__
#define __QCOM_PDR_HELPER_INTERNAL__

#include <linux/soc/qcom/pdr.h>

#define SERVREG_LOCATOR_SERVICE 0x40
#define SERVREG_NOTIFIER_SERVICE 0x42

#define SERVREG_REGISTER_LISTENER_REQ 0x20
#define SERVREG_GET_DOMAIN_LIST_REQ 0x21
#define SERVREG_STATE_UPDATED_IND_ID 0x22
#define SERVREG_SET_ACK_REQ 0x23
#define SERVREG_RESTART_PD_REQ 0x24

#define SERVREG_DOMAIN_LIST_LENGTH 32
#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
#define SERVREG_SET_ACK_REQ_LEN 72
#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389

struct servreg_location_entry {
char name[SERVREG_NAME_LENGTH + 1];
u8 service_data_valid;
u32 service_data;
u32 instance;
};

struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct servreg_location_entry,
name),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct servreg_location_entry,
instance),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct servreg_location_entry,
service_data_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct servreg_location_entry,
service_data),
},
{}
};

struct servreg_get_domain_list_req {
char service_name[SERVREG_NAME_LENGTH + 1];
u8 domain_offset_valid;
u32 domain_offset;
};

struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct servreg_get_domain_list_req,
service_name),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct servreg_get_domain_list_req,
domain_offset_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct servreg_get_domain_list_req,
domain_offset),
},
{}
};

struct servreg_get_domain_list_resp {
struct qmi_response_type_v01 resp;
u8 total_domains_valid;
u16 total_domains;
u8 db_rev_count_valid;
u16 db_rev_count;
u8 domain_list_valid;
u32 domain_list_len;
struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
};

struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_get_domain_list_resp,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct servreg_get_domain_list_resp,
total_domains_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct servreg_get_domain_list_resp,
total_domains),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct servreg_get_domain_list_resp,
db_rev_count_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct servreg_get_domain_list_resp,
db_rev_count),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct servreg_get_domain_list_resp,
domain_list_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct servreg_get_domain_list_resp,
domain_list_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = SERVREG_DOMAIN_LIST_LENGTH,
.elem_size = sizeof(struct servreg_location_entry),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct servreg_get_domain_list_resp,
domain_list),
.ei_array = servreg_location_entry_ei,
},
{}
};

struct servreg_register_listener_req {
u8 enable;
char service_path[SERVREG_NAME_LENGTH + 1];
};

struct qmi_elem_info servreg_register_listener_req_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct servreg_register_listener_req,
enable),
},
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_register_listener_req,
service_path),
},
{}
};

struct servreg_register_listener_resp {
struct qmi_response_type_v01 resp;
u8 curr_state_valid;
enum servreg_service_state curr_state;
};

struct qmi_elem_info servreg_register_listener_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_register_listener_resp,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct servreg_register_listener_resp,
curr_state_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum servreg_service_state),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct servreg_register_listener_resp,
curr_state),
},
{}
};

struct servreg_restart_pd_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};

struct qmi_elem_info servreg_restart_pd_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct servreg_restart_pd_req,
service_path),
},
{}
};

struct servreg_restart_pd_resp {
struct qmi_response_type_v01 resp;
};

struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_restart_pd_resp,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};

struct servreg_state_updated_ind {
enum servreg_service_state curr_state;
char service_path[SERVREG_NAME_LENGTH + 1];
u16 transaction_id;
};

struct qmi_elem_info servreg_state_updated_ind_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct servreg_state_updated_ind,
curr_state),
},
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_state_updated_ind,
service_path),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct servreg_state_updated_ind,
transaction_id),
},
{}
};

struct servreg_set_ack_req {
char service_path[SERVREG_NAME_LENGTH + 1];
u16 transaction_id;
};

struct qmi_elem_info servreg_set_ack_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct servreg_set_ack_req,
service_path),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_set_ack_req,
transaction_id),
},
{}
};

struct servreg_set_ack_resp {
struct qmi_response_type_v01 resp;
};

struct qmi_elem_info servreg_set_ack_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct servreg_set_ack_resp,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};

#endif

@@ -200,7 +200,7 @@ static irqreturn_t qmp_intr(int irq, void *data)
{
struct qmp *qmp = data;

wake_up_interruptible_all(&qmp->event);
wake_up_all(&qmp->event);

return IRQ_HANDLED;
}

@@ -225,6 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
size_t tlen;
int ret;

if (WARN_ON(len + sizeof(u32) > qmp->size))

@@ -239,6 +240,9 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
data, len / sizeof(u32));
writel(len, qmp->msgram + qmp->offset);

/* Read back len to confirm data written in message RAM */
tlen = readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);

time_left = wait_event_interruptible_timeout(qmp->event,

@@ -110,5 +110,6 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
int rpmh_rsc_invalidate(struct rsc_drv *drv);

void rpmh_tx_done(const struct tcs_request *msg, int r);
int rpmh_flush(struct rpmh_ctrlr *ctrlr);

#endif /* __RPM_INTERNAL_H__ */

@@ -277,7 +277,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
}

write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);

@@ -23,7 +23,7 @@

#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \

@@ -33,7 +33,7 @@
}, \
.cmd = { { 0 } }, \
.completion = q, \
.dev = dev, \
.dev = device, \
.needs_free = false, \
}

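The two hunks above rename the first parameter of DEFINE_RPMH_MSG_ONSTACK from dev to device so that the macro can be passed NULL. The reason is plain C preprocessor token substitution: with the parameter named dev, the identifier in the ".dev" designator is also a match and gets replaced by the argument. The following self-contained sketch (it uses a hypothetical struct req rather than the kernel's rpmh types) illustrates the failure mode and the fix; it is an editorial illustration, not part of the patch.

```c
#include <stddef.h>

struct req { const void *dev; };

/* Old form: the 'dev' in the '.dev' designator is a parameter token too. */
#define DEFINE_REQ_BROKEN(dev, name)	struct req name = { .dev = dev }
/* Fixed form: renaming the parameter leaves the '.dev' designator alone. */
#define DEFINE_REQ_FIXED(device, name)	struct req name = { .dev = device }

int main(void)
{
	/* DEFINE_REQ_BROKEN(NULL, a); would expand to { .NULL = NULL } and not compile. */
	DEFINE_REQ_FIXED(NULL, b);	/* expands to { .dev = NULL } */
	return b.dev == NULL ? 0 : 1;
}
```

With the rename in place, send_single() below can invoke DEFINE_RPMH_MSG_ONSTACK(NULL, ...) directly, which is what the next hunks rely on.
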
@@ -427,11 +427,10 @@ static int is_req_valid(struct cache_req *req)
req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);

@@ -445,7 +444,7 @@ static int send_single(const struct device *dev, enum rpmh_state state,
/**
* rpmh_flush: Flushes the buffered active and sleep sets to TCS
*
* @dev: The device making the request
* @ctrlr: controller making request to flush cached data
*
* Return: -EBUSY if the controller is busy, probably waiting on a response
* to a RPMH request sent earlier.

@@ -454,10 +453,9 @@ static int send_single(const struct device *dev, enum rpmh_state state,
* that is powering down the entire system. Since no other RPMH API would be
* executing at this time, it is safe to run lockless.
*/
int rpmh_flush(const struct device *dev)
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret;

if (!ctrlr->dirty) {

@@ -480,11 +478,12 @@ int rpmh_flush(const struct device *dev)
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
p->sleep_val);
if (ret)
return ret;
ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
p->addr, p->wake_val);
ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
p->wake_val);
if (ret)
return ret;
}

@@ -493,7 +492,6 @@ int rpmh_flush(const struct device *dev)

return 0;
}
EXPORT_SYMBOL(rpmh_flush);

/**
* rpmh_invalidate: Invalidate all sleep and active sets

@@ -277,7 +277,7 @@ static int show_image_##type(struct seq_file *seq, void *p) \
{ \
struct smem_image_version *image_version = seq->private; \
seq_puts(seq, image_version->type); \
seq_puts(seq, "\n"); \
seq_putc(seq, '\n'); \
return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \

@@ -85,6 +85,7 @@ struct apr_device {
uint16_t domain_id;
uint32_t version;
char name[APR_NAME_SIZE];
const char *service_path;
spinlock_t lock;
struct list_head node;
};

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_PDR_HELPER__
#define __QCOM_PDR_HELPER__

#include <linux/soc/qcom/qmi.h>

#define SERVREG_NAME_LENGTH 64

struct pdr_service;
struct pdr_handle;

enum servreg_service_state {
SERVREG_LOCATOR_ERR = 0x1,
SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF,
SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF,
SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF,
SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF,
};

struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
char *service_path,
void *priv), void *priv);
struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
const char *service_name,
const char *service_path);
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds);
void pdr_handle_release(struct pdr_handle *pdr);

#endif

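The header above is the whole public surface of the new PDR library. As a rough usage sketch, modelled on how the APR driver above consumes it: a client allocates a handle with a status callback, registers one lookup per protection domain it depends on, and tears everything down with pdr_handle_release(). The function names my_pd_status() and my_client_init(), and the choice of the "avs/audio"/"msm/adsp/audio_pd" pair, are illustrative only and not part of the patch.

```c
#include <linux/err.h>
#include <linux/soc/qcom/pdr.h>

/* Hypothetical callback: invoked by the PDR helpers on PD state changes. */
static void my_pd_status(int state, char *service_path, void *priv)
{
	switch (state) {
	case SERVREG_SERVICE_STATE_UP:
		/* PD came up: (re)register whatever depends on service_path */
		break;
	case SERVREG_SERVICE_STATE_DOWN:
		/* PD went down: tear the dependent state down */
		break;
	}
}

/* Hypothetical init path of a client driver. */
static int my_client_init(void)
{
	struct pdr_handle *pdr;
	struct pdr_service *pds;

	pdr = pdr_handle_alloc(my_pd_status, NULL);
	if (IS_ERR(pdr))
		return PTR_ERR(pdr);

	/* Track the audio PD on the ADSP; strings as in the APR binding above. */
	pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
	if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
		pdr_handle_release(pdr);
		return PTR_ERR(pds);
	}

	return 0;
}
```

The -EALREADY check mirrors the APR driver: a second lookup for the same service path is not an error, it simply means the path is already being tracked.
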
@@ -88,6 +88,7 @@ struct qmi_elem_info {
#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5
#define QMI_ERR_INVALID_ID_V01 41
#define QMI_ERR_ENCODING_V01 58
#define QMI_ERR_DISABLED_V01 69
#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
#define QMI_ERR_NOT_SUPPORTED_V01 94

@@ -20,8 +20,6 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n);

int rpmh_flush(const struct device *dev);

int rpmh_invalidate(const struct device *dev);

#else

@@ -40,9 +38,6 @@ static inline int rpmh_write_batch(const struct device *dev,
const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }

static inline int rpmh_flush(const struct device *dev)
{ return -ENODEV; }

static inline int rpmh_invalidate(const struct device *dev)
{ return -ENODEV; }
