commit ad49e38eb2
Merge tag 'drm-misc-next-fixes-2019-09-06' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

- A significant number of panfrost fixes for runtime_pm, MMU and GEM support
- A fix for DCS transfers on mcde

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190906070500.dfxacpgxoxalcha3@flea
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -390,6 +390,12 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
 	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
 
+	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
+		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
+	sg_free_table(shmem->sgt);
+	kfree(shmem->sgt);
+	shmem->sgt = NULL;
+
 	drm_gem_shmem_put_pages_locked(shmem);
 
 	shmem->madv = -1;
 
@@ -409,13 +415,16 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
 
-void drm_gem_shmem_purge(struct drm_gem_object *obj)
+bool drm_gem_shmem_purge(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	mutex_lock(&shmem->pages_lock);
+	if (!mutex_trylock(&shmem->pages_lock))
+		return false;
 	drm_gem_shmem_purge_locked(obj);
 	mutex_unlock(&shmem->pages_lock);
+
+	return true;
 }
 EXPORT_SYMBOL(drm_gem_shmem_purge);
 
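Aside: a minimal sketch (not part of this commit) of how a driver shrinker can consume the new bool return of drm_gem_shmem_purge(), counting only objects that were actually reclaimed and skipping ones whose pages_lock is contended. The surrounding shrinker plumbing is omitted; only the per-object helper is shown.

#include <linux/mm.h>
#include <drm/drm_gem_shmem_helper.h>

/* Returns the number of pages freed for one candidate object. */
static unsigned long example_purge_one(struct drm_gem_shmem_object *shmem)
{
	/* drm_gem_shmem_purge() reports false if the trylock failed. */
	if (!drm_gem_shmem_is_purgeable(shmem) ||
	    !drm_gem_shmem_purge(&shmem->base))
		return 0;

	return shmem->base.size >> PAGE_SHIFT;
}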
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -178,22 +178,26 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
 	const u32 loop_delay_us = 10; /* us */
 	const u8 *tx = msg->tx_buf;
 	u32 loop_counter;
-	size_t txlen;
+	size_t txlen = msg->tx_len;
+	size_t rxlen = msg->rx_len;
 	u32 val;
 	int ret;
 	int i;
 
-	txlen = msg->tx_len;
-	if (txlen > 12) {
+	if (txlen > 16) {
 		dev_err(d->dev,
-			"dunno how to write more than 12 bytes yet\n");
+			"dunno how to write more than 16 bytes yet\n");
+		return -EIO;
+	}
+	if (rxlen > 4) {
+		dev_err(d->dev,
+			"dunno how to read more than 4 bytes yet\n");
 		return -EIO;
 	}
 
 	dev_dbg(d->dev,
-		"message to channel %d, %zd bytes",
-		msg->channel,
-		txlen);
+		"message to channel %d, write %zd bytes read %zd bytes\n",
+		msg->channel, txlen, rxlen);
 
 	/* Command "nature" */
 	if (MCDE_DSI_HOST_IS_READ(msg->type))
@@ -210,9 +214,7 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
 	if (mipi_dsi_packet_format_is_long(msg->type))
 		val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT;
 	val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
-	/* Add one to the length for the MIPI DCS command */
-	val |= txlen
-		<< DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
+	val |= txlen << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
 	val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
 	val |= msg->type << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
 	writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
@@ -249,17 +251,36 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
 	writel(1, d->regs + DSI_DIRECT_CMD_SEND);
 
 	loop_counter = 1000 * 1000 / loop_delay_us;
-	while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
-		 DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
-	       && --loop_counter)
-		usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
-
-	if (!loop_counter) {
-		dev_err(d->dev, "DSI write timeout!\n");
-		return -ETIME;
+	if (MCDE_DSI_HOST_IS_READ(msg->type)) {
+		/* Read command */
+		while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
+			 (DSI_DIRECT_CMD_STS_READ_COMPLETED |
+			  DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR))
+		       && --loop_counter)
+			usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
+		if (!loop_counter) {
+			dev_err(d->dev, "DSI read timeout!\n");
+			return -ETIME;
+		}
+	} else {
+		/* Writing only */
+		while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
+			 DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
+		       && --loop_counter)
+			usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
+
+		if (!loop_counter) {
+			dev_err(d->dev, "DSI write timeout!\n");
+			return -ETIME;
+		}
 	}
 
 	val = readl(d->regs + DSI_DIRECT_CMD_STS);
+	if (val & DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR) {
+		dev_err(d->dev, "read completed with error\n");
+		writel(1, d->regs + DSI_DIRECT_CMD_RD_INIT);
+		return -EIO;
+	}
 	if (val & DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED) {
 		val >>= DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT;
 		dev_err(d->dev, "error during transmission: %04x\n",
@@ -269,10 +290,7 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
 
 	if (!MCDE_DSI_HOST_IS_READ(msg->type)) {
 		/* Return number of bytes written */
-		if (mipi_dsi_packet_format_is_long(msg->type))
-			ret = 4 + txlen;
-		else
-			ret = 4;
+		ret = txlen;
 	} else {
 		/* OK this is a read command, get the response */
 		u32 rdsz;
@@ -282,7 +300,13 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
 		rdsz = readl(d->regs + DSI_DIRECT_CMD_RD_PROPERTY);
 		rdsz &= DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK;
 		rddat = readl(d->regs + DSI_DIRECT_CMD_RDDAT);
-		for (i = 0; i < 4 && i < rdsz; i++)
+		if (rdsz < rxlen) {
+			dev_err(d->dev, "read error, requested %zd got %d\n",
+				rxlen, rdsz);
+			return -EIO;
+		}
+		/* FIXME: read more than 4 bytes */
+		for (i = 0; i < 4 && i < rxlen; i++)
 			rx[i] = (rddat >> (i * 8)) & 0xff;
 		ret = rdsz;
 	}
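Aside: both branches added above poll DSI_DIRECT_CMD_STS with the same bounded busy-wait. A sketch of that pattern in isolation (the register pointer and mask below are stand-ins, not the mcde driver's real symbols); the kernel's readl_poll_timeout() helper from <linux/iopoll.h> expresses the same idea more compactly.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll a status register until any bit in done_mask is set, ~1 s budget. */
static int example_wait_sts(void __iomem *sts_reg, u32 done_mask)
{
	const u32 loop_delay_us = 10;
	u32 loop_counter = 1000 * 1000 / loop_delay_us;

	while (!(readl(sts_reg) & done_mask) && --loop_counter)
		usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);

	return loop_counter ? 0 : -ETIME;
}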
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -53,8 +53,10 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
 	if (err) {
 		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
 			err);
-		regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
-				      pfdev->devfreq.cur_volt);
+		if (pfdev->regulator)
+			regulator_set_voltage(pfdev->regulator,
+					      pfdev->devfreq.cur_volt,
+					      pfdev->devfreq.cur_volt);
 		return err;
 	}
 
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,7 +5,6 @@
 #include <linux/clk.h>
 #include <linux/reset.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 
 #include "panfrost_device.h"
@@ -125,7 +124,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
 	INIT_LIST_HEAD(&pfdev->scheduled_jobs);
 	INIT_LIST_HEAD(&pfdev->as_lru_list);
 
-	spin_lock_init(&pfdev->hwaccess_lock);
 	spin_lock_init(&pfdev->as_lock);
 
 	err = panfrost_clk_init(pfdev);
@@ -166,14 +164,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
 	if (err)
 		goto err_out4;
 
-	/* runtime PM will wake us up later */
-	panfrost_gpu_power_off(pfdev);
-
-	pm_runtime_set_active(pfdev->dev);
-	pm_runtime_get_sync(pfdev->dev);
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
-
 	err = panfrost_perfcnt_init(pfdev);
 	if (err)
 		goto err_out5;
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -63,8 +63,6 @@ struct panfrost_device {
 	struct drm_device *ddev;
 	struct platform_device *pdev;
 
-	spinlock_t hwaccess_lock;
-
 	void __iomem *iomem;
 	struct clk *clock;
 	struct clk *bus_clock;
@@ -104,7 +102,6 @@ struct panfrost_device {
 struct panfrost_mmu {
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
-	struct mutex lock;
 	int as;
 	atomic_t as_count;
 	struct list_head list;
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -523,10 +523,6 @@ static int panfrost_probe(struct platform_device *pdev)
 	mutex_init(&pfdev->shrinker_lock);
 	INIT_LIST_HEAD(&pfdev->shrinker_list);
 
-	pm_runtime_use_autosuspend(pfdev->dev);
-	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
-	pm_runtime_enable(pfdev->dev);
-
 	err = panfrost_device_init(pfdev);
 	if (err) {
 		if (err != -EPROBE_DEFER)
@@ -541,6 +537,12 @@ static int panfrost_probe(struct platform_device *pdev)
 		goto err_out1;
 	}
 
+	pm_runtime_set_active(pfdev->dev);
+	pm_runtime_mark_last_busy(pfdev->dev);
+	pm_runtime_enable(pfdev->dev);
+	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
+	pm_runtime_use_autosuspend(pfdev->dev);
+
 	/*
 	 * Register the DRM device with the core and the connectors with
 	 * sysfs
@@ -570,11 +572,13 @@ static int panfrost_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(ddev);
 	panfrost_gem_shrinker_cleanup(ddev);
+
+	pm_runtime_get_sync(pfdev->dev);
+	pm_runtime_put_sync_autosuspend(pfdev->dev);
+	pm_runtime_disable(pfdev->dev);
 	panfrost_devfreq_fini(pfdev);
 	panfrost_device_fini(pfdev);
-	pm_runtime_put_sync_suspend(pfdev->dev);
-	pm_runtime_disable(pfdev->dev);
 
 	drm_dev_put(ddev);
 	return 0;
 }
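Aside: a sketch (not part of this commit) of the probe-time ordering the panfrost_drv.c hunks above move to: bring the hardware up by hand first, then tell the PM core it is active, and only then enable runtime PM with autosuspend, so the autosuspend timer starts from a known-powered state. example_probe() and example_hw_init() are hypothetical stand-ins.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Stand-in for panfrost_device_init(): clocks, regulators, IRQs, power-on. */
static int example_hw_init(struct platform_device *pdev)
{
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = example_hw_init(pdev);		/* device is powered after this */
	if (err)
		return err;

	pm_runtime_set_active(&pdev->dev);	/* PM core state matches the hw */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);

	return 0;
}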
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -36,15 +36,18 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
 	return count;
 }
 
-static void panfrost_gem_purge(struct drm_gem_object *obj)
+static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-	mutex_lock(&shmem->pages_lock);
+
+	if (!mutex_trylock(&shmem->pages_lock))
+		return false;
 
 	panfrost_mmu_unmap(to_panfrost_bo(obj));
 	drm_gem_shmem_purge_locked(obj);
 
 	mutex_unlock(&shmem->pages_lock);
+	return true;
 }
 
 static unsigned long
@@ -61,8 +64,8 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
 		if (freed >= sc->nr_to_scan)
 			break;
-		if (drm_gem_shmem_is_purgeable(shmem)) {
-			panfrost_gem_purge(&shmem->base);
+		if (drm_gem_shmem_is_purgeable(shmem) &&
+		    panfrost_gem_purge(&shmem->base)) {
 			freed += shmem->base.size >> PAGE_SHIFT;
 			list_del_init(&shmem->madv_list);
 		}
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -141,7 +141,6 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
 static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 {
 	struct panfrost_device *pfdev = job->pfdev;
-	unsigned long flags;
 	u32 cfg;
 	u64 jc_head = job->jc;
 	int ret;
@@ -150,13 +149,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 	if (ret < 0)
 		return;
 
-	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
-		goto end;
+	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
+		pm_runtime_put_sync_autosuspend(pfdev->dev);
+		return;
+	}
 
 	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
 
 	panfrost_devfreq_record_transition(pfdev, js);
-	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
 
 	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
 	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -185,12 +185,6 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 		job, js, jc_head);
 
 	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
-
-	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
-
-end:
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -369,6 +363,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	struct panfrost_job *job = to_panfrost_job(sched_job);
 	struct panfrost_device *pfdev = job->pfdev;
 	int js = panfrost_job_get_slot(job);
+	unsigned long flags;
 	int i;
 
 	/*
@@ -394,6 +389,15 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	if (sched_job)
 		drm_sched_increase_karma(sched_job);
 
+	spin_lock_irqsave(&pfdev->js->job_lock, flags);
+	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+		if (pfdev->jobs[i]) {
+			pm_runtime_put_noidle(pfdev->dev);
+			pfdev->jobs[i] = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
+
 	/* panfrost_core_dump(pfdev); */
 
 	panfrost_devfreq_record_transition(pfdev, js);
@@ -450,12 +454,21 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
 		}
 
 		if (status & JOB_INT_MASK_DONE(j)) {
-			struct panfrost_job *job = pfdev->jobs[j];
+			struct panfrost_job *job;
 
-			pfdev->jobs[j] = NULL;
-			panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
-			panfrost_devfreq_record_transition(pfdev, j);
-			dma_fence_signal(job->done_fence);
+			spin_lock(&pfdev->js->job_lock);
+			job = pfdev->jobs[j];
+			/* Only NULL if job timeout occurred */
+			if (job) {
+				pfdev->jobs[j] = NULL;
+
+				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
+				panfrost_devfreq_record_transition(pfdev, j);
+
+				dma_fence_signal_locked(job->done_fence);
+				pm_runtime_put_autosuspend(pfdev->dev);
+			}
+			spin_unlock(&pfdev->js->job_lock);
 		}
 
 		status &= ~mask;
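Aside: the IRQ-handler hunk above closes a race with the timeout path by looking the job up under js->job_lock and tolerating NULL. Reduced to its core, whichever path claims the slot under the lock first owns the teardown; a sketch with simplified types (not the driver's real structures):

#include <linux/spinlock.h>

struct example_slot {
	spinlock_t lock;	/* assumed initialized with spin_lock_init() */
	void *job;		/* in-flight job, or NULL once retired */
};

/* Returns the job to retire, or NULL if the other path already took it. */
static void *example_claim_job(struct example_slot *slot)
{
	void *job;

	spin_lock(&slot->lock);
	job = slot->job;
	slot->job = NULL;
	spin_unlock(&slot->lock);

	return job;
}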
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -80,19 +80,11 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
 }
 
 
-static int mmu_hw_do_operation(struct panfrost_device *pfdev,
-			       struct panfrost_mmu *mmu,
-			       u64 iova, size_t size, u32 op)
+static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
+				      u64 iova, size_t size, u32 op)
 {
-	int ret, as_nr;
-
-	spin_lock(&pfdev->as_lock);
-	as_nr = mmu->as;
-
-	if (as_nr < 0) {
-		spin_unlock(&pfdev->as_lock);
+	if (as_nr < 0)
 		return 0;
-	}
 
 	if (op != AS_COMMAND_UNLOCK)
 		lock_region(pfdev, as_nr, iova, size);
@@ -101,10 +93,18 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev,
 	write_cmd(pfdev, as_nr, op);
 
 	/* Wait for the flush to complete */
-	ret = wait_ready(pfdev, as_nr);
+	return wait_ready(pfdev, as_nr);
+}
 
+static int mmu_hw_do_operation(struct panfrost_device *pfdev,
+			       struct panfrost_mmu *mmu,
+			       u64 iova, size_t size, u32 op)
+{
+	int ret;
+
+	spin_lock(&pfdev->as_lock);
+	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
 	spin_unlock(&pfdev->as_lock);
 
 	return ret;
 }
@@ -115,6 +115,8 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
 	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
 	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
 
+	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+
 	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
 	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
 
@@ -127,8 +129,10 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
 	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
 }
 
-static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
 {
+	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+
 	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
 	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
 
@@ -220,6 +224,22 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
+void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+			      struct panfrost_mmu *mmu,
+			      u64 iova, size_t size)
+{
+	if (mmu->as < 0)
+		return;
+
+	pm_runtime_get_noresume(pfdev->dev);
+
+	/* Flush the PTs only if we're already awake */
+	if (pm_runtime_active(pfdev->dev))
+		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
+
+	pm_runtime_put_sync_autosuspend(pfdev->dev);
+}
+
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		      u64 iova, int prot, struct sg_table *sgt)
 {
@@ -228,8 +248,6 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
 	u64 start_iova = iova;
 
-	mutex_lock(&mmu->lock);
-
 	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
 		unsigned long paddr = sg_dma_address(sgl);
 		size_t len = sg_dma_len(sgl);
@@ -246,10 +264,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		}
 	}
 
-	mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova,
-			    AS_COMMAND_FLUSH_PT);
-
-	mutex_unlock(&mmu->lock);
+	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
 
 	return 0;
 }
@@ -259,7 +274,6 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
 	struct sg_table *sgt;
-	int ret;
 	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	if (WARN_ON(bo->is_mapped))
@@ -272,14 +286,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return ret;
-
 	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
 	bo->is_mapped = true;
 
 	return 0;
@@ -293,19 +300,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 	u64 iova = bo->node.start << PAGE_SHIFT;
 	size_t len = bo->node.size << PAGE_SHIFT;
 	size_t unmapped_len = 0;
-	int ret;
 
 	if (WARN_ON(!bo->is_mapped))
 		return;
 
 	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return;
-
-	mutex_lock(&bo->mmu->lock);
-
 	while (unmapped_len < len) {
 		size_t unmapped_page;
 		size_t pgsize = get_pgsize(iova, len - unmapped_len);
@@ -318,22 +318,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 		unmapped_len += pgsize;
 	}
 
-	mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
-
-	mutex_unlock(&bo->mmu->lock);
-
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
+	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
 	bo->is_mapped = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
-{
-	struct panfrost_file_priv *priv = cookie;
-
-	mmu_hw_do_operation(priv->pfdev, &priv->mmu, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
-}
+{}
 
 static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 				     size_t granule, bool leaf, void *cookie)
@@ -356,7 +346,6 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
 	struct panfrost_mmu *mmu = &priv->mmu;
 	struct panfrost_device *pfdev = priv->pfdev;
 
-	mutex_init(&mmu->lock);
 	INIT_LIST_HEAD(&mmu->list);
 	mmu->as = -1;
 
@@ -383,6 +372,11 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
 
 	spin_lock(&pfdev->as_lock);
 	if (mmu->as >= 0) {
+		pm_runtime_get_noresume(pfdev->dev);
+		if (pm_runtime_active(pfdev->dev))
+			panfrost_mmu_disable(pfdev, mmu->as);
+		pm_runtime_put_autosuspend(pfdev->dev);
+
 		clear_bit(mmu->as, &pfdev->as_alloc_mask);
 		clear_bit(mmu->as, &pfdev->as_in_use_mask);
 		list_del(&mmu->list);
@@ -627,5 +621,4 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 void panfrost_mmu_fini(struct panfrost_device *pfdev)
 {
 	mmu_write(pfdev, MMU_INT_MASK, 0);
-	mmu_disable(pfdev, 0);
 }
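Aside: panfrost_mmu_flush_range() above uses pm_runtime_get_noresume() plus pm_runtime_active() so page-table flushes only touch the hardware when the GPU is already awake (the MMU is flushed again anyway when it is re-enabled). The idiom in isolation, with a hypothetical example_flush_hw():

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Stand-in for the real FLUSH_PT register sequence. */
static void example_flush_hw(struct device *dev)
{
}

static void example_flush_if_awake(struct device *dev)
{
	/* Take a reference without waking the device up. */
	pm_runtime_get_noresume(dev);

	/*
	 * The reference keeps a suspend from starting between this
	 * check and the register access below.
	 */
	if (pm_runtime_active(dev))
		example_flush_hw(dev);

	pm_runtime_put_sync_autosuspend(dev);
}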
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -134,7 +134,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
 }
 
 void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
-void drm_gem_shmem_purge(struct drm_gem_object *obj);
+bool drm_gem_shmem_purge(struct drm_gem_object *obj);
 
 struct drm_gem_shmem_object *
 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,