2019-05-19 21:51:31 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2013-03-22 22:34:03 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __LINUX_HOST1X_H
|
|
|
|
#define __LINUX_HOST1X_H
|
|
|
|
|
2013-10-14 20:43:22 +08:00
|
|
|
#include <linux/device.h>
|
2013-09-24 22:30:32 +08:00
|
|
|
#include <linux/types.h>
|
|
|
|
|
2013-03-22 22:34:03 +08:00
|
|
|
/*
 * Hardware class IDs used to address the individual engines behind host1x;
 * a job requests a SETCLASS to one of these (see struct host1x_job below).
 */
enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
};
|
|
|
|
|
2013-09-24 21:35:40 +08:00
|
|
|
struct host1x_client;
|
2019-02-08 21:35:13 +08:00
|
|
|
struct iommu_group;
|
2013-09-24 21:35:40 +08:00
|
|
|
|
2017-04-10 18:27:01 +08:00
|
|
|
/**
 * struct host1x_client_ops - host1x client operations
 * @init: host1x client initialization code, run when the client is attached
 * @exit: host1x client tear down code, run when the client is detached
 *
 * NOTE(review): both hooks return an int — presumably 0 on success and a
 * negative errno on failure, per kernel convention; confirm against the
 * implementations.
 */
struct host1x_client_ops {
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
};
|
|
|
|
|
2017-04-10 18:27:01 +08:00
|
|
|
/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @parent: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations (see struct host1x_client_ops)
 * @class: host1x hardware class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 */
struct host1x_client {
	struct list_head list;
	struct device *parent;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;
};
|
|
|
|
|
2013-09-24 22:30:32 +08:00
|
|
|
/*
|
|
|
|
* host1x buffer objects
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct host1x_bo;
|
|
|
|
struct sg_table;
|
|
|
|
|
|
|
|
/**
 * struct host1x_bo_ops - host1x buffer object operations
 * @get: take a reference to the buffer object, returning the object
 * @put: release a reference to the buffer object
 * @pin: pin the buffer for DMA by @dev and return a new SG table for it;
 *	 the DMA address may be stored through @phys. An ERR_PTR()-encoded
 *	 return signals failure; a NULL return means no SG table remapping
 *	 is needed and the DMA address can be used as-is.
 * @unpin: undo a previous @pin of @sgt for @dev
 * @mmap: return a CPU-accessible mapping of the whole buffer
 * @munmap: release a mapping previously returned by @mmap
 * @kmap: map a single page (@pagenum) of the buffer
 * @kunmap: unmap a page mapping previously returned by @kmap
 */
struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
				dma_addr_t *phys);
	void (*unpin)(struct device *dev, struct sg_table *sgt);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
};
|
|
|
|
|
|
|
|
/**
 * struct host1x_bo - host1x buffer object
 * @ops: buffer object operations
 *
 * Drivers embed this in their own buffer object type and initialize it
 * with host1x_bo_init(); all accesses go through @ops.
 */
struct host1x_bo {
	const struct host1x_bo_ops *ops;
};
|
|
|
|
|
|
|
|
/* Initialize @bo to dispatch through the given set of operations. */
static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	bo->ops = ops;
}
|
|
|
|
|
|
|
|
static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
|
|
|
|
{
|
|
|
|
return bo->ops->get(bo);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void host1x_bo_put(struct host1x_bo *bo)
|
|
|
|
{
|
|
|
|
bo->ops->put(bo);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct sg_table *host1x_bo_pin(struct device *dev,
|
|
|
|
struct host1x_bo *bo,
|
|
|
|
dma_addr_t *phys)
|
2013-09-24 22:30:32 +08:00
|
|
|
{
|
gpu: host1x: Overhaul host1x_bo_{pin,unpin}() API
The host1x_bo_pin() and host1x_bo_unpin() APIs are used to pin and unpin
buffers during host1x job submission. Pinning currently returns the SG
table and the DMA address (an IOVA if an IOMMU is used or a physical
address if no IOMMU is used) of the buffer. The DMA address is only used
for buffers that are relocated, whereas the host1x driver will map
gather buffers into its own IOVA space so that they can be processed by
the CDMA engine.
This approach has a couple of issues. On one hand it's not very useful
to return a DMA address for the buffer if host1x doesn't need it. On the
other hand, returning the SG table of the buffer is suboptimal because a
single SG table cannot be shared for multiple mappings, because the DMA
address is stored within the SG table, and the DMA address may be
different for different devices.
Subsequent patches will move the host1x driver over to the DMA API which
doesn't work with a single shared SG table. Fix this by returning a new
SG table each time a buffer is pinned. This allows the buffer to be
referenced by multiple jobs for different engines.
Change the prototypes of host1x_bo_pin() and host1x_bo_unpin() to take a
struct device *, specifying the device for which the buffer should be
pinned. This is required in order to be able to properly construct the
SG table. While at it, make host1x_bo_pin() return the SG table because
that allows us to return an ERR_PTR()-encoded error code if we need to,
or return NULL to signal that we don't need the SG table to be remapped
and can simply use the DMA address as-is. At the same time, returning
the DMA address is made optional because in the example of command
buffers, host1x doesn't need to know the DMA address since it will have
to create its own mapping anyway.
Signed-off-by: Thierry Reding <treding@nvidia.com>
2019-10-28 20:37:09 +08:00
|
|
|
return bo->ops->pin(dev, bo, phys);
|
2013-09-24 22:30:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
|
|
|
|
struct sg_table *sgt)
|
2013-09-24 22:30:32 +08:00
|
|
|
{
|
gpu: host1x: Overhaul host1x_bo_{pin,unpin}() API
The host1x_bo_pin() and host1x_bo_unpin() APIs are used to pin and unpin
buffers during host1x job submission. Pinning currently returns the SG
table and the DMA address (an IOVA if an IOMMU is used or a physical
address if no IOMMU is used) of the buffer. The DMA address is only used
for buffers that are relocated, whereas the host1x driver will map
gather buffers into its own IOVA space so that they can be processed by
the CDMA engine.
This approach has a couple of issues. On one hand it's not very useful
to return a DMA address for the buffer if host1x doesn't need it. On the
other hand, returning the SG table of the buffer is suboptimal because a
single SG table cannot be shared for multiple mappings, because the DMA
address is stored within the SG table, and the DMA address may be
different for different devices.
Subsequent patches will move the host1x driver over to the DMA API which
doesn't work with a single shared SG table. Fix this by returning a new
SG table each time a buffer is pinned. This allows the buffer to be
referenced by multiple jobs for different engines.
Change the prototypes of host1x_bo_pin() and host1x_bo_unpin() to take a
struct device *, specifying the device for which the buffer should be
pinned. This is required in order to be able to properly construct the
SG table. While at it, make host1x_bo_pin() return the SG table because
that allows us to return an ERR_PTR()-encoded error code if we need to,
or return NULL to signal that we don't need the SG table to be remapped
and can simply use the DMA address as-is. At the same time, returning
the DMA address is made optional because in the example of command
buffers, host1x doesn't need to know the DMA address since it will have
to create its own mapping anyway.
Signed-off-by: Thierry Reding <treding@nvidia.com>
2019-10-28 20:37:09 +08:00
|
|
|
bo->ops->unpin(dev, sgt);
|
2013-09-24 22:30:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
|
|
|
|
{
|
|
|
|
return bo->ops->mmap(bo);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
|
|
|
|
{
|
|
|
|
bo->ops->munmap(bo, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
|
|
|
|
{
|
|
|
|
return bo->ops->kmap(bo, pagenum);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void host1x_bo_kunmap(struct host1x_bo *bo,
|
|
|
|
unsigned int pagenum, void *addr)
|
|
|
|
{
|
|
|
|
bo->ops->kunmap(bo, pagenum, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* host1x syncpoints
|
|
|
|
*/
|
|
|
|
|
2013-10-14 20:21:52 +08:00
|
|
|
#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
|
2013-10-14 20:21:53 +08:00
|
|
|
#define HOST1X_SYNCPT_HAS_BASE (1 << 1)
|
2013-10-14 20:21:52 +08:00
|
|
|
|
2013-10-14 20:21:53 +08:00
|
|
|
struct host1x_syncpt_base;
|
2013-09-24 22:30:32 +08:00
|
|
|
struct host1x_syncpt;
|
|
|
|
struct host1x;
|
|
|
|
|
|
|
|
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
|
|
|
|
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
|
|
|
|
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
|
|
|
|
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
|
2015-01-28 21:29:02 +08:00
|
|
|
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
|
2013-09-24 22:30:32 +08:00
|
|
|
int host1x_syncpt_incr(struct host1x_syncpt *sp);
|
2014-02-20 06:48:36 +08:00
|
|
|
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
|
2013-09-24 22:30:32 +08:00
|
|
|
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
|
|
|
|
u32 *value);
|
2017-08-30 18:48:31 +08:00
|
|
|
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
|
2013-10-14 20:21:52 +08:00
|
|
|
unsigned long flags);
|
2013-09-24 22:30:32 +08:00
|
|
|
void host1x_syncpt_free(struct host1x_syncpt *sp);
|
|
|
|
|
2013-10-14 20:21:53 +08:00
|
|
|
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
|
|
|
|
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
|
|
|
|
|
2013-09-24 22:30:32 +08:00
|
|
|
/*
|
|
|
|
* host1x channel
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct host1x_channel;
|
|
|
|
struct host1x_job;
|
|
|
|
|
2018-06-18 20:01:51 +08:00
|
|
|
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
|
2013-09-24 22:30:32 +08:00
|
|
|
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
|
|
|
|
void host1x_channel_put(struct host1x_channel *channel);
|
|
|
|
int host1x_job_submit(struct host1x_job *job);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* host1x job
|
|
|
|
*/
|
|
|
|
|
2019-10-28 20:37:11 +08:00
|
|
|
#define HOST1X_RELOC_READ (1 << 0)
|
|
|
|
#define HOST1X_RELOC_WRITE (1 << 1)
|
|
|
|
|
2013-09-24 22:30:32 +08:00
|
|
|
/*
 * struct host1x_reloc - describes a buffer address to be patched into a
 * command buffer at job pin time.
 */
struct host1x_reloc {
	struct {
		struct host1x_bo *bo;		/* command buffer to patch */
		unsigned long offset;		/* byte offset of the patch site */
	} cmdbuf;
	struct {
		struct host1x_bo *bo;		/* buffer whose address is written */
		unsigned long offset;		/* offset added to the target address */
	} target;
	/* shift applied to the patched address (NOTE(review): confirm
	 * direction/units against the job pinning code) */
	unsigned long shift;
	/* HOST1X_RELOC_READ and/or HOST1X_RELOC_WRITE */
	unsigned long flags;
};
|
|
|
|
|
|
|
|
/*
 * struct host1x_job - a unit of work (gathers plus relocations) that is
 * pinned and submitted to a host1x channel via host1x_job_submit().
 */
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_gather *gathers;
	unsigned int num_gathers;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	/* DMA addresses recorded while pinning (NOTE(review): the gather
	 * and reloc pointers presumably index into addr_phys — confirm in
	 * the job pinning code) */
	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	u32 syncpt_id;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;
};
|
|
|
|
|
|
|
|
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
|
2018-05-05 14:45:47 +08:00
|
|
|
u32 num_cmdbufs, u32 num_relocs);
|
2018-05-16 23:01:43 +08:00
|
|
|
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
|
|
|
|
unsigned int words, unsigned int offset);
|
2013-09-24 22:30:32 +08:00
|
|
|
struct host1x_job *host1x_job_get(struct host1x_job *job);
|
|
|
|
void host1x_job_put(struct host1x_job *job);
|
|
|
|
int host1x_job_pin(struct host1x_job *job, struct device *dev);
|
|
|
|
void host1x_job_unpin(struct host1x_job *job);
|
|
|
|
|
2013-10-14 20:43:22 +08:00
|
|
|
/*
|
|
|
|
* subdevice probe infrastructure
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct host1x_device;
|
|
|
|
|
2017-04-10 18:27:01 +08:00
|
|
|
/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver (embedded; see to_host1x_driver())
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};
|
|
|
|
|
2014-12-18 22:29:14 +08:00
|
|
|
/* Convert an embedded struct device_driver back to its host1x_driver. */
static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}
|
|
|
|
|
|
|
|
int host1x_driver_register_full(struct host1x_driver *driver,
|
|
|
|
struct module *owner);
|
2013-10-14 20:43:22 +08:00
|
|
|
void host1x_driver_unregister(struct host1x_driver *driver);
|
|
|
|
|
2014-12-18 22:29:14 +08:00
|
|
|
/* Register a host1x driver, recording the calling module as its owner. */
#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
|
|
|
|
|
2013-10-14 20:43:22 +08:00
|
|
|
/**
 * struct host1x_device - host1x logical device
 * @driver: host1x driver bound to this device
 * @list: list node for the device
 * @dev: struct device backing this logical device (see to_host1x_device())
 * @subdevs_lock: appears to guard @subdevs and @active — confirm in bus code
 * @subdevs: list of subdevices
 * @active: list of subdevices (presumably the ones currently bound — confirm)
 * @clients_lock: appears to guard @clients — confirm in bus code
 * @clients: list of host1x clients attached to this device
 * @registered: whether the device has been registered
 * @dma_parms: DMA parameters (typically referenced by @dev.dma_parms —
 *	confirm where it is wired up)
 */
struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};
|
|
|
|
|
|
|
|
/* Convert an embedded struct device back to its host1x_device. */
static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}
|
|
|
|
|
|
|
|
int host1x_device_init(struct host1x_device *device);
|
|
|
|
int host1x_device_exit(struct host1x_device *device);
|
|
|
|
|
|
|
|
int host1x_client_register(struct host1x_client *client);
|
|
|
|
int host1x_client_unregister(struct host1x_client *client);
|
|
|
|
|
2013-09-02 15:48:53 +08:00
|
|
|
struct tegra_mipi_device;
|
|
|
|
|
|
|
|
struct tegra_mipi_device *tegra_mipi_request(struct device *device);
|
|
|
|
void tegra_mipi_free(struct tegra_mipi_device *device);
|
2016-08-12 22:00:53 +08:00
|
|
|
int tegra_mipi_enable(struct tegra_mipi_device *device);
|
|
|
|
int tegra_mipi_disable(struct tegra_mipi_device *device);
|
2013-09-02 15:48:53 +08:00
|
|
|
int tegra_mipi_calibrate(struct tegra_mipi_device *device);
|
|
|
|
|
2013-03-22 22:34:03 +08:00
|
|
|
#endif
|