drm-misc-next for 6.2:
UAPI Changes: Cross-subsystem Changes: Core Changes: - connector: Send hotplug event on cleanup - edid: logging/debug improvements - plane_helper: Improve tests Driver Changes: - bridge: - it6505: Synchronization improvements - panel: - panel-edp: Add INX N116BGE-EA2 C2 and C4 support. - nouveau: Fix page-fault handling - vmwgfx: fb and cursor refactoring, convert to generic hashtable -----BEGIN PGP SIGNATURE----- iHUEABYKAB0WIQRcEzekXsqa64kGDp7j7w1vZxhRxQUCY1o0WQAKCRDj7w1vZxhR xa0BAQD60nhbHrEyNW9VA6Ube55FoL8PlWkzyy4PKLRd+oNoUwEAljQKo89ljXSF HlcqzMSWzjJjbjxlXkDUtc12TM4MOAs= =YoOX -----END PGP SIGNATURE----- Merge tag 'drm-misc-next-2022-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next drm-misc-next for 6.2: UAPI Changes: Cross-subsystem Changes: Core Changes: - connector: Send hotplug event on cleanup - edid: logging/debug improvements - plane_helper: Improve tests Driver Changes: - bridge: - it6505: Synchronization improvements - panel: - panel-edp: Add INX N116BGE-EA2 C2 and C4 support. - nouveau: Fix page-fault handling - vmwgfx: fb and cursor refactoring, convert to generic hashtable Signed-off-by: Dave Airlie <airlied@redhat.com> From: Maxime Ripard <maxime@cerno.tech> Link: https://patchwork.freedesktop.org/patch/msgid/20221027073407.c2tlaczvzjrnzazi@houat
This commit is contained in:
commit
2b1966c65b
|
@ -651,17 +651,6 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
|
|||
|
||||
Contact: Harry Wentland, Alex Deucher
|
||||
|
||||
vmwgfx: Replace hashtable with Linux' implementation
|
||||
----------------------------------------------------
|
||||
|
||||
The vmwgfx driver uses its own hashtable implementation. Replace the
|
||||
code with Linux' implementation and update the callers. It's mostly a
|
||||
refactoring task, but the interfaces are different.
|
||||
|
||||
Contact: Zack Rusin, Thomas Zimmermann <tzimmermann@suse.de>
|
||||
|
||||
Level: Intermediate
|
||||
|
||||
Bootsplash
|
||||
==========
|
||||
|
||||
|
|
|
@ -6108,7 +6108,6 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
|
|||
aconnector->base.name);
|
||||
|
||||
aconnector->base.force = DRM_FORCE_OFF;
|
||||
aconnector->base.override_edid = false;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -6143,8 +6142,6 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
|
|||
link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
|
||||
}
|
||||
|
||||
|
||||
aconnector->base.override_edid = true;
|
||||
create_eml_sink(aconnector);
|
||||
}
|
||||
|
||||
|
|
|
@ -412,6 +412,7 @@ struct it6505 {
|
|||
* Mutex protects extcon and interrupt functions from interfering
|
||||
* each other.
|
||||
*/
|
||||
struct mutex irq_lock;
|
||||
struct mutex extcon_lock;
|
||||
struct mutex mode_lock; /* used to bridge_detect */
|
||||
struct mutex aux_lock; /* used to aux data transfers */
|
||||
|
@ -440,7 +441,7 @@ struct it6505 {
|
|||
enum hdcp_state hdcp_status;
|
||||
struct delayed_work hdcp_work;
|
||||
struct work_struct hdcp_wait_ksv_list;
|
||||
struct completion wait_edid_complete;
|
||||
struct completion extcon_completion;
|
||||
u8 auto_train_retry;
|
||||
bool hdcp_desired;
|
||||
bool is_repeater;
|
||||
|
@ -725,28 +726,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
|
|||
DRM_MODE_ARG(&it6505->video_info));
|
||||
}
|
||||
|
||||
static int it6505_drm_dp_link_probe(struct drm_dp_aux *aux,
|
||||
struct it6505_drm_dp_link *link)
|
||||
{
|
||||
u8 values[3];
|
||||
int err;
|
||||
|
||||
memset(link, 0, sizeof(*link));
|
||||
|
||||
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
link->revision = values[0];
|
||||
link->rate = drm_dp_bw_code_to_link_rate(values[1]);
|
||||
link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
|
||||
|
||||
if (values[2] & DP_ENHANCED_FRAME_CAP)
|
||||
link->capabilities = DP_ENHANCED_FRAME_CAP;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
|
||||
struct it6505_drm_dp_link *link,
|
||||
u8 mode)
|
||||
|
@ -1456,11 +1435,19 @@ static void it6505_parse_link_capabilities(struct it6505 *it6505)
|
|||
int bcaps;
|
||||
|
||||
if (it6505->dpcd[0] == 0) {
|
||||
it6505_aux_on(it6505);
|
||||
it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
|
||||
ARRAY_SIZE(it6505->dpcd));
|
||||
dev_err(dev, "DPCD is not initialized");
|
||||
return;
|
||||
}
|
||||
|
||||
memset(link, 0, sizeof(*link));
|
||||
|
||||
link->revision = it6505->dpcd[0];
|
||||
link->rate = drm_dp_bw_code_to_link_rate(it6505->dpcd[1]);
|
||||
link->num_lanes = it6505->dpcd[2] & DP_MAX_LANE_COUNT_MASK;
|
||||
|
||||
if (it6505->dpcd[2] & DP_ENHANCED_FRAME_CAP)
|
||||
link->capabilities = DP_ENHANCED_FRAME_CAP;
|
||||
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "DPCD Rev.: %d.%d",
|
||||
link->revision >> 4, link->revision & 0x0F);
|
||||
|
||||
|
@ -2323,19 +2310,32 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
|
|||
static void it6505_irq_hpd(struct it6505 *it6505)
|
||||
{
|
||||
struct device *dev = &it6505->client->dev;
|
||||
int dp_sink_count;
|
||||
|
||||
it6505->hpd_state = it6505_get_sink_hpd_status(it6505);
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "hpd change interrupt, change to %s",
|
||||
it6505->hpd_state ? "high" : "low");
|
||||
|
||||
if (it6505->bridge.dev)
|
||||
drm_helper_hpd_irq_event(it6505->bridge.dev);
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
|
||||
it6505->sink_count);
|
||||
|
||||
if (it6505->hpd_state) {
|
||||
wait_for_completion_timeout(&it6505->wait_edid_complete,
|
||||
msecs_to_jiffies(6000));
|
||||
wait_for_completion_timeout(&it6505->extcon_completion,
|
||||
msecs_to_jiffies(1000));
|
||||
it6505_aux_on(it6505);
|
||||
if (it6505->dpcd[0] == 0) {
|
||||
it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
|
||||
ARRAY_SIZE(it6505->dpcd));
|
||||
it6505_variable_config(it6505);
|
||||
it6505_parse_link_capabilities(it6505);
|
||||
}
|
||||
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
|
||||
|
||||
it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
|
||||
DP_SET_POWER_D0);
|
||||
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
|
||||
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
|
||||
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
|
||||
it6505->sink_count);
|
||||
|
||||
it6505_lane_termination_on(it6505);
|
||||
it6505_lane_power_on(it6505);
|
||||
|
||||
|
@ -2363,6 +2363,9 @@ static void it6505_irq_hpd(struct it6505 *it6505)
|
|||
it6505_lane_off(it6505);
|
||||
it6505_link_reset_step_train(it6505);
|
||||
}
|
||||
|
||||
if (it6505->bridge.dev)
|
||||
drm_helper_hpd_irq_event(it6505->bridge.dev);
|
||||
}
|
||||
|
||||
static void it6505_irq_hpd_irq(struct it6505 *it6505)
|
||||
|
@ -2491,8 +2494,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
|
|||
};
|
||||
int int_status[3], i;
|
||||
|
||||
msleep(100);
|
||||
mutex_lock(&it6505->extcon_lock);
|
||||
mutex_lock(&it6505->irq_lock);
|
||||
|
||||
if (it6505->enable_drv_hold || !it6505->powered)
|
||||
goto unlock;
|
||||
|
@ -2522,7 +2524,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
|
|||
}
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&it6505->extcon_lock);
|
||||
mutex_unlock(&it6505->irq_lock);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -2625,26 +2627,14 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
|
|||
goto unlock;
|
||||
|
||||
if (it6505->enable_drv_hold) {
|
||||
status = it6505_get_sink_hpd_status(it6505) ?
|
||||
connector_status_connected :
|
||||
connector_status_disconnected;
|
||||
status = it6505->hpd_state ? connector_status_connected :
|
||||
connector_status_disconnected;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (it6505_get_sink_hpd_status(it6505)) {
|
||||
it6505_aux_on(it6505);
|
||||
it6505_drm_dp_link_probe(&it6505->aux, &it6505->link);
|
||||
if (it6505->hpd_state) {
|
||||
it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
|
||||
DP_SET_POWER_D0);
|
||||
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
|
||||
|
||||
if (it6505->dpcd[0] == 0) {
|
||||
it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
|
||||
ARRAY_SIZE(it6505->dpcd));
|
||||
it6505_variable_config(it6505);
|
||||
it6505_parse_link_capabilities(it6505);
|
||||
}
|
||||
|
||||
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
|
||||
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
|
||||
|
@ -2711,9 +2701,12 @@ static void it6505_extcon_work(struct work_struct *work)
|
|||
*/
|
||||
if (ret)
|
||||
it6505_poweron(it6505);
|
||||
|
||||
complete_all(&it6505->extcon_completion);
|
||||
} else {
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "start to power off");
|
||||
pm_runtime_put_sync(dev);
|
||||
reinit_completion(&it6505->extcon_completion);
|
||||
|
||||
drm_helper_hpd_irq_event(it6505->bridge.dev);
|
||||
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
|
||||
|
@ -2871,10 +2864,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
/* Register aux channel */
|
||||
it6505->aux.name = "DP-AUX";
|
||||
it6505->aux.dev = dev;
|
||||
it6505->aux.drm_dev = bridge->dev;
|
||||
it6505->aux.transfer = it6505_aux_transfer;
|
||||
|
||||
ret = drm_dp_aux_register(&it6505->aux);
|
||||
|
||||
|
@ -3287,6 +3277,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
|
|||
if (!it6505)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&it6505->irq_lock);
|
||||
mutex_init(&it6505->extcon_lock);
|
||||
mutex_init(&it6505->mode_lock);
|
||||
mutex_init(&it6505->aux_lock);
|
||||
|
@ -3342,7 +3333,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
|
|||
INIT_WORK(&it6505->link_works, it6505_link_training_work);
|
||||
INIT_WORK(&it6505->hdcp_wait_ksv_list, it6505_hdcp_wait_ksv_list);
|
||||
INIT_DELAYED_WORK(&it6505->hdcp_work, it6505_hdcp_work);
|
||||
init_completion(&it6505->wait_edid_complete);
|
||||
init_completion(&it6505->extcon_completion);
|
||||
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
|
||||
it6505->powered = false;
|
||||
it6505->enable_drv_hold = DEFAULT_DRV_HOLD;
|
||||
|
@ -3354,6 +3345,11 @@ static int it6505_i2c_probe(struct i2c_client *client,
|
|||
debugfs_init(it6505);
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
it6505->aux.name = "DP-AUX";
|
||||
it6505->aux.dev = dev;
|
||||
it6505->aux.transfer = it6505_aux_transfer;
|
||||
drm_dp_aux_init(&it6505->aux);
|
||||
|
||||
it6505->bridge.funcs = &it6505_bridge_funcs;
|
||||
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
|
||||
it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
|
||||
|
|
|
@ -235,7 +235,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
|
|||
{
|
||||
struct drm_device *dev = buffer->client->dev;
|
||||
|
||||
drm_gem_vunmap(buffer->gem, &buffer->map);
|
||||
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
|
||||
|
||||
if (buffer->gem)
|
||||
drm_gem_object_put(buffer->gem);
|
||||
|
|
|
@ -274,6 +274,7 @@ static int __drm_connector_init(struct drm_device *dev,
|
|||
INIT_LIST_HEAD(&connector->probed_modes);
|
||||
INIT_LIST_HEAD(&connector->modes);
|
||||
mutex_init(&connector->mutex);
|
||||
mutex_init(&connector->edid_override_mutex);
|
||||
connector->edid_blob_ptr = NULL;
|
||||
connector->epoch_counter = 0;
|
||||
connector->tile_blob_ptr = NULL;
|
||||
|
@ -582,6 +583,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
|
|||
mutex_destroy(&connector->mutex);
|
||||
|
||||
memset(connector, 0, sizeof(*connector));
|
||||
|
||||
if (dev->registered)
|
||||
drm_sysfs_hotplug_event(dev);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_connector_cleanup);
|
||||
|
||||
|
|
|
@ -56,9 +56,10 @@ struct drm_plane;
|
|||
struct drm_plane_state;
|
||||
struct drm_property;
|
||||
struct edid;
|
||||
struct kref;
|
||||
struct work_struct;
|
||||
struct fwnode_handle;
|
||||
struct kref;
|
||||
struct seq_file;
|
||||
struct work_struct;
|
||||
|
||||
/* drm_crtc.c */
|
||||
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
|
||||
|
@ -286,5 +287,17 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
|
|||
|
||||
/* drm_edid.c */
|
||||
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
|
||||
int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m);
|
||||
int drm_edid_override_set(struct drm_connector *connector, const void *edid, size_t size);
|
||||
int drm_edid_override_reset(struct drm_connector *connector);
|
||||
|
||||
/* drm_edid_load.c */
|
||||
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
|
||||
const struct drm_edid *drm_edid_load_firmware(struct drm_connector *connector);
|
||||
#else
|
||||
static inline const struct drm_edid *
|
||||
drm_edid_load_firmware(struct drm_connector *connector)
|
||||
{
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -328,13 +328,7 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
|
|||
|
||||
static int edid_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_connector *connector = m->private;
|
||||
struct drm_property_blob *edid = connector->edid_blob_ptr;
|
||||
|
||||
if (connector->override_edid && edid)
|
||||
seq_write(m, edid->data, edid->length);
|
||||
|
||||
return 0;
|
||||
return drm_edid_override_show(m->private, m);
|
||||
}
|
||||
|
||||
static int edid_open(struct inode *inode, struct file *file)
|
||||
|
|
|
@ -1613,7 +1613,8 @@ static const void *edid_extension_block_data(const struct edid *edid, int index)
|
|||
return edid_block_data(edid, index + 1);
|
||||
}
|
||||
|
||||
static int drm_edid_block_count(const struct drm_edid *drm_edid)
|
||||
/* EDID block count indicated in EDID, may exceed allocated size */
|
||||
static int __drm_edid_block_count(const struct drm_edid *drm_edid)
|
||||
{
|
||||
int num_blocks;
|
||||
|
||||
|
@ -1633,12 +1634,18 @@ static int drm_edid_block_count(const struct drm_edid *drm_edid)
|
|||
num_blocks = eeodb;
|
||||
}
|
||||
|
||||
/* Limit by allocated size */
|
||||
num_blocks = min(num_blocks, (int)drm_edid->size / EDID_LENGTH);
|
||||
|
||||
return num_blocks;
|
||||
}
|
||||
|
||||
/* EDID block count, limited by allocated size */
|
||||
static int drm_edid_block_count(const struct drm_edid *drm_edid)
|
||||
{
|
||||
/* Limit by allocated size */
|
||||
return min(__drm_edid_block_count(drm_edid),
|
||||
(int)drm_edid->size / EDID_LENGTH);
|
||||
}
|
||||
|
||||
/* EDID extension block count, limited by allocated size */
|
||||
static int drm_edid_extension_block_count(const struct drm_edid *drm_edid)
|
||||
{
|
||||
return drm_edid_block_count(drm_edid) - 1;
|
||||
|
@ -1972,7 +1979,7 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
|
|||
|
||||
status = edid_block_check(block, is_base_block);
|
||||
if (status == EDID_BLOCK_HEADER_REPAIR) {
|
||||
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
|
||||
DRM_DEBUG_KMS("Fixing EDID header, your hardware may be failing\n");
|
||||
edid_header_fix(block);
|
||||
|
||||
/* Retry with fixed header, update status if that worked. */
|
||||
|
@ -2033,6 +2040,36 @@ bool drm_edid_is_valid(struct edid *edid)
|
|||
}
|
||||
EXPORT_SYMBOL(drm_edid_is_valid);
|
||||
|
||||
/**
|
||||
* drm_edid_valid - sanity check EDID data
|
||||
* @drm_edid: EDID data
|
||||
*
|
||||
* Sanity check an EDID. Cross check block count against allocated size and
|
||||
* checksum the blocks.
|
||||
*
|
||||
* Return: True if the EDID data is valid, false otherwise.
|
||||
*/
|
||||
bool drm_edid_valid(const struct drm_edid *drm_edid)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!drm_edid)
|
||||
return false;
|
||||
|
||||
if (edid_size_by_blocks(__drm_edid_block_count(drm_edid)) != drm_edid->size)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < drm_edid_block_count(drm_edid); i++) {
|
||||
const void *block = drm_edid_block_data(drm_edid, i);
|
||||
|
||||
if (!edid_block_valid(block, i == 0))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_edid_valid);
|
||||
|
||||
static struct edid *edid_filter_invalid_blocks(struct edid *edid,
|
||||
size_t *alloc_size)
|
||||
{
|
||||
|
@ -2159,58 +2196,91 @@ static void connector_bad_edid(struct drm_connector *connector,
|
|||
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
|
||||
return;
|
||||
|
||||
drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID is invalid:\n",
|
||||
connector->base.id, connector->name);
|
||||
for (i = 0; i < num_blocks; i++)
|
||||
edid_block_dump(KERN_DEBUG, edid + i, i);
|
||||
}
|
||||
|
||||
/* Get override or firmware EDID */
|
||||
static struct edid *drm_get_override_edid(struct drm_connector *connector,
|
||||
size_t *alloc_size)
|
||||
static const struct drm_edid *drm_edid_override_get(struct drm_connector *connector)
|
||||
{
|
||||
struct edid *override = NULL;
|
||||
const struct drm_edid *override = NULL;
|
||||
|
||||
if (connector->override_edid)
|
||||
override = drm_edid_duplicate(connector->edid_blob_ptr->data);
|
||||
mutex_lock(&connector->edid_override_mutex);
|
||||
|
||||
if (connector->edid_override)
|
||||
override = drm_edid_dup(connector->edid_override);
|
||||
|
||||
mutex_unlock(&connector->edid_override_mutex);
|
||||
|
||||
if (!override)
|
||||
override = drm_load_edid_firmware(connector);
|
||||
|
||||
/* FIXME: Get alloc size from deeper down the stack */
|
||||
if (!IS_ERR_OR_NULL(override) && alloc_size)
|
||||
*alloc_size = edid_size(override);
|
||||
override = drm_edid_load_firmware(connector);
|
||||
|
||||
return IS_ERR(override) ? NULL : override;
|
||||
}
|
||||
|
||||
/* For debugfs edid_override implementation */
|
||||
int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m)
|
||||
{
|
||||
const struct drm_edid *drm_edid;
|
||||
|
||||
mutex_lock(&connector->edid_override_mutex);
|
||||
|
||||
drm_edid = connector->edid_override;
|
||||
if (drm_edid)
|
||||
seq_write(m, drm_edid->edid, drm_edid->size);
|
||||
|
||||
mutex_unlock(&connector->edid_override_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* For debugfs edid_override implementation */
|
||||
int drm_edid_override_set(struct drm_connector *connector, const void *edid,
|
||||
size_t size)
|
||||
{
|
||||
int ret;
|
||||
const struct drm_edid *drm_edid;
|
||||
|
||||
if (size < EDID_LENGTH || edid_size(edid) > size)
|
||||
drm_edid = drm_edid_alloc(edid, size);
|
||||
if (!drm_edid_valid(drm_edid)) {
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override invalid\n",
|
||||
connector->base.id, connector->name);
|
||||
drm_edid_free(drm_edid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
connector->override_edid = false;
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override set\n",
|
||||
connector->base.id, connector->name);
|
||||
|
||||
ret = drm_connector_update_edid_property(connector, edid);
|
||||
if (!ret)
|
||||
connector->override_edid = true;
|
||||
mutex_lock(&connector->edid_override_mutex);
|
||||
|
||||
return ret;
|
||||
drm_edid_free(connector->edid_override);
|
||||
connector->edid_override = drm_edid;
|
||||
|
||||
mutex_unlock(&connector->edid_override_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* For debugfs edid_override implementation */
|
||||
int drm_edid_override_reset(struct drm_connector *connector)
|
||||
{
|
||||
connector->override_edid = false;
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override reset\n",
|
||||
connector->base.id, connector->name);
|
||||
|
||||
return drm_connector_update_edid_property(connector, NULL);
|
||||
mutex_lock(&connector->edid_override_mutex);
|
||||
|
||||
drm_edid_free(connector->edid_override);
|
||||
connector->edid_override = NULL;
|
||||
|
||||
mutex_unlock(&connector->edid_override_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_add_override_edid_modes - add modes from override/firmware EDID
|
||||
* drm_edid_override_connector_update - add modes from override/firmware EDID
|
||||
* @connector: connector we're probing
|
||||
*
|
||||
* Add modes from the override/firmware EDID, if available. Only to be used from
|
||||
|
@ -2220,24 +2290,25 @@ int drm_edid_override_reset(struct drm_connector *connector)
|
|||
*
|
||||
* Return: The number of modes added or 0 if we couldn't find any.
|
||||
*/
|
||||
int drm_add_override_edid_modes(struct drm_connector *connector)
|
||||
int drm_edid_override_connector_update(struct drm_connector *connector)
|
||||
{
|
||||
struct edid *override;
|
||||
const struct drm_edid *override;
|
||||
int num_modes = 0;
|
||||
|
||||
override = drm_get_override_edid(connector, NULL);
|
||||
override = drm_edid_override_get(connector);
|
||||
if (override) {
|
||||
drm_connector_update_edid_property(connector, override);
|
||||
num_modes = drm_add_edid_modes(connector, override);
|
||||
kfree(override);
|
||||
num_modes = drm_edid_connector_update(connector, override);
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
|
||||
connector->base.id, connector->name, num_modes);
|
||||
drm_edid_free(override);
|
||||
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
|
||||
connector->base.id, connector->name, num_modes);
|
||||
}
|
||||
|
||||
return num_modes;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_add_override_edid_modes);
|
||||
EXPORT_SYMBOL(drm_edid_override_connector_update);
|
||||
|
||||
typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len);
|
||||
|
||||
|
@ -2280,12 +2351,19 @@ static struct edid *_drm_do_get_edid(struct drm_connector *connector,
|
|||
{
|
||||
enum edid_block_status status;
|
||||
int i, num_blocks, invalid_blocks = 0;
|
||||
const struct drm_edid *override;
|
||||
struct edid *edid, *new;
|
||||
size_t alloc_size = EDID_LENGTH;
|
||||
|
||||
edid = drm_get_override_edid(connector, &alloc_size);
|
||||
if (edid)
|
||||
override = drm_edid_override_get(connector);
|
||||
if (override) {
|
||||
alloc_size = override->size;
|
||||
edid = kmemdup(override->edid, alloc_size, GFP_KERNEL);
|
||||
drm_edid_free(override);
|
||||
if (!edid)
|
||||
return NULL;
|
||||
goto ok;
|
||||
}
|
||||
|
||||
edid = kmalloc(alloc_size, GFP_KERNEL);
|
||||
if (!edid)
|
||||
|
@ -2388,7 +2466,7 @@ fail:
|
|||
* adapter and use drm_get_edid() instead of abusing this function.
|
||||
*
|
||||
* The EDID may be overridden using debugfs override_edid or firmware EDID
|
||||
* (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* order. Having either of them bypasses actual EDID reads.
|
||||
*
|
||||
* Return: Pointer to valid EDID or NULL if we couldn't find any.
|
||||
|
@ -2566,7 +2644,7 @@ EXPORT_SYMBOL(drm_get_edid);
|
|||
* this function.
|
||||
*
|
||||
* The EDID may be overridden using debugfs override_edid or firmware EDID
|
||||
* (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* order. Having either of them bypasses actual EDID reads.
|
||||
*
|
||||
* The returned pointer must be freed using drm_edid_free().
|
||||
|
@ -2604,7 +2682,7 @@ EXPORT_SYMBOL(drm_edid_read_custom);
|
|||
* Read EDID using the given I2C adapter.
|
||||
*
|
||||
* The EDID may be overridden using debugfs override_edid or firmware EDID
|
||||
* (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* order. Having either of them bypasses actual EDID reads.
|
||||
*
|
||||
* Prefer initializing connector->ddc with drm_connector_init_with_ddc() and
|
||||
|
@ -2640,7 +2718,7 @@ EXPORT_SYMBOL(drm_edid_read_ddc);
|
|||
* Read EDID using the connector's I2C adapter.
|
||||
*
|
||||
* The EDID may be overridden using debugfs override_edid or firmware EDID
|
||||
* (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
|
||||
* order. Having either of them bypasses actual EDID reads.
|
||||
*
|
||||
* The returned pointer must be freed using drm_edid_free().
|
||||
|
@ -3311,11 +3389,12 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
|
|||
* timing block contains enough info for us to create and return a new struct
|
||||
* drm_display_mode.
|
||||
*/
|
||||
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
|
||||
static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
|
||||
const struct drm_edid *drm_edid,
|
||||
const struct detailed_timing *timing,
|
||||
u32 quirks)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_display_mode *mode;
|
||||
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
|
||||
unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
|
||||
|
@ -3332,17 +3411,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
|
|||
return NULL;
|
||||
|
||||
if (pt->misc & DRM_EDID_PT_STEREO) {
|
||||
DRM_DEBUG_KMS("stereo mode not supported\n");
|
||||
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Stereo mode not supported\n",
|
||||
connector->base.id, connector->name);
|
||||
return NULL;
|
||||
}
|
||||
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
|
||||
DRM_DEBUG_KMS("composite sync not supported\n");
|
||||
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
|
||||
connector->base.id, connector->name);
|
||||
}
|
||||
|
||||
/* it is incorrect if hsync/vsync width is zero */
|
||||
if (!hsync_pulse_width || !vsync_pulse_width) {
|
||||
DRM_DEBUG_KMS("Incorrect Detailed timing. "
|
||||
"Wrong Hsync/Vsync pulse width\n");
|
||||
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Incorrect Detailed timing. Wrong Hsync/Vsync pulse width\n",
|
||||
connector->base.id, connector->name);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -3899,7 +3980,8 @@ add_cvt_modes(struct drm_connector *connector, const struct drm_edid *drm_edid)
|
|||
return closure.modes;
|
||||
}
|
||||
|
||||
static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode);
|
||||
static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode);
|
||||
|
||||
static void
|
||||
do_detailed_mode(const struct detailed_timing *timing, void *c)
|
||||
|
@ -3910,7 +3992,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
|
|||
if (!is_detailed_timing_descriptor(timing))
|
||||
return;
|
||||
|
||||
newmode = drm_mode_detailed(closure->connector->dev,
|
||||
newmode = drm_mode_detailed(closure->connector,
|
||||
closure->drm_edid, timing,
|
||||
closure->quirks);
|
||||
if (!newmode)
|
||||
|
@ -3924,7 +4006,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
|
|||
* so fix up anything that looks like CEA/HDMI mode, but the clock
|
||||
* is just slightly off.
|
||||
*/
|
||||
fixup_detailed_cea_mode_clock(newmode);
|
||||
fixup_detailed_cea_mode_clock(closure->connector, newmode);
|
||||
|
||||
drm_mode_probed_add(closure->connector, newmode);
|
||||
closure->modes++;
|
||||
|
@ -4586,7 +4668,8 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
|
|||
struct drm_display_mode *newmode;
|
||||
|
||||
if (!drm_valid_hdmi_vic(vic)) {
|
||||
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
|
||||
drm_err(connector->dev, "[CONNECTOR:%d:%s] Unknown HDMI VIC: %d\n",
|
||||
connector->base.id, connector->name, vic);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -5193,7 +5276,8 @@ static int add_cea_modes(struct drm_connector *connector,
|
|||
return modes;
|
||||
}
|
||||
|
||||
static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
|
||||
static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
const struct drm_display_mode *cea_mode;
|
||||
int clock1, clock2, clock;
|
||||
|
@ -5231,8 +5315,10 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
|
|||
if (mode->clock == clock)
|
||||
return;
|
||||
|
||||
DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
|
||||
type, vic, mode->clock, clock);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
|
||||
connector->base.id, connector->name,
|
||||
type, vic, mode->clock, clock);
|
||||
mode->clock = clock;
|
||||
}
|
||||
|
||||
|
@ -5340,15 +5426,12 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
|
|||
if (len >= 12)
|
||||
connector->audio_latency[1] = db[12];
|
||||
|
||||
DRM_DEBUG_KMS("HDMI: latency present %d %d, "
|
||||
"video latency %d %d, "
|
||||
"audio latency %d %d\n",
|
||||
connector->latency_present[0],
|
||||
connector->latency_present[1],
|
||||
connector->video_latency[0],
|
||||
connector->video_latency[1],
|
||||
connector->audio_latency[0],
|
||||
connector->audio_latency[1]);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] HDMI: latency present %d %d, video latency %d %d, audio latency %d %d\n",
|
||||
connector->base.id, connector->name,
|
||||
connector->latency_present[0], connector->latency_present[1],
|
||||
connector->video_latency[0], connector->video_latency[1],
|
||||
connector->audio_latency[0], connector->audio_latency[1]);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -5446,7 +5529,9 @@ static void drm_edid_to_eld(struct drm_connector *connector,
|
|||
return;
|
||||
|
||||
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
|
||||
DRM_DEBUG_KMS("ELD monitor %s\n", &eld[DRM_ELD_MONITOR_NAME_STRING]);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
|
||||
connector->base.id, connector->name,
|
||||
&eld[DRM_ELD_MONITOR_NAME_STRING]);
|
||||
|
||||
eld[DRM_ELD_CEA_EDID_VER_MNL] = info->cea_rev << DRM_ELD_CEA_EDID_VER_SHIFT;
|
||||
eld[DRM_ELD_CEA_EDID_VER_MNL] |= mnl;
|
||||
|
@ -5500,8 +5585,9 @@ static void drm_edid_to_eld(struct drm_connector *connector,
|
|||
eld[DRM_ELD_BASELINE_ELD_LEN] =
|
||||
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
|
||||
|
||||
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
|
||||
drm_eld_size(eld), total_sad_count);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
|
||||
connector->base.id, connector->name,
|
||||
drm_eld_size(eld), total_sad_count);
|
||||
}
|
||||
|
||||
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
|
||||
|
@ -5772,7 +5858,8 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
|
|||
{
|
||||
struct drm_display_info *info = &connector->display_info;
|
||||
|
||||
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] CEA VCDB 0x%02x\n",
|
||||
connector->base.id, connector->name, db[2]);
|
||||
|
||||
if (db[2] & EDID_CEA_VCDB_QS)
|
||||
info->rgb_quant_range_selectable = true;
|
||||
|
@ -5955,7 +6042,8 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
|
|||
}
|
||||
|
||||
drm_dbg_kms(connector->dev,
|
||||
"HF-VSDB: max TMDS clock: %d KHz, HDMI 2.1 support: %s, DSC 1.2 support: %s\n",
|
||||
"[CONNECTOR:%d:%s] HF-VSDB: max TMDS clock: %d KHz, HDMI 2.1 support: %s, DSC 1.2 support: %s\n",
|
||||
connector->base.id, connector->name,
|
||||
max_tmds_clock, str_yes_no(max_frl_rate), str_yes_no(dsc_support));
|
||||
}
|
||||
|
||||
|
@ -5974,39 +6062,39 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
|
|||
if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
|
||||
dc_bpc = 10;
|
||||
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30;
|
||||
DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
|
||||
connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 30.\n",
|
||||
connector->base.id, connector->name);
|
||||
}
|
||||
|
||||
if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
|
||||
dc_bpc = 12;
|
||||
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36;
|
||||
DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
|
||||
connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 36.\n",
|
||||
connector->base.id, connector->name);
|
||||
}
|
||||
|
||||
if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
|
||||
dc_bpc = 16;
|
||||
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48;
|
||||
DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
|
||||
connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 48.\n",
|
||||
connector->base.id, connector->name);
|
||||
}
|
||||
|
||||
if (dc_bpc == 0) {
|
||||
DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
|
||||
connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] No deep color support on this HDMI sink.\n",
|
||||
connector->base.id, connector->name);
|
||||
return;
|
||||
}
|
||||
|
||||
DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
|
||||
connector->name, dc_bpc);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Assigning HDMI sink color depth as %d bpc.\n",
|
||||
connector->base.id, connector->name, dc_bpc);
|
||||
info->bpc = dc_bpc;
|
||||
|
||||
/* YCRCB444 is optional according to spec. */
|
||||
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
|
||||
info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes;
|
||||
DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
|
||||
connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does YCRCB444 in deep color.\n",
|
||||
connector->base.id, connector->name);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -6014,8 +6102,8 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
|
|||
* then deep color 36 bit must be supported.
|
||||
*/
|
||||
if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
|
||||
DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
|
||||
connector->name);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink should do DC_36, but does not!\n",
|
||||
connector->base.id, connector->name);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6032,10 +6120,9 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
|
|||
if (len >= 7)
|
||||
info->max_tmds_clock = db[7] * 5000;
|
||||
|
||||
DRM_DEBUG_KMS("HDMI: DVI dual %d, "
|
||||
"max TMDS clock %d kHz\n",
|
||||
info->dvi_dual,
|
||||
info->max_tmds_clock);
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI: DVI dual %d, max TMDS clock %d kHz\n",
|
||||
connector->base.id, connector->name,
|
||||
info->dvi_dual, info->max_tmds_clock);
|
||||
|
||||
drm_parse_hdmi_deep_color_info(connector, db);
|
||||
}
|
||||
|
@ -6055,8 +6142,9 @@ static void drm_parse_microsoft_vsdb(struct drm_connector *connector,
|
|||
if (version == 1 || version == 2 || (version == 3 && !desktop_usage))
|
||||
info->non_desktop = true;
|
||||
|
||||
drm_dbg_kms(connector->dev, "HMD or specialized display VSDB version %u: 0x%02x\n",
|
||||
version, db[5]);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] HMD or specialized display VSDB version %u: 0x%02x\n",
|
||||
connector->base.id, connector->name, version, db[5]);
|
||||
}
|
||||
|
||||
static void drm_parse_cea_ext(struct drm_connector *connector,
|
||||
|
@ -6077,8 +6165,10 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
|
|||
info->cea_rev = edid_ext[1];
|
||||
|
||||
if (info->cea_rev != edid_ext[1])
|
||||
DRM_DEBUG_KMS("CEA extension version mismatch %u != %u\n",
|
||||
info->cea_rev, edid_ext[1]);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] CEA extension version mismatch %u != %u\n",
|
||||
connector->base.id, connector->name,
|
||||
info->cea_rev, edid_ext[1]);
|
||||
|
||||
/* The existence of a CTA extension should imply RGB support */
|
||||
info->color_formats = DRM_COLOR_FORMAT_RGB444;
|
||||
|
@ -6164,9 +6254,10 @@ static void drm_get_monitor_range(struct drm_connector *connector,
|
|||
|
||||
drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
|
||||
|
||||
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
|
||||
info->monitor_range.min_vfreq,
|
||||
info->monitor_range.max_vfreq);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
|
||||
connector->base.id, connector->name,
|
||||
info->monitor_range.min_vfreq, info->monitor_range.max_vfreq);
|
||||
}
|
||||
|
||||
static void drm_parse_vesa_mso_data(struct drm_connector *connector,
|
||||
|
@ -6177,8 +6268,9 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
|
|||
struct drm_display_info *info = &connector->display_info;
|
||||
|
||||
if (block->num_bytes < 3) {
|
||||
drm_dbg_kms(connector->dev, "Unexpected vendor block size %u\n",
|
||||
block->num_bytes);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Unexpected vendor block size %u\n",
|
||||
connector->base.id, connector->name, block->num_bytes);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -6186,13 +6278,16 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
|
|||
return;
|
||||
|
||||
if (sizeof(*vesa) != sizeof(*block) + block->num_bytes) {
|
||||
drm_dbg_kms(connector->dev, "Unexpected VESA vendor block size\n");
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Unexpected VESA vendor block size\n",
|
||||
connector->base.id, connector->name);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) {
|
||||
default:
|
||||
drm_dbg_kms(connector->dev, "Reserved MSO mode value\n");
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Reserved MSO mode value\n",
|
||||
connector->base.id, connector->name);
|
||||
fallthrough;
|
||||
case 0:
|
||||
info->mso_stream_count = 0;
|
||||
|
@ -6212,12 +6307,16 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
|
|||
|
||||
info->mso_pixel_overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso);
|
||||
if (info->mso_pixel_overlap > 8) {
|
||||
drm_dbg_kms(connector->dev, "Reserved MSO pixel overlap value %u\n",
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Reserved MSO pixel overlap value %u\n",
|
||||
connector->base.id, connector->name,
|
||||
info->mso_pixel_overlap);
|
||||
info->mso_pixel_overlap = 8;
|
||||
}
|
||||
|
||||
drm_dbg_kms(connector->dev, "MSO stream count %u, pixel overlap %u\n",
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] MSO stream count %u, pixel overlap %u\n",
|
||||
connector->base.id, connector->name,
|
||||
info->mso_stream_count, info->mso_pixel_overlap);
|
||||
}
|
||||
|
||||
|
@ -6300,8 +6399,9 @@ static u32 update_display_info(struct drm_connector *connector,
|
|||
if (info->bpc == 0 && edid->revision == 3 &&
|
||||
edid->input & DRM_EDID_DIGITAL_DFP_1_X) {
|
||||
info->bpc = 8;
|
||||
DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
|
||||
connector->name, info->bpc);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Assigning DFP sink color depth as %d bpc.\n",
|
||||
connector->base.id, connector->name, info->bpc);
|
||||
}
|
||||
|
||||
/* Only defined for 1.4 with digital displays */
|
||||
|
@ -6333,8 +6433,9 @@ static u32 update_display_info(struct drm_connector *connector,
|
|||
break;
|
||||
}
|
||||
|
||||
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
|
||||
connector->name, info->bpc);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
|
||||
connector->base.id, connector->name, info->bpc);
|
||||
|
||||
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
|
||||
info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
|
||||
|
@ -6345,7 +6446,8 @@ static u32 update_display_info(struct drm_connector *connector,
|
|||
|
||||
out:
|
||||
if (quirks & EDID_QUIRK_NON_DESKTOP) {
|
||||
drm_dbg_kms(connector->dev, "Non-desktop display%s\n",
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
|
||||
connector->base.id, connector->name,
|
||||
info->non_desktop ? " (redundant quirk)" : "");
|
||||
info->non_desktop = true;
|
||||
}
|
||||
|
@ -6580,23 +6682,6 @@ int drm_edid_connector_update(struct drm_connector *connector,
|
|||
{
|
||||
int count;
|
||||
|
||||
/*
|
||||
* FIXME: Reconcile the differences in override_edid handling between
|
||||
* this and drm_connector_update_edid_property().
|
||||
*
|
||||
* If override_edid is set, and the EDID passed in here originates from
|
||||
* drm_edid_read() and friends, it will be the override EDID, and there
|
||||
* are no issues. drm_connector_update_edid_property() ignoring requests
|
||||
* to set the EDID dates back to a time when override EDID was not
|
||||
* handled at the low level EDID read.
|
||||
*
|
||||
* The only way the EDID passed in here can be different from the
|
||||
* override EDID is when a driver passes in an EDID that does *not*
|
||||
* originate from drm_edid_read() and friends, or passes in a stale
|
||||
* cached version. This, in turn, is a question of when an override EDID
|
||||
* set via debugfs should take effect.
|
||||
*/
|
||||
|
||||
count = _drm_edid_connector_update(connector, drm_edid);
|
||||
|
||||
_drm_update_tile_info(connector, drm_edid);
|
||||
|
@ -6611,10 +6696,6 @@ EXPORT_SYMBOL(drm_edid_connector_update);
|
|||
static int _drm_connector_update_edid_property(struct drm_connector *connector,
|
||||
const struct drm_edid *drm_edid)
|
||||
{
|
||||
/* ignore requests to set edid when overridden */
|
||||
if (connector->override_edid)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Set the display info, using edid if available, otherwise resetting
|
||||
* the values to defaults. This duplicates the work done in
|
||||
|
@ -6677,8 +6758,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
|
|||
struct drm_edid drm_edid;
|
||||
|
||||
if (edid && !drm_edid_is_valid(edid)) {
|
||||
drm_warn(connector->dev, "%s: EDID invalid.\n",
|
||||
connector->name);
|
||||
drm_warn(connector->dev, "[CONNECTOR:%d:%s] EDID invalid.\n",
|
||||
connector->base.id, connector->name);
|
||||
edid = NULL;
|
||||
}
|
||||
|
||||
|
@ -7054,11 +7135,14 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
|
|||
connector->tile_h_size = w + 1;
|
||||
connector->tile_v_size = h + 1;
|
||||
|
||||
DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
|
||||
DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
|
||||
DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
|
||||
num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
|
||||
DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
|
||||
drm_dbg_kms(connector->dev,
|
||||
"[CONNECTOR:%d:%s] tile cap 0x%x, size %dx%d, num tiles %dx%d, location %dx%d, vend %c%c%c",
|
||||
connector->base.id, connector->name,
|
||||
tile->tile_cap,
|
||||
connector->tile_h_size, connector->tile_v_size,
|
||||
connector->num_h_tile, connector->num_v_tile,
|
||||
connector->tile_h_loc, connector->tile_v_loc,
|
||||
tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
|
||||
|
||||
tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
|
||||
if (!tg)
|
||||
|
|
|
@ -11,12 +11,13 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_connector.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#include "drm_crtc_internal.h"
|
||||
|
||||
static char edid_firmware[PATH_MAX];
|
||||
module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
|
||||
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
|
||||
|
@ -159,23 +160,12 @@ static const u8 generic_edid[GENERIC_EDIDS][128] = {
|
|||
},
|
||||
};
|
||||
|
||||
static int edid_size(const u8 *edid, int data_size)
|
||||
{
|
||||
if (data_size < EDID_LENGTH)
|
||||
return 0;
|
||||
|
||||
return (edid[0x7e] + 1) * EDID_LENGTH;
|
||||
}
|
||||
|
||||
static void *edid_load(struct drm_connector *connector, const char *name,
|
||||
const char *connector_name)
|
||||
static const struct drm_edid *edid_load(struct drm_connector *connector, const char *name)
|
||||
{
|
||||
const struct firmware *fw = NULL;
|
||||
const u8 *fwdata;
|
||||
u8 *edid;
|
||||
const struct drm_edid *drm_edid;
|
||||
int fwsize, builtin;
|
||||
int i, valid_extensions = 0;
|
||||
bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS);
|
||||
|
||||
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
|
||||
if (builtin >= 0) {
|
||||
|
@ -185,18 +175,22 @@ static void *edid_load(struct drm_connector *connector, const char *name,
|
|||
struct platform_device *pdev;
|
||||
int err;
|
||||
|
||||
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
|
||||
pdev = platform_device_register_simple(connector->name, -1, NULL, 0);
|
||||
if (IS_ERR(pdev)) {
|
||||
DRM_ERROR("Failed to register EDID firmware platform device "
|
||||
"for connector \"%s\"\n", connector_name);
|
||||
drm_err(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Failed to register EDID firmware platform device for connector \"%s\"\n",
|
||||
connector->base.id, connector->name,
|
||||
connector->name);
|
||||
return ERR_CAST(pdev);
|
||||
}
|
||||
|
||||
err = request_firmware(&fw, name, &pdev->dev);
|
||||
platform_device_unregister(pdev);
|
||||
if (err) {
|
||||
DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
|
||||
name, err);
|
||||
drm_err(connector->dev,
|
||||
"[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
|
||||
connector->base.id, connector->name,
|
||||
name, err);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
|
@ -204,70 +198,26 @@ static void *edid_load(struct drm_connector *connector, const char *name,
|
|||
fwsize = fw->size;
|
||||
}
|
||||
|
||||
if (edid_size(fwdata, fwsize) != fwsize) {
|
||||
DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
|
||||
"(expected %d, got %d\n", name,
|
||||
edid_size(fwdata, fwsize), (int)fwsize);
|
||||
edid = ERR_PTR(-EINVAL);
|
||||
goto out;
|
||||
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded %s firmware EDID \"%s\"\n",
|
||||
connector->base.id, connector->name,
|
||||
builtin >= 0 ? "built-in" : "external", name);
|
||||
|
||||
drm_edid = drm_edid_alloc(fwdata, fwsize);
|
||||
if (!drm_edid_valid(drm_edid)) {
|
||||
drm_err(connector->dev, "Invalid firmware EDID \"%s\"\n", name);
|
||||
drm_edid_free(drm_edid);
|
||||
drm_edid = ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
|
||||
if (edid == NULL) {
|
||||
edid = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!drm_edid_block_valid(edid, 0, print_bad_edid,
|
||||
&connector->edid_corrupt)) {
|
||||
connector->bad_edid_counter++;
|
||||
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
|
||||
name);
|
||||
kfree(edid);
|
||||
edid = ERR_PTR(-EINVAL);
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 1; i <= edid[0x7e]; i++) {
|
||||
if (i != valid_extensions + 1)
|
||||
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
|
||||
edid + i * EDID_LENGTH, EDID_LENGTH);
|
||||
if (drm_edid_block_valid(edid + i * EDID_LENGTH, i,
|
||||
print_bad_edid,
|
||||
NULL))
|
||||
valid_extensions++;
|
||||
}
|
||||
|
||||
if (valid_extensions != edid[0x7e]) {
|
||||
u8 *new_edid;
|
||||
|
||||
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
|
||||
DRM_INFO("Found %d valid extensions instead of %d in EDID data "
|
||||
"\"%s\" for connector \"%s\"\n", valid_extensions,
|
||||
edid[0x7e], name, connector_name);
|
||||
edid[0x7e] = valid_extensions;
|
||||
|
||||
new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
|
||||
GFP_KERNEL);
|
||||
if (new_edid)
|
||||
edid = new_edid;
|
||||
}
|
||||
|
||||
DRM_INFO("Got %s EDID base block and %d extension%s from "
|
||||
"\"%s\" for connector \"%s\"\n", (builtin >= 0) ? "built-in" :
|
||||
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
|
||||
name, connector_name);
|
||||
|
||||
out:
|
||||
release_firmware(fw);
|
||||
return edid;
|
||||
|
||||
return drm_edid;
|
||||
}
|
||||
|
||||
struct edid *drm_load_edid_firmware(struct drm_connector *connector)
|
||||
const struct drm_edid *drm_edid_load_firmware(struct drm_connector *connector)
|
||||
{
|
||||
const char *connector_name = connector->name;
|
||||
char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL;
|
||||
struct edid *edid;
|
||||
const struct drm_edid *drm_edid;
|
||||
|
||||
if (edid_firmware[0] == '\0')
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
@ -288,7 +238,7 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
|
|||
while ((edidname = strsep(&edidstr, ","))) {
|
||||
colon = strchr(edidname, ':');
|
||||
if (colon != NULL) {
|
||||
if (strncmp(connector_name, edidname, colon - edidname))
|
||||
if (strncmp(connector->name, edidname, colon - edidname))
|
||||
continue;
|
||||
edidname = colon + 1;
|
||||
break;
|
||||
|
@ -310,8 +260,9 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
|
|||
if (*last == '\n')
|
||||
*last = '\0';
|
||||
|
||||
edid = edid_load(connector, edidname, connector_name);
|
||||
drm_edid = edid_load(connector, edidname);
|
||||
|
||||
kfree(fwstr);
|
||||
|
||||
return edid;
|
||||
return drm_edid;
|
||||
}
|
||||
|
|
|
@ -606,7 +606,7 @@ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
|
|||
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
|
||||
|
||||
/*
|
||||
* mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
|
||||
* mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
|
||||
* the payload in a long packet transmitted from the peripheral back to the
|
||||
* host processor
|
||||
* @dsi: DSI peripheral device
|
||||
|
|
|
@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
|
|||
count = 0;
|
||||
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
|
||||
drm_for_each_connector_iter(connector, &conn_iter) {
|
||||
if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
|
||||
continue;
|
||||
|
||||
/* only expose writeback connectors if userspace understands them */
|
||||
if (!file_priv->writeback_connectors &&
|
||||
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
|
||||
|
|
|
@ -367,7 +367,7 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
|
|||
* override/firmware EDID.
|
||||
*/
|
||||
if (count == 0 && connector->status == connector_status_connected)
|
||||
count = drm_add_override_edid_modes(connector);
|
||||
count = drm_edid_override_connector_update(connector);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ static u32 clip_scaled(int src, int dst, int *clip)
|
|||
* @dst: destination window rectangle
|
||||
* @clip: clip rectangle
|
||||
*
|
||||
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
|
||||
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by
|
||||
* the corresponding amounts, retaining the vertical and horizontal scaling
|
||||
* factors from @src to @dst.
|
||||
*
|
||||
|
|
|
@ -2355,7 +2355,7 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
|
|||
}
|
||||
|
||||
static void
|
||||
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
|
||||
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
|
||||
|
@ -2371,16 +2371,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
|
|||
* CONFIG1 pin, but no such luck on our hardware.
|
||||
*
|
||||
* The only method left to us is to check the VBT to see
|
||||
* if the port is a dual mode capable DP port. But let's
|
||||
* only do that when we sucesfully read the EDID, to avoid
|
||||
* confusing log messages about DP dual mode adaptors when
|
||||
* there's nothing connected to the port.
|
||||
* if the port is a dual mode capable DP port.
|
||||
*/
|
||||
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
|
||||
/* An overridden EDID imply that we want this port for testing.
|
||||
* Make sure not to set limits for that port.
|
||||
*/
|
||||
if (has_edid && !connector->override_edid &&
|
||||
if (!connector->force &&
|
||||
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Assuming DP dual mode adaptor presence based on VBT\n");
|
||||
|
@ -2435,18 +2429,18 @@ intel_hdmi_set_edid(struct drm_connector *connector)
|
|||
intel_gmbus_force_bit(i2c, false);
|
||||
}
|
||||
|
||||
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
|
||||
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
|
||||
|
||||
to_intel_connector(connector)->detect_edid = edid;
|
||||
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
|
||||
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
|
||||
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
|
||||
|
||||
intel_hdmi_dp_dual_mode_detect(connector);
|
||||
|
||||
connected = true;
|
||||
}
|
||||
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
|
||||
|
||||
cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
|
||||
|
||||
return connected;
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include <nvif/if000c.h>
|
||||
#include <nvif/if500b.h>
|
||||
#include <nvif/if900b.h>
|
||||
#include <nvif/if000c.h>
|
||||
|
||||
#include <nvhw/class/cla0b5.h>
|
||||
|
||||
|
|
|
@ -1883,8 +1883,10 @@ static const struct edp_panel_entry edp_panels[] = {
|
|||
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
|
||||
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
|
||||
|
||||
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"),
|
||||
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
|
||||
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
|
||||
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
|
||||
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
|
||||
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
|
||||
|
||||
|
|
|
@ -62,13 +62,13 @@
|
|||
#define to_drm_sched_job(sched_job) \
|
||||
container_of((sched_job), struct drm_sched_job, queue_node)
|
||||
|
||||
int drm_sched_policy = DRM_SCHED_POLICY_RR;
|
||||
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
|
||||
|
||||
/**
|
||||
* DOC: sched_policy (int)
|
||||
* Used to override default entities scheduling policy in a run queue.
|
||||
*/
|
||||
MODULE_PARM_DESC(sched_policy, "Specify schedule policy for entities on a runqueue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin (default), " __stringify(DRM_SCHED_POLICY_FIFO) " = use FIFO.");
|
||||
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
|
||||
module_param_named(sched_policy, drm_sched_policy, int, 0444);
|
||||
|
||||
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
|
||||
|
|
|
@ -10,225 +10,306 @@
|
|||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_framebuffer.h>
|
||||
#include <drm/drm_modes.h>
|
||||
#include <drm/drm_rect.h>
|
||||
|
||||
static void set_src(struct drm_plane_state *plane_state,
|
||||
unsigned int src_x, unsigned int src_y,
|
||||
unsigned int src_w, unsigned int src_h)
|
||||
static const struct drm_crtc_state crtc_state = {
|
||||
.crtc = ZERO_SIZE_PTR,
|
||||
.enable = true,
|
||||
.active = true,
|
||||
.mode = {
|
||||
DRM_MODE("1024x768", 0, 65000, 1024, 1048,
|
||||
1184, 1344, 0, 768, 771, 777, 806, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
|
||||
},
|
||||
};
|
||||
|
||||
struct drm_check_plane_state_test {
|
||||
const char *name;
|
||||
const char *msg;
|
||||
struct {
|
||||
unsigned int x;
|
||||
unsigned int y;
|
||||
unsigned int w;
|
||||
unsigned int h;
|
||||
} src, src_expected;
|
||||
struct {
|
||||
int x;
|
||||
int y;
|
||||
unsigned int w;
|
||||
unsigned int h;
|
||||
} crtc, crtc_expected;
|
||||
unsigned int rotation;
|
||||
int min_scale;
|
||||
int max_scale;
|
||||
bool can_position;
|
||||
};
|
||||
|
||||
static int drm_plane_helper_init(struct kunit *test)
|
||||
{
|
||||
plane_state->src_x = src_x;
|
||||
plane_state->src_y = src_y;
|
||||
plane_state->src_w = src_w;
|
||||
plane_state->src_h = src_h;
|
||||
const struct drm_check_plane_state_test *params = test->param_value;
|
||||
struct drm_plane *plane;
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_plane_state *mock;
|
||||
|
||||
plane = kunit_kzalloc(test, sizeof(*plane), GFP_KERNEL);
|
||||
KUNIT_ASSERT_NOT_NULL(test, plane);
|
||||
|
||||
fb = kunit_kzalloc(test, sizeof(*fb), GFP_KERNEL);
|
||||
KUNIT_ASSERT_NOT_NULL(test, fb);
|
||||
fb->width = 2048;
|
||||
fb->height = 2048;
|
||||
|
||||
mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
|
||||
KUNIT_ASSERT_NOT_NULL(test, mock);
|
||||
mock->plane = plane;
|
||||
mock->crtc = ZERO_SIZE_PTR;
|
||||
mock->fb = fb;
|
||||
mock->rotation = params->rotation;
|
||||
mock->src_x = params->src.x;
|
||||
mock->src_y = params->src.y;
|
||||
mock->src_w = params->src.w;
|
||||
mock->src_h = params->src.h;
|
||||
mock->crtc_x = params->crtc.x;
|
||||
mock->crtc_y = params->crtc.y;
|
||||
mock->crtc_w = params->crtc.w;
|
||||
mock->crtc_h = params->crtc.h;
|
||||
|
||||
test->priv = mock;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool check_src_eq(struct drm_plane_state *plane_state,
|
||||
static void check_src_eq(struct kunit *test, struct drm_plane_state *plane_state,
|
||||
unsigned int src_x, unsigned int src_y,
|
||||
unsigned int src_w, unsigned int src_h)
|
||||
{
|
||||
if (plane_state->src.x1 < 0) {
|
||||
pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
|
||||
drm_rect_debug_print("src: ", &plane_state->src, true);
|
||||
return false;
|
||||
}
|
||||
if (plane_state->src.y1 < 0) {
|
||||
pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1);
|
||||
drm_rect_debug_print("src: ", &plane_state->src, true);
|
||||
return false;
|
||||
}
|
||||
struct drm_rect expected = DRM_RECT_INIT(src_x, src_y, src_w, src_h);
|
||||
|
||||
if (plane_state->src.x1 != src_x ||
|
||||
plane_state->src.y1 != src_y ||
|
||||
drm_rect_width(&plane_state->src) != src_w ||
|
||||
drm_rect_height(&plane_state->src) != src_h) {
|
||||
drm_rect_debug_print("src: ", &plane_state->src, true);
|
||||
return false;
|
||||
}
|
||||
KUNIT_ASSERT_GE_MSG(test, plane_state->src.x1, 0,
|
||||
"src x coordinate %x should never be below 0, src: " DRM_RECT_FP_FMT,
|
||||
plane_state->src.x1, DRM_RECT_FP_ARG(&plane_state->src));
|
||||
|
||||
return true;
|
||||
KUNIT_ASSERT_GE_MSG(test, plane_state->src.y1, 0,
|
||||
"src y coordinate %x should never be below 0, src: " DRM_RECT_FP_FMT,
|
||||
plane_state->src.y1, DRM_RECT_FP_ARG(&plane_state->src));
|
||||
|
||||
KUNIT_EXPECT_TRUE_MSG(test, drm_rect_equals(&plane_state->src, &expected),
|
||||
"dst: " DRM_RECT_FP_FMT ", expected: " DRM_RECT_FP_FMT,
|
||||
DRM_RECT_FP_ARG(&plane_state->src), DRM_RECT_FP_ARG(&expected));
|
||||
}
|
||||
|
||||
static void set_crtc(struct drm_plane_state *plane_state,
|
||||
int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h)
|
||||
{
|
||||
plane_state->crtc_x = crtc_x;
|
||||
plane_state->crtc_y = crtc_y;
|
||||
plane_state->crtc_w = crtc_w;
|
||||
plane_state->crtc_h = crtc_h;
|
||||
}
|
||||
|
||||
static bool check_crtc_eq(struct drm_plane_state *plane_state,
|
||||
static void check_crtc_eq(struct kunit *test, struct drm_plane_state *plane_state,
|
||||
int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h)
|
||||
{
|
||||
if (plane_state->dst.x1 != crtc_x ||
|
||||
plane_state->dst.y1 != crtc_y ||
|
||||
drm_rect_width(&plane_state->dst) != crtc_w ||
|
||||
drm_rect_height(&plane_state->dst) != crtc_h) {
|
||||
drm_rect_debug_print("dst: ", &plane_state->dst, false);
|
||||
struct drm_rect expected = DRM_RECT_INIT(crtc_x, crtc_y, crtc_w, crtc_h);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
KUNIT_EXPECT_TRUE_MSG(test, drm_rect_equals(&plane_state->dst, &expected),
|
||||
"dst: " DRM_RECT_FMT ", expected: " DRM_RECT_FMT,
|
||||
DRM_RECT_ARG(&plane_state->dst), DRM_RECT_ARG(&expected));
|
||||
}
|
||||
|
||||
static void drm_test_check_plane_state(struct kunit *test)
|
||||
{
|
||||
int ret;
|
||||
const struct drm_check_plane_state_test *params = test->param_value;
|
||||
struct drm_plane_state *plane_state = test->priv;
|
||||
|
||||
static const struct drm_crtc_state crtc_state = {
|
||||
.crtc = ZERO_SIZE_PTR,
|
||||
.enable = true,
|
||||
.active = true,
|
||||
.mode = {
|
||||
DRM_MODE("1024x768", 0, 65000, 1024, 1048, 1184, 1344, 0, 768, 771,
|
||||
777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
|
||||
},
|
||||
};
|
||||
static struct drm_plane plane = {
|
||||
.dev = NULL
|
||||
};
|
||||
static struct drm_framebuffer fb = {
|
||||
.width = 2048,
|
||||
.height = 2048
|
||||
};
|
||||
static struct drm_plane_state plane_state = {
|
||||
.plane = &plane,
|
||||
.crtc = ZERO_SIZE_PTR,
|
||||
.fb = &fb,
|
||||
.rotation = DRM_MODE_ROTATE_0
|
||||
};
|
||||
|
||||
/* Simple clipping, no scaling. */
|
||||
set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
|
||||
set_crtc(&plane_state, 0, 0, fb.width, fb.height);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
false, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple clipping check should pass\n");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
|
||||
|
||||
/* Rotated clipping + reflection, no scaling. */
|
||||
plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
false, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Rotated clipping check should pass\n");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
|
||||
plane_state.rotation = DRM_MODE_ROTATE_0;
|
||||
|
||||
/* Check whether positioning works correctly. */
|
||||
set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
|
||||
set_crtc(&plane_state, 0, 0, 1023, 767);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
false, false);
|
||||
KUNIT_EXPECT_TRUE_MSG(test, ret,
|
||||
"Should not be able to position on the crtc with can_position=false\n");
|
||||
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
true, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple positioning should work\n");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1023, 767));
|
||||
|
||||
/* Simple scaling tests. */
|
||||
set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
|
||||
set_crtc(&plane_state, 0, 0, 1024, 768);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
0x8001,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
false, false);
|
||||
KUNIT_EXPECT_TRUE_MSG(test, ret, "Upscaling out of range should fail.\n");
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
0x8000,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
false, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Upscaling exactly 2x should work\n");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
|
||||
|
||||
set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
0x1ffff, false, false);
|
||||
KUNIT_EXPECT_TRUE_MSG(test, ret, "Downscaling out of range should fail.\n");
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
0x20000, false, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed with exact scaling limit\n");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
|
||||
|
||||
/* Testing rounding errors. */
|
||||
set_src(&plane_state, 0, 0, 0x40001, 0x40001);
|
||||
set_crtc(&plane_state, 1022, 766, 4, 4);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
0x10001,
|
||||
true, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
|
||||
|
||||
set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
|
||||
set_crtc(&plane_state, -2, -2, 1028, 772);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
0x10001,
|
||||
false, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x40002, 0x40002,
|
||||
1024 << 16, 768 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
|
||||
|
||||
set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
|
||||
set_crtc(&plane_state, 1022, 766, 4, 4);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
0xffff,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
true, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
/* Should not be rounded to 0x20001, which would be upscaling. */
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
|
||||
|
||||
set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
|
||||
set_crtc(&plane_state, -2, -2, 1028, 772);
|
||||
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
|
||||
0xffff,
|
||||
DRM_PLANE_NO_SCALING,
|
||||
false, false);
|
||||
KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
|
||||
KUNIT_EXPECT_TRUE(test, plane_state.visible);
|
||||
KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x3fffe, 0x3fffe,
|
||||
1024 << 16, 768 << 16));
|
||||
KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
|
||||
KUNIT_ASSERT_EQ_MSG(test,
|
||||
drm_atomic_helper_check_plane_state(plane_state, &crtc_state,
|
||||
params->min_scale,
|
||||
params->max_scale,
|
||||
params->can_position, false),
|
||||
0, params->msg);
|
||||
KUNIT_EXPECT_TRUE(test, plane_state->visible);
|
||||
check_src_eq(test, plane_state, params->src_expected.x, params->src_expected.y,
|
||||
params->src_expected.w, params->src_expected.h);
|
||||
check_crtc_eq(test, plane_state, params->crtc_expected.x, params->crtc_expected.y,
|
||||
params->crtc_expected.w, params->crtc_expected.h);
|
||||
}
|
||||
|
||||
static void drm_check_plane_state_desc(const struct drm_check_plane_state_test *t,
|
||||
char *desc)
|
||||
{
|
||||
sprintf(desc, "%s", t->name);
|
||||
}
|
||||
|
||||
static const struct drm_check_plane_state_test drm_check_plane_state_tests[] = {
|
||||
{
|
||||
.name = "clipping_simple",
|
||||
.msg = "Simple clipping check should pass",
|
||||
.src = { 0, 0,
|
||||
2048 << 16,
|
||||
2048 << 16 },
|
||||
.crtc = { 0, 0, 2048, 2048 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = false,
|
||||
.src_expected = { 0, 0, 1024 << 16, 768 << 16 },
|
||||
.crtc_expected = { 0, 0, 1024, 768 },
|
||||
},
|
||||
{
|
||||
.name = "clipping_rotate_reflect",
|
||||
.msg = "Rotated clipping check should pass",
|
||||
.src = { 0, 0,
|
||||
2048 << 16,
|
||||
2048 << 16 },
|
||||
.crtc = { 0, 0, 2048, 2048 },
|
||||
.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = false,
|
||||
.src_expected = { 0, 0, 768 << 16, 1024 << 16 },
|
||||
.crtc_expected = { 0, 0, 1024, 768 },
|
||||
},
|
||||
{
|
||||
.name = "positioning_simple",
|
||||
.msg = "Simple positioning should work",
|
||||
.src = { 0, 0, 1023 << 16, 767 << 16 },
|
||||
.crtc = { 0, 0, 1023, 767 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = true,
|
||||
.src_expected = { 0, 0, 1023 << 16, 767 << 16 },
|
||||
.crtc_expected = { 0, 0, 1023, 767 },
|
||||
},
|
||||
{
|
||||
.name = "upscaling",
|
||||
.msg = "Upscaling exactly 2x should work",
|
||||
.src = { 0, 0, 512 << 16, 384 << 16 },
|
||||
.crtc = { 0, 0, 1024, 768 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = 0x8000,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = false,
|
||||
.src_expected = { 0, 0, 512 << 16, 384 << 16 },
|
||||
.crtc_expected = { 0, 0, 1024, 768 },
|
||||
},
|
||||
{
|
||||
.name = "downscaling",
|
||||
.msg = "Should succeed with exact scaling limit",
|
||||
.src = { 0, 0, 2048 << 16, 1536 << 16 },
|
||||
.crtc = { 0, 0, 1024, 768 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = 0x20000,
|
||||
.can_position = false,
|
||||
.src_expected = { 0, 0, 2048 << 16, 1536 << 16 },
|
||||
.crtc_expected = { 0, 0, 1024, 768 },
|
||||
},
|
||||
{
|
||||
.name = "rounding1",
|
||||
.msg = "Should succeed by clipping to exact multiple",
|
||||
.src = { 0, 0, 0x40001, 0x40001 },
|
||||
.crtc = { 1022, 766, 4, 4 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = 0x10001,
|
||||
.can_position = true,
|
||||
.src_expected = { 0, 0, 2 << 16, 2 << 16 },
|
||||
.crtc_expected = { 1022, 766, 2, 2 },
|
||||
},
|
||||
{
|
||||
.name = "rounding2",
|
||||
.msg = "Should succeed by clipping to exact multiple",
|
||||
.src = { 0x20001, 0x20001, 0x4040001, 0x3040001 },
|
||||
.crtc = { -2, -2, 1028, 772 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = 0x10001,
|
||||
.can_position = false,
|
||||
.src_expected = { 0x40002, 0x40002, 1024 << 16, 768 << 16 },
|
||||
.crtc_expected = { 0, 0, 1024, 768 },
|
||||
},
|
||||
{
|
||||
.name = "rounding3",
|
||||
.msg = "Should succeed by clipping to exact multiple",
|
||||
.src = { 0, 0, 0x3ffff, 0x3ffff },
|
||||
.crtc = { 1022, 766, 4, 4 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = 0xffff,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = true,
|
||||
/* Should not be rounded to 0x20001, which would be upscaling. */
|
||||
.src_expected = { 0, 0, 2 << 16, 2 << 16 },
|
||||
.crtc_expected = { 1022, 766, 2, 2 },
|
||||
},
|
||||
{
|
||||
.name = "rounding4",
|
||||
.msg = "Should succeed by clipping to exact multiple",
|
||||
.src = { 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff },
|
||||
.crtc = { -2, -2, 1028, 772 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = 0xffff,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = false,
|
||||
.src_expected = { 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16 },
|
||||
.crtc_expected = { 0, 0, 1024, 768 },
|
||||
},
|
||||
};
|
||||
|
||||
KUNIT_ARRAY_PARAM(drm_check_plane_state, drm_check_plane_state_tests, drm_check_plane_state_desc);
|
||||
|
||||
static void drm_test_check_invalid_plane_state(struct kunit *test)
|
||||
{
|
||||
const struct drm_check_plane_state_test *params = test->param_value;
|
||||
struct drm_plane_state *plane_state = test->priv;
|
||||
|
||||
KUNIT_ASSERT_LT_MSG(test,
|
||||
drm_atomic_helper_check_plane_state(plane_state, &crtc_state,
|
||||
params->min_scale,
|
||||
params->max_scale,
|
||||
params->can_position, false),
|
||||
0, params->msg);
|
||||
}
|
||||
|
||||
static const struct drm_check_plane_state_test drm_check_invalid_plane_state_tests[] = {
|
||||
{
|
||||
.name = "positioning_invalid",
|
||||
.msg = "Should not be able to position on the crtc with can_position=false",
|
||||
.src = { 0, 0, 1023 << 16, 767 << 16 },
|
||||
.crtc = { 0, 0, 1023, 767 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = false,
|
||||
},
|
||||
{
|
||||
.name = "upscaling_invalid",
|
||||
.msg = "Upscaling out of range should fail",
|
||||
.src = { 0, 0, 512 << 16, 384 << 16 },
|
||||
.crtc = { 0, 0, 1024, 768 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = 0x8001,
|
||||
.max_scale = DRM_PLANE_NO_SCALING,
|
||||
.can_position = false,
|
||||
},
|
||||
{
|
||||
.name = "downscaling_invalid",
|
||||
.msg = "Downscaling out of range should fail",
|
||||
.src = { 0, 0, 2048 << 16, 1536 << 16 },
|
||||
.crtc = { 0, 0, 1024, 768 },
|
||||
.rotation = DRM_MODE_ROTATE_0,
|
||||
.min_scale = DRM_PLANE_NO_SCALING,
|
||||
.max_scale = 0x1ffff,
|
||||
.can_position = false,
|
||||
},
|
||||
};
|
||||
|
||||
KUNIT_ARRAY_PARAM(drm_check_invalid_plane_state, drm_check_invalid_plane_state_tests,
|
||||
drm_check_plane_state_desc);
|
||||
|
||||
static struct kunit_case drm_plane_helper_test[] = {
|
||||
KUNIT_CASE(drm_test_check_plane_state),
|
||||
KUNIT_CASE_PARAM(drm_test_check_plane_state, drm_check_plane_state_gen_params),
|
||||
KUNIT_CASE_PARAM(drm_test_check_invalid_plane_state,
|
||||
drm_check_invalid_plane_state_gen_params),
|
||||
{}
|
||||
};
|
||||
|
||||
static struct kunit_suite drm_plane_helper_test_suite = {
|
||||
.name = "drm_plane_helper",
|
||||
.init = drm_plane_helper_init,
|
||||
.test_cases = drm_plane_helper_test,
|
||||
};
|
||||
|
||||
|
|
|
@ -736,12 +736,12 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
|
|||
struct vc4_bo *bo = to_vc4_bo(obj);
|
||||
|
||||
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
|
||||
DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
|
||||
DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (bo->madv != VC4_MADV_WILLNEED) {
|
||||
DRM_DEBUG("mmaping of %s BO not allowed\n",
|
||||
DRM_DEBUG("mmapping of %s BO not allowed\n",
|
||||
bo->madv == VC4_MADV_DONTNEED ?
|
||||
"purgeable" : "purged");
|
||||
return -EINVAL;
|
||||
|
|
|
@ -16,13 +16,6 @@ config DRM_VMWGFX
|
|||
virtual hardware.
|
||||
The compiled module will be called "vmwgfx.ko".
|
||||
|
||||
config DRM_VMWGFX_FBCON
|
||||
depends on DRM_VMWGFX && DRM_FBDEV_EMULATION
|
||||
bool "Enable framebuffer console under vmwgfx by default"
|
||||
help
|
||||
Choose this option if you are shipping a new vmwgfx
|
||||
userspace driver that supports using the kernel driver.
|
||||
|
||||
config DRM_VMWGFX_MKSSTATS
|
||||
bool "Enable mksGuestStats instrumentation of vmwgfx by default"
|
||||
depends on DRM_VMWGFX
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
|
||||
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
|
||||
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
|
||||
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
|
||||
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
|
||||
|
@ -12,6 +12,4 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d
|
|||
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
|
||||
vmwgfx_gem.o
|
||||
|
||||
vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
|
||||
|
||||
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -44,16 +44,20 @@
|
|||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include "ttm_object.h"
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/module.h>
|
||||
#include "ttm_object.h"
|
||||
#include "vmwgfx_drv.h"
|
||||
#include <linux/hashtable.h>
|
||||
|
||||
MODULE_IMPORT_NS(DMA_BUF);
|
||||
|
||||
#define VMW_TTM_OBJECT_REF_HT_ORDER 10
|
||||
|
||||
/**
|
||||
* struct ttm_object_file
|
||||
*
|
||||
|
@ -74,16 +78,14 @@ struct ttm_object_file {
|
|||
struct ttm_object_device *tdev;
|
||||
spinlock_t lock;
|
||||
struct list_head ref_list;
|
||||
struct vmwgfx_open_hash ref_hash;
|
||||
DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
|
||||
struct kref refcount;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct ttm_object_device
|
||||
*
|
||||
* @object_lock: lock that protects the object_hash hash table.
|
||||
*
|
||||
* @object_hash: hash table for fast lookup of object global names.
|
||||
* @object_lock: lock that protects idr.
|
||||
*
|
||||
* @object_count: Per device object count.
|
||||
*
|
||||
|
@ -92,7 +94,6 @@ struct ttm_object_file {
|
|||
|
||||
struct ttm_object_device {
|
||||
spinlock_t object_lock;
|
||||
struct vmwgfx_open_hash object_hash;
|
||||
atomic_t object_count;
|
||||
struct dma_buf_ops ops;
|
||||
void (*dmabuf_release)(struct dma_buf *dma_buf);
|
||||
|
@ -138,6 +139,36 @@ ttm_object_file_ref(struct ttm_object_file *tfile)
|
|||
return tfile;
|
||||
}
|
||||
|
||||
static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
|
||||
uint64_t key,
|
||||
struct vmwgfx_hash_item **p_hash)
|
||||
{
|
||||
struct vmwgfx_hash_item *hash;
|
||||
|
||||
hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
|
||||
if (hash->key == key) {
|
||||
*p_hash = hash;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
|
||||
uint64_t key,
|
||||
struct vmwgfx_hash_item **p_hash)
|
||||
{
|
||||
struct vmwgfx_hash_item *hash;
|
||||
|
||||
hash_for_each_possible(tfile->ref_hash, hash, head, key) {
|
||||
if (hash->key == key) {
|
||||
*p_hash = hash;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void ttm_object_file_destroy(struct kref *kref)
|
||||
{
|
||||
struct ttm_object_file *tfile =
|
||||
|
@ -240,37 +271,35 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
|
|||
* Return: A pointer to the object if successful or NULL otherwise.
|
||||
*/
|
||||
struct ttm_base_object *
|
||||
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
|
||||
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
|
||||
{
|
||||
struct vmwgfx_hash_item *hash;
|
||||
struct vmwgfx_open_hash *ht = &tfile->ref_hash;
|
||||
int ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
|
||||
ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
|
||||
if (ret) {
|
||||
rcu_read_unlock();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
__release(RCU);
|
||||
return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
|
||||
return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
|
||||
|
||||
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
|
||||
uint32_t key)
|
||||
uint64_t key)
|
||||
{
|
||||
struct ttm_base_object *base = NULL;
|
||||
struct vmwgfx_hash_item *hash;
|
||||
struct vmwgfx_open_hash *ht = &tfile->ref_hash;
|
||||
int ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
|
||||
ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
|
||||
base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
|
||||
if (!kref_get_unless_zero(&base->refcount))
|
||||
base = NULL;
|
||||
}
|
||||
|
@ -280,7 +309,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
|
|||
}
|
||||
|
||||
struct ttm_base_object *
|
||||
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
|
||||
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
|
||||
{
|
||||
struct ttm_base_object *base;
|
||||
|
||||
|
@ -299,7 +328,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
|
|||
bool *existed,
|
||||
bool require_existed)
|
||||
{
|
||||
struct vmwgfx_open_hash *ht = &tfile->ref_hash;
|
||||
struct ttm_ref_object *ref;
|
||||
struct vmwgfx_hash_item *hash;
|
||||
int ret = -EINVAL;
|
||||
|
@ -312,10 +340,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
|
|||
|
||||
while (ret == -EINVAL) {
|
||||
rcu_read_lock();
|
||||
ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
|
||||
ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);
|
||||
|
||||
if (ret == 0) {
|
||||
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||
ref = hlist_entry(hash, struct ttm_ref_object, hash);
|
||||
if (kref_get_unless_zero(&ref->kref)) {
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
|
@ -337,21 +365,14 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
|
|||
kref_init(&ref->kref);
|
||||
|
||||
spin_lock(&tfile->lock);
|
||||
ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
list_add_tail(&ref->head, &tfile->ref_list);
|
||||
kref_get(&base->refcount);
|
||||
spin_unlock(&tfile->lock);
|
||||
if (existed != NULL)
|
||||
*existed = false;
|
||||
break;
|
||||
}
|
||||
hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
|
||||
ret = 0;
|
||||
|
||||
list_add_tail(&ref->head, &tfile->ref_list);
|
||||
kref_get(&base->refcount);
|
||||
spin_unlock(&tfile->lock);
|
||||
BUG_ON(ret != -EINVAL);
|
||||
|
||||
kfree(ref);
|
||||
if (existed != NULL)
|
||||
*existed = false;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -363,10 +384,8 @@ ttm_ref_object_release(struct kref *kref)
|
|||
struct ttm_ref_object *ref =
|
||||
container_of(kref, struct ttm_ref_object, kref);
|
||||
struct ttm_object_file *tfile = ref->tfile;
|
||||
struct vmwgfx_open_hash *ht;
|
||||
|
||||
ht = &tfile->ref_hash;
|
||||
(void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
|
||||
hash_del_rcu(&ref->hash.head);
|
||||
list_del(&ref->head);
|
||||
spin_unlock(&tfile->lock);
|
||||
|
||||
|
@ -378,18 +397,17 @@ ttm_ref_object_release(struct kref *kref)
|
|||
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
|
||||
unsigned long key)
|
||||
{
|
||||
struct vmwgfx_open_hash *ht = &tfile->ref_hash;
|
||||
struct ttm_ref_object *ref;
|
||||
struct vmwgfx_hash_item *hash;
|
||||
int ret;
|
||||
|
||||
spin_lock(&tfile->lock);
|
||||
ret = vmwgfx_ht_find_item(ht, key, &hash);
|
||||
ret = ttm_tfile_find_ref(tfile, key, &hash);
|
||||
if (unlikely(ret != 0)) {
|
||||
spin_unlock(&tfile->lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||
ref = hlist_entry(hash, struct ttm_ref_object, hash);
|
||||
kref_put(&ref->kref, ttm_ref_object_release);
|
||||
spin_unlock(&tfile->lock);
|
||||
return 0;
|
||||
|
@ -416,16 +434,13 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
|
|||
}
|
||||
|
||||
spin_unlock(&tfile->lock);
|
||||
vmwgfx_ht_remove(&tfile->ref_hash);
|
||||
|
||||
ttm_object_file_unref(&tfile);
|
||||
}
|
||||
|
||||
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
|
||||
unsigned int hash_order)
|
||||
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
|
||||
{
|
||||
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
|
||||
int ret;
|
||||
|
||||
if (unlikely(tfile == NULL))
|
||||
return NULL;
|
||||
|
@ -435,34 +450,21 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
|
|||
kref_init(&tfile->refcount);
|
||||
INIT_LIST_HEAD(&tfile->ref_list);
|
||||
|
||||
ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
hash_init(tfile->ref_hash);
|
||||
|
||||
return tfile;
|
||||
out_err:
|
||||
vmwgfx_ht_remove(&tfile->ref_hash);
|
||||
|
||||
kfree(tfile);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct ttm_object_device *
|
||||
ttm_object_device_init(unsigned int hash_order,
|
||||
const struct dma_buf_ops *ops)
|
||||
ttm_object_device_init(const struct dma_buf_ops *ops)
|
||||
{
|
||||
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
|
||||
int ret;
|
||||
|
||||
if (unlikely(tdev == NULL))
|
||||
return NULL;
|
||||
|
||||
spin_lock_init(&tdev->object_lock);
|
||||
atomic_set(&tdev->object_count, 0);
|
||||
ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
|
||||
if (ret != 0)
|
||||
goto out_no_object_hash;
|
||||
|
||||
/*
|
||||
* Our base is at VMWGFX_NUM_MOB + 1 because we want to create
|
||||
|
@ -477,10 +479,6 @@ ttm_object_device_init(unsigned int hash_order,
|
|||
tdev->dmabuf_release = tdev->ops.release;
|
||||
tdev->ops.release = ttm_prime_dmabuf_release;
|
||||
return tdev;
|
||||
|
||||
out_no_object_hash:
|
||||
kfree(tdev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void ttm_object_device_release(struct ttm_object_device **p_tdev)
|
||||
|
@ -491,7 +489,6 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
|
|||
|
||||
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
|
||||
idr_destroy(&tdev->idr);
|
||||
vmwgfx_ht_remove(&tdev->object_hash);
|
||||
|
||||
kfree(tdev);
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright (c) 2006-2022 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -42,8 +42,6 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#include "vmwgfx_hashtab.h"
|
||||
|
||||
/**
|
||||
* enum ttm_object_type
|
||||
*
|
||||
|
@ -104,7 +102,7 @@ struct ttm_base_object {
|
|||
struct ttm_object_file *tfile;
|
||||
struct kref refcount;
|
||||
void (*refcount_release) (struct ttm_base_object **base);
|
||||
u32 handle;
|
||||
u64 handle;
|
||||
enum ttm_object_type object_type;
|
||||
u32 shareable;
|
||||
};
|
||||
|
@ -164,7 +162,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
|
|||
*/
|
||||
|
||||
extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
|
||||
*tfile, uint32_t key);
|
||||
*tfile, uint64_t key);
|
||||
|
||||
/**
|
||||
* ttm_base_object_lookup_for_ref
|
||||
|
@ -178,7 +176,7 @@ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
|
|||
*/
|
||||
|
||||
extern struct ttm_base_object *
|
||||
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
|
||||
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key);
|
||||
|
||||
/**
|
||||
* ttm_base_object_unref
|
||||
|
@ -237,14 +235,12 @@ extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
|
|||
* ttm_object_file_init - initialize a struct ttm_object file
|
||||
*
|
||||
* @tdev: A struct ttm_object device this file is initialized on.
|
||||
* @hash_order: Order of the hash table used to hold the reference objects.
|
||||
*
|
||||
* This is typically called by the file_ops::open function.
|
||||
*/
|
||||
|
||||
extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
|
||||
*tdev,
|
||||
unsigned int hash_order);
|
||||
*tdev);
|
||||
|
||||
/**
|
||||
* ttm_object_file_release - release data held by a ttm_object_file
|
||||
|
@ -262,7 +258,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
|
|||
/**
|
||||
* ttm_object device init - initialize a struct ttm_object_device
|
||||
*
|
||||
* @hash_order: Order of hash table used to hash the base objects.
|
||||
* @ops: DMA buf ops for prime objects of this device.
|
||||
*
|
||||
* This function is typically called on device initialization to prepare
|
||||
|
@ -270,8 +265,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
|
|||
*/
|
||||
|
||||
extern struct ttm_object_device *
|
||||
ttm_object_device_init(unsigned int hash_order,
|
||||
const struct dma_buf_ops *ops);
|
||||
ttm_object_device_init(const struct dma_buf_ops *ops);
|
||||
|
||||
/**
|
||||
* ttm_object_device_release - release data held by a ttm_object_device
|
||||
|
@ -314,7 +308,7 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
|
|||
kfree_rcu(__obj, __prime.base.rhead)
|
||||
|
||||
struct ttm_base_object *
|
||||
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
|
||||
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
|
||||
|
||||
/**
|
||||
* ttm_base_object_noref_release - release a base object pointer looked up
|
||||
|
|
|
@ -807,9 +807,23 @@ int vmw_dumb_create(struct drm_file *file_priv,
|
|||
{
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
struct vmw_buffer_object *vbo;
|
||||
int cpp = DIV_ROUND_UP(args->bpp, 8);
|
||||
int ret;
|
||||
|
||||
args->pitch = args->width * ((args->bpp + 7) / 8);
|
||||
switch (cpp) {
|
||||
case 1: /* DRM_FORMAT_C8 */
|
||||
case 2: /* DRM_FORMAT_RGB565 */
|
||||
case 4: /* DRM_FORMAT_XRGB8888 */
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
* Dumb buffers don't allow anything else.
|
||||
* This is tested via IGT's dumb_buffers
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
args->pitch = args->width * cpp;
|
||||
args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
|
||||
|
||||
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright 2014-2022 VMware, Inc., Palo Alto, CA., USA
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
|
@ -28,6 +28,8 @@
|
|||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_resource_priv.h"
|
||||
|
||||
#include <linux/hashtable.h>
|
||||
|
||||
#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
|
||||
|
||||
/**
|
||||
|
@ -59,7 +61,7 @@ struct vmw_cmdbuf_res {
|
|||
* @resources and @list are protected by the cmdbuf mutex for now.
|
||||
*/
|
||||
struct vmw_cmdbuf_res_manager {
|
||||
struct vmwgfx_open_hash resources;
|
||||
DECLARE_HASHTABLE(resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
|
||||
struct list_head list;
|
||||
struct vmw_private *dev_priv;
|
||||
};
|
||||
|
@ -82,14 +84,13 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
|
|||
u32 user_key)
|
||||
{
|
||||
struct vmwgfx_hash_item *hash;
|
||||
int ret;
|
||||
unsigned long key = user_key | (res_type << 24);
|
||||
|
||||
ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
|
||||
if (unlikely(ret != 0))
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
|
||||
hash_for_each_possible_rcu(man->resources, hash, head, key) {
|
||||
if (hash->key == key)
|
||||
return hlist_entry(hash, struct vmw_cmdbuf_res, hash)->res;
|
||||
}
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -105,7 +106,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
|
|||
struct vmw_cmdbuf_res *entry)
|
||||
{
|
||||
list_del(&entry->head);
|
||||
WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
|
||||
hash_del_rcu(&entry->hash.head);
|
||||
vmw_resource_unreference(&entry->res);
|
||||
kfree(entry);
|
||||
}
|
||||
|
@ -159,7 +160,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
|
|||
void vmw_cmdbuf_res_revert(struct list_head *list)
|
||||
{
|
||||
struct vmw_cmdbuf_res *entry, *next;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry_safe(entry, next, list, head) {
|
||||
switch (entry->state) {
|
||||
|
@ -167,8 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
|
|||
vmw_cmdbuf_res_free(entry->man, entry);
|
||||
break;
|
||||
case VMW_CMDBUF_RES_DEL:
|
||||
ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
|
||||
BUG_ON(ret);
|
||||
hash_add_rcu(entry->man->resources, &entry->hash.head,
|
||||
entry->hash.key);
|
||||
list_move_tail(&entry->head, &entry->man->list);
|
||||
entry->state = VMW_CMDBUF_RES_COMMITTED;
|
||||
break;
|
||||
|
@ -199,26 +199,20 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
|
|||
struct list_head *list)
|
||||
{
|
||||
struct vmw_cmdbuf_res *cres;
|
||||
int ret;
|
||||
|
||||
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
|
||||
if (unlikely(!cres))
|
||||
return -ENOMEM;
|
||||
|
||||
cres->hash.key = user_key | (res_type << 24);
|
||||
ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
|
||||
if (unlikely(ret != 0)) {
|
||||
kfree(cres);
|
||||
goto out_invalid_key;
|
||||
}
|
||||
hash_add_rcu(man->resources, &cres->hash.head, cres->hash.key);
|
||||
|
||||
cres->state = VMW_CMDBUF_RES_ADD;
|
||||
cres->res = vmw_resource_reference(res);
|
||||
cres->man = man;
|
||||
list_add_tail(&cres->head, list);
|
||||
|
||||
out_invalid_key:
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -243,24 +237,26 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
|
|||
struct list_head *list,
|
||||
struct vmw_resource **res_p)
|
||||
{
|
||||
struct vmw_cmdbuf_res *entry;
|
||||
struct vmw_cmdbuf_res *entry = NULL;
|
||||
struct vmwgfx_hash_item *hash;
|
||||
int ret;
|
||||
unsigned long key = user_key | (res_type << 24);
|
||||
|
||||
ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
|
||||
&hash);
|
||||
if (likely(ret != 0))
|
||||
hash_for_each_possible_rcu(man->resources, hash, head, key) {
|
||||
if (hash->key == key) {
|
||||
entry = hlist_entry(hash, struct vmw_cmdbuf_res, hash);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (unlikely(!entry))
|
||||
return -EINVAL;
|
||||
|
||||
entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
|
||||
|
||||
switch (entry->state) {
|
||||
case VMW_CMDBUF_RES_ADD:
|
||||
vmw_cmdbuf_res_free(man, entry);
|
||||
*res_p = NULL;
|
||||
break;
|
||||
case VMW_CMDBUF_RES_COMMITTED:
|
||||
(void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
|
||||
hash_del_rcu(&entry->hash.head);
|
||||
list_del(&entry->head);
|
||||
entry->state = VMW_CMDBUF_RES_DEL;
|
||||
list_add_tail(&entry->head, list);
|
||||
|
@ -287,7 +283,6 @@ struct vmw_cmdbuf_res_manager *
|
|||
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_cmdbuf_res_manager *man;
|
||||
int ret;
|
||||
|
||||
man = kzalloc(sizeof(*man), GFP_KERNEL);
|
||||
if (!man)
|
||||
|
@ -295,12 +290,8 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
|
|||
|
||||
man->dev_priv = dev_priv;
|
||||
INIT_LIST_HEAD(&man->list);
|
||||
ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
|
||||
if (ret == 0)
|
||||
return man;
|
||||
|
||||
kfree(man);
|
||||
return ERR_PTR(ret);
|
||||
hash_init(man->resources);
|
||||
return man;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -320,7 +311,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
|
|||
list_for_each_entry_safe(entry, next, &man->list, head)
|
||||
vmw_cmdbuf_res_free(man, entry);
|
||||
|
||||
vmwgfx_ht_remove(&man->resources);
|
||||
kfree(man);
|
||||
}
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_mksstat.h"
|
||||
#include "vmwgfx_resource_priv.h"
|
||||
#include "vmwgfx_so.h"
|
||||
|
||||
|
@ -72,12 +73,24 @@ struct vmw_cotable_info {
|
|||
bool);
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Getting the initial size right is difficult because it all depends
|
||||
* on what the userspace is doing. The sizes will be aligned up to
|
||||
* a PAGE_SIZE so we just want to make sure that for majority of apps
|
||||
* the initial number of entries doesn't require an immediate resize.
|
||||
* For all cotables except SVGACOTableDXElementLayoutEntry and
|
||||
* SVGACOTableDXBlendStateEntry the initial number of entries fits
|
||||
* within the PAGE_SIZE. For SVGACOTableDXElementLayoutEntry and
|
||||
* SVGACOTableDXBlendStateEntry we want to reserve two pages,
|
||||
* because that's what all apps will require initially.
|
||||
*/
|
||||
static const struct vmw_cotable_info co_info[] = {
|
||||
{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
|
||||
{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
|
||||
{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
|
||||
{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
|
||||
{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
|
||||
{PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
|
||||
{PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
|
||||
{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
|
||||
{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
|
||||
{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
|
||||
|
@ -395,9 +408,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
|
|||
int ret;
|
||||
size_t i;
|
||||
|
||||
MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
|
||||
MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
|
||||
|
||||
ret = vmw_cotable_readback(res);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out_done;
|
||||
|
||||
cur_size_read_back = vcotbl->size_read_back;
|
||||
vcotbl->size_read_back = old_size_read_back;
|
||||
|
@ -411,7 +427,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
|
|||
true, true, vmw_bo_bo_free, &buf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed initializing new cotable MOB.\n");
|
||||
return ret;
|
||||
goto out_done;
|
||||
}
|
||||
|
||||
bo = &buf->base;
|
||||
|
@ -485,6 +501,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
|
|||
/* Release the pin acquired in vmw_bo_init */
|
||||
ttm_bo_unpin(bo);
|
||||
|
||||
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
|
||||
|
||||
return 0;
|
||||
|
||||
out_map_new:
|
||||
|
@ -494,6 +512,9 @@ out_wait:
|
|||
ttm_bo_unreserve(bo);
|
||||
vmw_bo_unreference(&buf);
|
||||
|
||||
out_done:
|
||||
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -25,13 +25,17 @@
|
|||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/cc_platform.h>
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#include "vmwgfx_devcaps.h"
|
||||
#include "vmwgfx_mksstat.h"
|
||||
#include "vmwgfx_binding.h"
|
||||
#include "ttm_object.h"
|
||||
|
||||
#include <drm/drm_aperture.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_gem_ttm_helper.h>
|
||||
#include <drm/drm_ioctl.h>
|
||||
#include <drm/drm_module.h>
|
||||
|
@ -41,17 +45,14 @@
|
|||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
#include "ttm_object.h"
|
||||
#include "vmwgfx_binding.h"
|
||||
#include "vmwgfx_devcaps.h"
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_mksstat.h"
|
||||
#include <linux/cc_platform.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
|
||||
|
||||
#define VMW_MIN_INITIAL_WIDTH 800
|
||||
#define VMW_MIN_INITIAL_HEIGHT 600
|
||||
|
||||
/*
|
||||
* Fully encoded drm commands. Might move to vmw_drm.h
|
||||
*/
|
||||
|
@ -262,7 +263,6 @@ static const struct pci_device_id vmw_pci_id_list[] = {
|
|||
};
|
||||
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
|
||||
|
||||
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
|
||||
static int vmw_restrict_iommu;
|
||||
static int vmw_force_coherent;
|
||||
static int vmw_restrict_dma_mask;
|
||||
|
@ -272,8 +272,6 @@ static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
|
|||
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
|
||||
void *ptr);
|
||||
|
||||
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
|
||||
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
|
||||
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
|
||||
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
|
||||
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
|
||||
|
@ -623,8 +621,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
|
|||
width = vmw_read(dev_priv, SVGA_REG_WIDTH);
|
||||
height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
|
||||
|
||||
width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
|
||||
height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
|
||||
width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
|
||||
height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);
|
||||
|
||||
if (width > dev_priv->fb_max_width ||
|
||||
height > dev_priv->fb_max_height) {
|
||||
|
@ -633,8 +631,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
|
|||
* This is a host error and shouldn't occur.
|
||||
*/
|
||||
|
||||
width = VMW_MIN_INITIAL_WIDTH;
|
||||
height = VMW_MIN_INITIAL_HEIGHT;
|
||||
width = VMWGFX_MIN_INITIAL_WIDTH;
|
||||
height = VMWGFX_MIN_INITIAL_HEIGHT;
|
||||
}
|
||||
|
||||
dev_priv->initial_width = width;
|
||||
|
@ -806,6 +804,43 @@ static int vmw_detect_version(struct vmw_private *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_write_driver_id(struct vmw_private *dev)
|
||||
{
|
||||
if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
|
||||
vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
|
||||
SVGA_REG_GUEST_DRIVER_ID_LINUX);
|
||||
|
||||
vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
|
||||
LINUX_VERSION_MAJOR << 24 |
|
||||
LINUX_VERSION_PATCHLEVEL << 16 |
|
||||
LINUX_VERSION_SUBLEVEL);
|
||||
vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
|
||||
VMWGFX_DRIVER_MAJOR << 24 |
|
||||
VMWGFX_DRIVER_MINOR << 16 |
|
||||
VMWGFX_DRIVER_PATCHLEVEL);
|
||||
vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);
|
||||
|
||||
vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
|
||||
SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
|
||||
}
|
||||
}
|
||||
|
||||
static void vmw_sw_context_init(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_sw_context *sw_context = &dev_priv->ctx;
|
||||
|
||||
hash_init(sw_context->res_ht);
|
||||
}
|
||||
|
||||
static void vmw_sw_context_fini(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_sw_context *sw_context = &dev_priv->ctx;
|
||||
|
||||
vfree(sw_context->cmd_bounce);
|
||||
if (sw_context->staged_bindings)
|
||||
vmw_binding_state_free(sw_context->staged_bindings);
|
||||
}
|
||||
|
||||
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
|
||||
{
|
||||
int ret;
|
||||
|
@ -815,6 +850,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
|
|||
|
||||
dev_priv->drm.dev_private = dev_priv;
|
||||
|
||||
vmw_sw_context_init(dev_priv);
|
||||
|
||||
mutex_init(&dev_priv->cmdbuf_mutex);
|
||||
mutex_init(&dev_priv->binding_mutex);
|
||||
spin_lock_init(&dev_priv->resource_lock);
|
||||
|
@ -844,9 +881,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
|
|||
|
||||
dev_priv->assume_16bpp = !!vmw_assume_16bpp;
|
||||
|
||||
dev_priv->enable_fb = enable_fbdev;
|
||||
|
||||
|
||||
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
|
||||
vmw_print_bitmap(&dev_priv->drm, "Capabilities",
|
||||
dev_priv->capabilities,
|
||||
|
@ -970,7 +1004,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
|
|||
goto out_err0;
|
||||
}
|
||||
|
||||
dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
|
||||
dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);
|
||||
|
||||
if (unlikely(dev_priv->tdev == NULL)) {
|
||||
drm_err(&dev_priv->drm,
|
||||
|
@ -1091,12 +1125,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
|
|||
vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
|
||||
VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
|
||||
VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
|
||||
|
||||
if (dev_priv->enable_fb) {
|
||||
vmw_fifo_resource_inc(dev_priv);
|
||||
vmw_svga_enable(dev_priv);
|
||||
vmw_fb_init(dev_priv);
|
||||
}
|
||||
vmw_write_driver_id(dev_priv);
|
||||
|
||||
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
|
||||
register_pm_notifier(&dev_priv->pm_nb);
|
||||
|
@ -1143,15 +1172,10 @@ static void vmw_driver_unload(struct drm_device *dev)
|
|||
|
||||
unregister_pm_notifier(&dev_priv->pm_nb);
|
||||
|
||||
if (dev_priv->ctx.res_ht_initialized)
|
||||
vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
|
||||
vfree(dev_priv->ctx.cmd_bounce);
|
||||
if (dev_priv->enable_fb) {
|
||||
vmw_fb_off(dev_priv);
|
||||
vmw_fb_close(dev_priv);
|
||||
vmw_fifo_resource_dec(dev_priv);
|
||||
vmw_svga_disable(dev_priv);
|
||||
}
|
||||
vmw_sw_context_fini(dev_priv);
|
||||
vmw_fifo_resource_dec(dev_priv);
|
||||
|
||||
vmw_svga_disable(dev_priv);
|
||||
|
||||
vmw_kms_close(dev_priv);
|
||||
vmw_overlay_close(dev_priv);
|
||||
|
@ -1173,8 +1197,6 @@ static void vmw_driver_unload(struct drm_device *dev)
|
|||
vmw_irq_uninstall(&dev_priv->drm);
|
||||
|
||||
ttm_object_device_release(&dev_priv->tdev);
|
||||
if (dev_priv->ctx.staged_bindings)
|
||||
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
|
||||
|
||||
for (i = vmw_res_context; i < vmw_res_max; ++i)
|
||||
idr_destroy(&dev_priv->res_idr[i]);
|
||||
|
@ -1203,7 +1225,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
|
|||
if (unlikely(!vmw_fp))
|
||||
return ret;
|
||||
|
||||
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
|
||||
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
|
||||
if (unlikely(vmw_fp->tfile == NULL))
|
||||
goto out_no_tfile;
|
||||
|
||||
|
@ -1291,8 +1313,6 @@ static void vmw_master_drop(struct drm_device *dev,
|
|||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
|
||||
vmw_kms_legacy_hotspot_clear(dev_priv);
|
||||
if (!dev_priv->enable_fb)
|
||||
vmw_svga_disable(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1485,25 +1505,19 @@ static int vmw_pm_freeze(struct device *kdev)
|
|||
DRM_ERROR("Failed to freeze modesetting.\n");
|
||||
return ret;
|
||||
}
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fb_off(dev_priv);
|
||||
|
||||
vmw_execbuf_release_pinned_bo(dev_priv);
|
||||
vmw_resource_evict_all(dev_priv);
|
||||
vmw_release_device_early(dev_priv);
|
||||
while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fifo_resource_dec(dev_priv);
|
||||
vmw_fifo_resource_dec(dev_priv);
|
||||
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
|
||||
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fifo_resource_inc(dev_priv);
|
||||
vmw_fifo_resource_inc(dev_priv);
|
||||
WARN_ON(vmw_request_device_late(dev_priv));
|
||||
dev_priv->suspend_locked = false;
|
||||
if (dev_priv->suspend_state)
|
||||
vmw_kms_resume(dev);
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fb_on(dev_priv);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
@ -1523,24 +1537,19 @@ static int vmw_pm_restore(struct device *kdev)
|
|||
|
||||
vmw_detect_version(dev_priv);
|
||||
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fifo_resource_inc(dev_priv);
|
||||
vmw_fifo_resource_inc(dev_priv);
|
||||
|
||||
ret = vmw_request_device(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (dev_priv->enable_fb)
|
||||
__vmw_svga_enable(dev_priv);
|
||||
__vmw_svga_enable(dev_priv);
|
||||
|
||||
vmw_fence_fifo_up(dev_priv->fman);
|
||||
dev_priv->suspend_locked = false;
|
||||
if (dev_priv->suspend_state)
|
||||
vmw_kms_resume(&dev_priv->drm);
|
||||
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fb_on(dev_priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1631,6 +1640,10 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
if (ret)
|
||||
goto out_unload;
|
||||
|
||||
vmw_fifo_resource_inc(vmw);
|
||||
vmw_svga_enable(vmw);
|
||||
drm_fbdev_generic_setup(&vmw->drm, 0);
|
||||
|
||||
vmw_debugfs_gem_init(vmw);
|
||||
vmw_debugfs_resource_managers_init(vmw);
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/sync_file.h>
|
||||
#include <linux/hashtable.h>
|
||||
|
||||
#include <drm/drm_auth.h>
|
||||
#include <drm/drm_device.h>
|
||||
|
@ -42,7 +43,6 @@
|
|||
#include "ttm_object.h"
|
||||
|
||||
#include "vmwgfx_fence.h"
|
||||
#include "vmwgfx_hashtab.h"
|
||||
#include "vmwgfx_reg.h"
|
||||
#include "vmwgfx_validation.h"
|
||||
|
||||
|
@ -62,6 +62,9 @@
|
|||
#define VMWGFX_MAX_DISPLAYS 16
|
||||
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
|
||||
|
||||
#define VMWGFX_MIN_INITIAL_WIDTH 1280
|
||||
#define VMWGFX_MIN_INITIAL_HEIGHT 800
|
||||
|
||||
#define VMWGFX_PCI_ID_SVGA2 0x0405
|
||||
#define VMWGFX_PCI_ID_SVGA3 0x0406
|
||||
|
||||
|
@ -93,6 +96,7 @@
|
|||
#define VMW_RES_STREAM ttm_driver_type2
|
||||
#define VMW_RES_FENCE ttm_driver_type3
|
||||
#define VMW_RES_SHADER ttm_driver_type4
|
||||
#define VMW_RES_HT_ORDER 12
|
||||
|
||||
#define MKSSTAT_CAPACITY_LOG2 5U
|
||||
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
|
||||
|
@ -102,6 +106,11 @@ struct vmw_fpriv {
|
|||
bool gb_aware; /* user-space is guest-backed aware */
|
||||
};
|
||||
|
||||
struct vmwgfx_hash_item {
|
||||
struct hlist_node head;
|
||||
unsigned long key;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
|
||||
* @base: The TTM buffer object
|
||||
|
@ -425,8 +434,7 @@ struct vmw_ctx_validation_info;
|
|||
* @ctx: The validation context
|
||||
*/
|
||||
struct vmw_sw_context{
|
||||
struct vmwgfx_open_hash res_ht;
|
||||
bool res_ht_initialized;
|
||||
DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
|
||||
bool kernel;
|
||||
struct vmw_fpriv *fp;
|
||||
struct drm_file *filp;
|
||||
|
@ -546,7 +554,6 @@ struct vmw_private {
|
|||
* Framebuffer info.
|
||||
*/
|
||||
|
||||
void *fb_info;
|
||||
enum vmw_display_unit_type active_display_unit;
|
||||
struct vmw_legacy_display *ldu_priv;
|
||||
struct vmw_overlay *overlay_priv;
|
||||
|
@ -605,8 +612,6 @@ struct vmw_private {
|
|||
struct mutex cmdbuf_mutex;
|
||||
struct mutex binding_mutex;
|
||||
|
||||
bool enable_fb;
|
||||
|
||||
/**
|
||||
* PM management.
|
||||
*/
|
||||
|
@ -1184,35 +1189,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
|
|||
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
|
||||
u32 flag, int *waiter_count);
|
||||
|
||||
|
||||
/**
|
||||
* Kernel framebuffer - vmwgfx_fb.c
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
int vmw_fb_init(struct vmw_private *vmw_priv);
|
||||
int vmw_fb_close(struct vmw_private *dev_priv);
|
||||
int vmw_fb_off(struct vmw_private *vmw_priv);
|
||||
int vmw_fb_on(struct vmw_private *vmw_priv);
|
||||
#else
|
||||
static inline int vmw_fb_init(struct vmw_private *vmw_priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int vmw_fb_close(struct vmw_private *dev_priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int vmw_fb_off(struct vmw_private *vmw_priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int vmw_fb_on(struct vmw_private *vmw_priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Kernel modesetting - vmwgfx_kms.c
|
||||
*/
|
||||
|
@ -1232,9 +1208,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
|
|||
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
|
||||
uint32_t pitch,
|
||||
uint32_t height);
|
||||
u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
|
||||
int vmw_enable_vblank(struct drm_crtc *crtc);
|
||||
void vmw_disable_vblank(struct drm_crtc *crtc);
|
||||
int vmw_kms_present(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
|
@ -25,6 +25,7 @@
|
|||
*
|
||||
**************************************************************************/
|
||||
#include <linux/sync_file.h>
|
||||
#include <linux/hashtable.h>
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_reg.h"
|
||||
|
@ -34,7 +35,6 @@
|
|||
#include "vmwgfx_binding.h"
|
||||
#include "vmwgfx_mksstat.h"
|
||||
|
||||
#define VMW_RES_HT_ORDER 12
|
||||
|
||||
/*
|
||||
* Helper macro to get dx_ctx_node if available otherwise print an error
|
||||
|
@ -3869,7 +3869,6 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
|
|||
* @fence: Pointer to the fenc object.
|
||||
* @fence_handle: User-space fence handle.
|
||||
* @out_fence_fd: exported file descriptor for the fence. -1 if not used
|
||||
* @sync_file: Only used to clean up in case of an error in this function.
|
||||
*
|
||||
* This function copies fence information to user-space. If copying fails, the
|
||||
* user-space struct drm_vmw_fence_rep::error member is hopefully left
|
||||
|
@ -4101,7 +4100,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|||
int ret;
|
||||
int32_t out_fence_fd = -1;
|
||||
struct sync_file *sync_file = NULL;
|
||||
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
|
||||
DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
|
||||
|
||||
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
|
||||
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
|
||||
|
@ -4164,14 +4163,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|||
if (sw_context->staged_bindings)
|
||||
vmw_binding_state_reset(sw_context->staged_bindings);
|
||||
|
||||
if (!sw_context->res_ht_initialized) {
|
||||
ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_unlock;
|
||||
|
||||
sw_context->res_ht_initialized = true;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
|
||||
sw_context->ctx = &val_ctx;
|
||||
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
|
||||
|
|
|
@ -1,831 +0,0 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2007 David Airlie
|
||||
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <linux/fb.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
#define VMW_DIRTY_DELAY (HZ / 30)
|
||||
|
||||
struct vmw_fb_par {
|
||||
struct vmw_private *vmw_priv;
|
||||
|
||||
void *vmalloc;
|
||||
|
||||
struct mutex bo_mutex;
|
||||
struct vmw_buffer_object *vmw_bo;
|
||||
unsigned bo_size;
|
||||
struct drm_framebuffer *set_fb;
|
||||
struct drm_display_mode *set_mode;
|
||||
u32 fb_x;
|
||||
u32 fb_y;
|
||||
bool bo_iowrite;
|
||||
|
||||
u32 pseudo_palette[17];
|
||||
|
||||
unsigned max_width;
|
||||
unsigned max_height;
|
||||
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
bool active;
|
||||
unsigned x1;
|
||||
unsigned y1;
|
||||
unsigned x2;
|
||||
unsigned y2;
|
||||
} dirty;
|
||||
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_connector *con;
|
||||
struct delayed_work local_work;
|
||||
};
|
||||
|
||||
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
|
||||
unsigned blue, unsigned transp,
|
||||
struct fb_info *info)
|
||||
{
|
||||
struct vmw_fb_par *par = info->par;
|
||||
u32 *pal = par->pseudo_palette;
|
||||
|
||||
if (regno > 15) {
|
||||
DRM_ERROR("Bad regno %u.\n", regno);
|
||||
return 1;
|
||||
}
|
||||
|
||||
switch (par->set_fb->format->depth) {
|
||||
case 24:
|
||||
case 32:
|
||||
pal[regno] = ((red & 0xff00) << 8) |
|
||||
(green & 0xff00) |
|
||||
((blue & 0xff00) >> 8);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Bad depth %u, bpp %u.\n",
|
||||
par->set_fb->format->depth,
|
||||
par->set_fb->format->cpp[0] * 8);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
|
||||
struct fb_info *info)
|
||||
{
|
||||
int depth = var->bits_per_pixel;
|
||||
struct vmw_fb_par *par = info->par;
|
||||
struct vmw_private *vmw_priv = par->vmw_priv;
|
||||
|
||||
switch (var->bits_per_pixel) {
|
||||
case 32:
|
||||
depth = (var->transp.length > 0) ? 32 : 24;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (depth) {
|
||||
case 24:
|
||||
var->red.offset = 16;
|
||||
var->green.offset = 8;
|
||||
var->blue.offset = 0;
|
||||
var->red.length = 8;
|
||||
var->green.length = 8;
|
||||
var->blue.length = 8;
|
||||
var->transp.length = 0;
|
||||
var->transp.offset = 0;
|
||||
break;
|
||||
case 32:
|
||||
var->red.offset = 16;
|
||||
var->green.offset = 8;
|
||||
var->blue.offset = 0;
|
||||
var->red.length = 8;
|
||||
var->green.length = 8;
|
||||
var->blue.length = 8;
|
||||
var->transp.length = 8;
|
||||
var->transp.offset = 24;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Bad depth %u.\n", depth);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((var->xoffset + var->xres) > par->max_width ||
|
||||
(var->yoffset + var->yres) > par->max_height) {
|
||||
DRM_ERROR("Requested geom can not fit in framebuffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!vmw_kms_validate_mode_vram(vmw_priv,
|
||||
var->xres * var->bits_per_pixel/8,
|
||||
var->yoffset + var->yres)) {
|
||||
DRM_ERROR("Requested geom can not fit in framebuffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmw_fb_blank(int blank, struct fb_info *info)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
|
||||
*
|
||||
* @work: The struct work_struct associated with this task.
|
||||
*
|
||||
* This function flushes the dirty regions of the vmalloc framebuffer to the
|
||||
* kms framebuffer, and if the kms framebuffer is visible, also updated the
|
||||
* corresponding displays. Note that this function runs even if the kms
|
||||
* framebuffer is not bound to a crtc and thus not visible, but it's turned
|
||||
* off during hibernation using the par->dirty.active bool.
|
||||
*/
|
||||
static void vmw_fb_dirty_flush(struct work_struct *work)
|
||||
{
|
||||
struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
|
||||
local_work.work);
|
||||
struct vmw_private *vmw_priv = par->vmw_priv;
|
||||
struct fb_info *info = vmw_priv->fb_info;
|
||||
unsigned long irq_flags;
|
||||
s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
|
||||
u32 cpp, max_x, max_y;
|
||||
struct drm_clip_rect clip;
|
||||
struct drm_framebuffer *cur_fb;
|
||||
u8 *src_ptr, *dst_ptr;
|
||||
struct vmw_buffer_object *vbo = par->vmw_bo;
|
||||
void *virtual;
|
||||
|
||||
if (!READ_ONCE(par->dirty.active))
|
||||
return;
|
||||
|
||||
mutex_lock(&par->bo_mutex);
|
||||
cur_fb = par->set_fb;
|
||||
if (!cur_fb)
|
||||
goto out_unlock;
|
||||
|
||||
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
|
||||
virtual = vmw_bo_map_and_cache(vbo);
|
||||
if (!virtual)
|
||||
goto out_unreserve;
|
||||
|
||||
spin_lock_irqsave(&par->dirty.lock, irq_flags);
|
||||
if (!par->dirty.active) {
|
||||
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
|
||||
goto out_unreserve;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle panning when copying from vmalloc to framebuffer.
|
||||
* Clip dirty area to framebuffer.
|
||||
*/
|
||||
cpp = cur_fb->format->cpp[0];
|
||||
max_x = par->fb_x + cur_fb->width;
|
||||
max_y = par->fb_y + cur_fb->height;
|
||||
|
||||
dst_x1 = par->dirty.x1 - par->fb_x;
|
||||
dst_y1 = par->dirty.y1 - par->fb_y;
|
||||
dst_x1 = max_t(s32, dst_x1, 0);
|
||||
dst_y1 = max_t(s32, dst_y1, 0);
|
||||
|
||||
dst_x2 = par->dirty.x2 - par->fb_x;
|
||||
dst_y2 = par->dirty.y2 - par->fb_y;
|
||||
dst_x2 = min_t(s32, dst_x2, max_x);
|
||||
dst_y2 = min_t(s32, dst_y2, max_y);
|
||||
w = dst_x2 - dst_x1;
|
||||
h = dst_y2 - dst_y1;
|
||||
w = max_t(s32, 0, w);
|
||||
h = max_t(s32, 0, h);
|
||||
|
||||
par->dirty.x1 = par->dirty.x2 = 0;
|
||||
par->dirty.y1 = par->dirty.y2 = 0;
|
||||
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
|
||||
|
||||
if (w && h) {
|
||||
dst_ptr = (u8 *)virtual +
|
||||
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
|
||||
src_ptr = (u8 *)par->vmalloc +
|
||||
((dst_y1 + par->fb_y) * info->fix.line_length +
|
||||
(dst_x1 + par->fb_x) * cpp);
|
||||
|
||||
while (h-- > 0) {
|
||||
memcpy(dst_ptr, src_ptr, w*cpp);
|
||||
dst_ptr += par->set_fb->pitches[0];
|
||||
src_ptr += info->fix.line_length;
|
||||
}
|
||||
|
||||
clip.x1 = dst_x1;
|
||||
clip.x2 = dst_x2;
|
||||
clip.y1 = dst_y1;
|
||||
clip.y2 = dst_y2;
|
||||
}
|
||||
|
||||
out_unreserve:
|
||||
ttm_bo_unreserve(&vbo->base);
|
||||
if (w && h) {
|
||||
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
|
||||
&clip, 1));
|
||||
vmw_cmd_flush(vmw_priv, false);
|
||||
}
|
||||
out_unlock:
|
||||
mutex_unlock(&par->bo_mutex);
|
||||
}
|
||||
|
||||
/*
 * vmw_fb_dirty_mark - coalesce a damaged rectangle into the pending
 * dirty region of the fbdev emulation.
 *
 * @par:    fbdev private state holding the dirty region and its lock.
 * @x1/@y1: top-left corner of the newly damaged area.
 * @width/@height: extent of the newly damaged area.
 *
 * Runs under par->dirty.lock (irqsave) since callers may be in any
 * context (draw ops, panning, deferred io).
 */
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		/* x1 == x2 means the pending region is empty: adopt the
		 * new rectangle wholesale. */
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		/* Non-empty pending region: grow it to the bounding box
		 * of the old region and the new rectangle.  No work is
		 * scheduled here; one is already pending from the first
		 * mark. */
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
|
||||
|
||||
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
|
||||
struct fb_info *info)
|
||||
{
|
||||
struct vmw_fb_par *par = info->par;
|
||||
|
||||
if ((var->xoffset + var->xres) > var->xres_virtual ||
|
||||
(var->yoffset + var->yres) > var->yres_virtual) {
|
||||
DRM_ERROR("Requested panning can not fit in framebuffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&par->bo_mutex);
|
||||
par->fb_x = var->xoffset;
|
||||
par->fb_y = var->yoffset;
|
||||
if (par->set_fb)
|
||||
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
|
||||
par->set_fb->height);
|
||||
mutex_unlock(&par->bo_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
|
||||
{
|
||||
struct vmw_fb_par *par = info->par;
|
||||
unsigned long start, end, min, max;
|
||||
unsigned long flags;
|
||||
struct fb_deferred_io_pageref *pageref;
|
||||
int y1, y2;
|
||||
|
||||
min = ULONG_MAX;
|
||||
max = 0;
|
||||
list_for_each_entry(pageref, pagereflist, list) {
|
||||
start = pageref->offset;
|
||||
end = start + PAGE_SIZE - 1;
|
||||
min = min(min, start);
|
||||
max = max(max, end);
|
||||
}
|
||||
|
||||
if (min < max) {
|
||||
y1 = min / info->fix.line_length;
|
||||
y2 = (max / info->fix.line_length) + 1;
|
||||
|
||||
spin_lock_irqsave(&par->dirty.lock, flags);
|
||||
par->dirty.x1 = 0;
|
||||
par->dirty.y1 = y1;
|
||||
par->dirty.x2 = info->var.xres;
|
||||
par->dirty.y2 = y2;
|
||||
spin_unlock_irqrestore(&par->dirty.lock, flags);
|
||||
|
||||
/*
|
||||
* Since we've already waited on this work once, try to
|
||||
* execute asap.
|
||||
*/
|
||||
cancel_delayed_work(&par->local_work);
|
||||
schedule_delayed_work(&par->local_work, 0);
|
||||
}
|
||||
};
|
||||
|
||||
/* Deferred-io descriptor: mmap writes to the shadow buffer are batched
 * for VMW_DIRTY_DELAY before vmw_deferred_io() is invoked. */
static struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};
|
||||
|
||||
/*
|
||||
* Draw code
|
||||
*/
|
||||
|
||||
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
||||
{
|
||||
cfb_fillrect(info, rect);
|
||||
vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
|
||||
rect->width, rect->height);
|
||||
}
|
||||
|
||||
static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
|
||||
{
|
||||
cfb_copyarea(info, region);
|
||||
vmw_fb_dirty_mark(info->par, region->dx, region->dy,
|
||||
region->width, region->height);
|
||||
}
|
||||
|
||||
static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
|
||||
{
|
||||
cfb_imageblit(info, image);
|
||||
vmw_fb_dirty_mark(info->par, image->dx, image->dy,
|
||||
image->width, image->height);
|
||||
}
|
||||
|
||||
/*
|
||||
* Bring up code
|
||||
*/
|
||||
|
||||
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
|
||||
size_t size, struct vmw_buffer_object **out)
|
||||
{
|
||||
struct vmw_buffer_object *vmw_bo;
|
||||
int ret;
|
||||
|
||||
ret = vmw_bo_create(vmw_priv, size,
|
||||
&vmw_sys_placement,
|
||||
false, false,
|
||||
&vmw_bo_bo_free, &vmw_bo);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
*out = vmw_bo;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
|
||||
int *depth)
|
||||
{
|
||||
switch (var->bits_per_pixel) {
|
||||
case 32:
|
||||
*depth = (var->transp.length > 0) ? 32 : 24;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
|
||||
{
|
||||
struct drm_crtc *crtc = set->crtc;
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
int ret;
|
||||
|
||||
drm_modeset_acquire_init(&ctx, 0);
|
||||
|
||||
restart:
|
||||
ret = crtc->funcs->set_config(set, &ctx);
|
||||
|
||||
if (ret == -EDEADLK) {
|
||||
drm_modeset_backoff(&ctx);
|
||||
goto restart;
|
||||
}
|
||||
|
||||
drm_modeset_drop_locks(&ctx);
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * vmw_fb_kms_detach - tear down the fbdev's KMS state.
 *
 * @par:       fbdev private state.
 * @detach_bo: whether the backing buffer object should be released.
 * @unref_bo:  whether to drop the reference on the buffer object
 *             (only acted on together with @detach_bo).
 *
 * Unsets the current mode (if any), drops the framebuffer reference,
 * and optionally unreferences the backing buffer object.  Caller must
 * hold par->bo_mutex.  Returns 0 or the error from unsetting the mode.
 */
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		/* A NULL mode/fb with zero connectors disables the crtc. */
		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	/* NOTE(review): detach_bo without unref_bo is currently a no-op;
	 * both flags must be set for the buffer to be released. */
	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}
|
||||
|
||||
/*
 * vmw_fb_kms_framebuffer - (re)create the KMS framebuffer matching the
 * current fbdev var.
 *
 * If the existing framebuffer already matches the requested geometry and
 * format it is kept.  Otherwise the old framebuffer (and, when the size
 * difference warrants it, the backing buffer object) is torn down and a
 * new one is created.  Caller must hold par->bo_mutex.
 *
 * Returns 0 on success or a negative errno.
 */
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	/* Build the fb-creation request from the fbdev var. */
	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	/* Nothing to do if the current fb already matches exactly. */
	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	/* Recycle the old bo unless it is too small, or so large (more
	 * than 2x) that keeping it would waste memory. */
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}
|
||||
|
||||
/*
 * vmw_fb_set_par - fb_ops.fb_set_par hook.
 *
 * Builds a display mode from the current var, validates it against
 * available VRAM, (re)creates the KMS framebuffer, applies the mode to
 * the crtc and schedules an immediate dirty flush.
 *
 * Mode ownership: the freshly duplicated mode replaces par->set_mode at
 * out_unlock regardless of success, so the previous mode is always
 * destroyed exactly once.
 */
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	/* Template mode; timings are filled in by vmw_guess_mode_timing(). */
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(&vmw_priv->drm, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	/* Repaint everything with the new mode/framebuffer. */
	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we wont
	 * schedule a new work, so lets do it now */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(&vmw_priv->drm, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}
|
||||
|
||||
|
||||
/* fbdev operations: software drawing ops with dirty tracking, hardware
 * panning, and deferred-io-backed mmap. */
static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
	.fb_mmap = fb_deferred_io_mmap,
};
|
||||
|
||||
/*
 * vmw_fb_init - bring up the fbdev emulation.
 *
 * Allocates the fb_info and private state, queries KMS for an initial
 * mode, allocates the vmalloc shadow buffer, fills in the fixed and
 * variable screen info, sets up dirty tracking plus deferred io, and
 * registers the framebuffer.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * created state is released and vmw_priv->fb_info is reset to NULL.
 */
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = vmw_priv->drm.dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX As shouldn't these be as well. */
	/* Clamp the shadow buffer to 2048x2048 regardless of hardware max. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	/* The "screen" is the vmalloc shadow, not device memory. */
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

	/* Error unwind; labels fall through in reverse setup order. */
err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	/* vfree(NULL) is safe, so this also covers the pre-vzalloc path. */
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
|
||||
|
||||
/*
 * vmw_fb_close - tear down the fbdev emulation.
 *
 * Reverses vmw_fb_init(): stops deferred io and the local flush worker,
 * unregisters the framebuffer, detaches the KMS state and frees the
 * shadow buffer and fb_info.  A no-op (returning 0) when fbdev was
 * never initialized.
 */
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* ??? order */
	/* Stop all sources of new flush work before unregistering. */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
|
||||
|
||||
int vmw_fb_off(struct vmw_private *vmw_priv)
|
||||
{
|
||||
struct fb_info *info;
|
||||
struct vmw_fb_par *par;
|
||||
unsigned long flags;
|
||||
|
||||
if (!vmw_priv->fb_info)
|
||||
return -EINVAL;
|
||||
|
||||
info = vmw_priv->fb_info;
|
||||
par = info->par;
|
||||
|
||||
spin_lock_irqsave(&par->dirty.lock, flags);
|
||||
par->dirty.active = false;
|
||||
spin_unlock_irqrestore(&par->dirty.lock, flags);
|
||||
|
||||
flush_delayed_work(&info->deferred_work);
|
||||
flush_delayed_work(&par->local_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vmw_fb_on(struct vmw_private *vmw_priv)
|
||||
{
|
||||
struct fb_info *info;
|
||||
struct vmw_fb_par *par;
|
||||
unsigned long flags;
|
||||
|
||||
if (!vmw_priv->fb_info)
|
||||
return -EINVAL;
|
||||
|
||||
info = vmw_priv->fb_info;
|
||||
par = info->par;
|
||||
|
||||
spin_lock_irqsave(&par->dirty.lock, flags);
|
||||
par->dirty.active = true;
|
||||
spin_unlock_irqrestore(&par->dirty.lock, flags);
|
||||
|
||||
/*
|
||||
* Need to reschedule a dirty update, because otherwise that's
|
||||
* only done in dirty_mark() if the previous coalesced
|
||||
* dirty region was empty.
|
||||
*/
|
||||
schedule_delayed_work(&par->local_work, 0);
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -1,199 +0,0 @@
|
|||
/*
|
||||
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Simple open hash tab implementation.
|
||||
*
|
||||
* Authors:
|
||||
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#include "vmwgfx_hashtab.h"
|
||||
|
||||
int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
|
||||
{
|
||||
unsigned int size = 1 << order;
|
||||
|
||||
ht->order = order;
|
||||
ht->table = NULL;
|
||||
if (size <= PAGE_SIZE / sizeof(*ht->table))
|
||||
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
|
||||
else
|
||||
ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
|
||||
if (!ht->table) {
|
||||
DRM_ERROR("Out of memory for hash table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
|
||||
{
|
||||
struct vmwgfx_hash_item *entry;
|
||||
struct hlist_head *h_list;
|
||||
unsigned int hashed_key;
|
||||
int count = 0;
|
||||
|
||||
hashed_key = hash_long(key, ht->order);
|
||||
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
|
||||
h_list = &ht->table[hashed_key];
|
||||
hlist_for_each_entry(entry, h_list, head)
|
||||
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
|
||||
}
|
||||
|
||||
static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
|
||||
{
|
||||
struct vmwgfx_hash_item *entry;
|
||||
struct hlist_head *h_list;
|
||||
unsigned int hashed_key;
|
||||
|
||||
hashed_key = hash_long(key, ht->order);
|
||||
h_list = &ht->table[hashed_key];
|
||||
hlist_for_each_entry(entry, h_list, head) {
|
||||
if (entry->key == key)
|
||||
return &entry->head;
|
||||
if (entry->key > key)
|
||||
break;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
|
||||
{
|
||||
struct vmwgfx_hash_item *entry;
|
||||
struct hlist_head *h_list;
|
||||
unsigned int hashed_key;
|
||||
|
||||
hashed_key = hash_long(key, ht->order);
|
||||
h_list = &ht->table[hashed_key];
|
||||
hlist_for_each_entry_rcu(entry, h_list, head) {
|
||||
if (entry->key == key)
|
||||
return &entry->head;
|
||||
if (entry->key > key)
|
||||
break;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * vmwgfx_ht_insert_item - insert @item into the table, keeping each
 * bucket sorted by ascending key.
 *
 * Returns -EINVAL if an entry with the same key already exists,
 * 0 otherwise.  Uses RCU-safe list primitives so concurrent RCU
 * readers see a consistent chain.
 */
int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
{
	struct vmwgfx_hash_item *entry;
	struct hlist_head *h_list;
	struct hlist_node *parent;
	unsigned int hashed_key;
	unsigned long key = item->key;

	hashed_key = hash_long(key, ht->order);
	h_list = &ht->table[hashed_key];
	parent = NULL;
	/* Walk the sorted bucket; remember the last node with a smaller
	 * key so the new item can be linked right after it. */
	hlist_for_each_entry(entry, h_list, head) {
		if (entry->key == key)
			return -EINVAL;
		if (entry->key > key)
			break;
		parent = &entry->head;
	}
	if (parent)
		hlist_add_behind_rcu(&item->head, parent);
	else
		/* Either the bucket is empty or the new key is smallest. */
		hlist_add_head_rcu(&item->head, h_list);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Just insert an item and return any "bits" bit key that hasn't been
|
||||
* used before.
|
||||
*/
|
||||
/*
 * Just insert an item and return any "bits" bit key that hasn't been
 * used before.
 *
 * @ht:    table to insert into.
 * @item:  item whose ->key is assigned by this function.
 * @seed:  starting point for the key search.
 * @bits:  width of the generated key space (2^bits candidates).
 * @shift: left-shift applied to the generated key.
 * @add:   constant added to the shifted key.
 *
 * Linearly probes the 2^bits key space starting from hash_long(seed)
 * until vmwgfx_ht_insert_item() succeeds.  Returns 0 on success or
 * -EINVAL once every candidate key has been tried.
 */
int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
				 unsigned long seed, int bits, int shift,
				 unsigned long add)
{
	int ret;
	unsigned long mask = (1UL << bits) - 1;
	unsigned long first, unshifted_key;

	unshifted_key = hash_long(seed, bits);
	first = unshifted_key;
	do {
		item->key = (unshifted_key << shift) + add;
		ret = vmwgfx_ht_insert_item(ht, item);
		if (ret)
			/* Key collision: advance (with wrap-around). */
			unshifted_key = (unshifted_key + 1) & mask;
	} while (ret && (unshifted_key != first));

	if (ret) {
		DRM_ERROR("Available key bit space exhausted\n");
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
|
||||
struct vmwgfx_hash_item **item)
|
||||
{
|
||||
struct hlist_node *list;
|
||||
|
||||
list = vmwgfx_ht_find_key_rcu(ht, key);
|
||||
if (!list)
|
||||
return -EINVAL;
|
||||
|
||||
*item = hlist_entry(list, struct vmwgfx_hash_item, head);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
|
||||
{
|
||||
struct hlist_node *list;
|
||||
|
||||
list = vmwgfx_ht_find_key(ht, key);
|
||||
if (list) {
|
||||
hlist_del_init_rcu(list);
|
||||
return 0;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
 * vmwgfx_ht_remove_item - unlink a known item from its hash chain.
 *
 * hlist_del_init_rcu() re-initializes the node, so removing an item
 * that was never (or already) unlinked is safe.  Always returns 0.
 */
int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
{
	hlist_del_init_rcu(&item->head);
	return 0;
}
|
||||
|
||||
void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
|
||||
{
|
||||
if (ht->table) {
|
||||
kvfree(ht->table);
|
||||
ht->table = NULL;
|
||||
}
|
||||
}
|
|
@ -1,83 +0,0 @@
|
|||
/*
|
||||
* Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Simple open hash tab implementation.
|
||||
*
|
||||
* Authors:
|
||||
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* TODO: Replace this hashtable with Linux' generic implementation
|
||||
* from <linux/hashtable.h>.
|
||||
*/
|
||||
|
||||
#ifndef VMWGFX_HASHTAB_H
#define VMWGFX_HASHTAB_H

#include <linux/list.h>

/* Recover the containing object from an embedded vmwgfx_hash_item. */
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)

/* An entry in the hash table; embed this in the object to be hashed. */
struct vmwgfx_hash_item {
	struct hlist_node head;	/* link within the bucket chain */
	unsigned long key;	/* lookup key, unique per table */
};

/* Open hash table with 2^order singly-linked buckets. */
struct vmwgfx_open_hash {
	struct hlist_head *table;	/* bucket array, 2^order entries */
	u8 order;			/* log2 of the bucket count */
};

int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
				 unsigned long seed, int bits, int shift,
				 unsigned long add);
int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
			struct vmwgfx_hash_item **item);

void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);

/*
 * RCU-safe interface
 *
 * The user of this API needs to make sure that two or more instances of the
 * hash table manipulation functions are never run simultaneously.
 * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously
 * with any of the manipulation functions as long as it's called from within
 * an RCU read-locked section.
 */
#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item

#endif
|
|
@ -31,7 +31,6 @@
|
|||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_rect.h>
|
||||
#include <drm/drm_sysfs.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
|
@ -52,9 +51,9 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
|
|||
* Display Unit Cursor functions
|
||||
*/
|
||||
|
||||
static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
|
||||
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *bo,
|
||||
struct ttm_bo_kmap_obj *map,
|
||||
struct vmw_plane_state *vps,
|
||||
u32 *image, u32 width, u32 height,
|
||||
u32 hotspotX, u32 hotspotY);
|
||||
|
||||
|
@ -63,23 +62,23 @@ struct vmw_svga_fifo_cmd_define_cursor {
|
|||
SVGAFifoCmdDefineAlphaCursor cursor;
|
||||
};
|
||||
|
||||
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *cm_bo,
|
||||
struct ttm_bo_kmap_obj *cm_map,
|
||||
u32 *image, u32 width, u32 height,
|
||||
u32 hotspotX, u32 hotspotY)
|
||||
/**
|
||||
* vmw_send_define_cursor_cmd - queue a define cursor command
|
||||
* @dev_priv: the private driver struct
|
||||
* @image: buffer which holds the cursor image
|
||||
* @width: width of the mouse cursor image
|
||||
* @height: height of the mouse cursor image
|
||||
* @hotspotX: the horizontal position of mouse hotspot
|
||||
* @hotspotY: the vertical position of mouse hotspot
|
||||
*/
|
||||
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
|
||||
u32 *image, u32 width, u32 height,
|
||||
u32 hotspotX, u32 hotspotY)
|
||||
{
|
||||
struct vmw_svga_fifo_cmd_define_cursor *cmd;
|
||||
const u32 image_size = width * height * sizeof(*image);
|
||||
const u32 cmd_size = sizeof(*cmd) + image_size;
|
||||
|
||||
if (cm_bo != NULL) {
|
||||
vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image,
|
||||
width, height,
|
||||
hotspotX, hotspotY);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Try to reserve fifocmd space and swallow any failures;
|
||||
such reservations cannot be left unconsumed for long
|
||||
under the risk of clogging other fifocmd users, so
|
||||
|
@ -87,7 +86,7 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv,
|
|||
other fallible KMS-atomic resources at prepare_fb */
|
||||
cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
|
||||
|
||||
if (unlikely(cmd == NULL))
|
||||
if (unlikely(!cmd))
|
||||
return;
|
||||
|
||||
memset(cmd, 0, sizeof(*cmd));
|
||||
|
@ -104,12 +103,40 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv,
|
|||
vmw_cmd_commit_flush(dev_priv, cmd_size);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_cursor_update_image - update the cursor image on the provided plane
|
||||
* @dev_priv: the private driver struct
|
||||
* @vps: the plane state of the cursor plane
|
||||
* @image: buffer which holds the cursor image
|
||||
* @width: width of the mouse cursor image
|
||||
* @height: height of the mouse cursor image
|
||||
* @hotspotX: the horizontal position of mouse hotspot
|
||||
* @hotspotY: the vertical position of mouse hotspot
|
||||
*/
|
||||
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
|
||||
struct vmw_plane_state *vps,
|
||||
u32 *image, u32 width, u32 height,
|
||||
u32 hotspotX, u32 hotspotY)
|
||||
{
|
||||
if (vps->cursor.bo)
|
||||
vmw_cursor_update_mob(dev_priv, vps, image,
|
||||
vps->base.crtc_w, vps->base.crtc_h,
|
||||
hotspotX, hotspotY);
|
||||
|
||||
else
|
||||
vmw_send_define_cursor_cmd(dev_priv, image, width, height,
|
||||
hotspotX, hotspotY);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
|
||||
*
|
||||
* Called from inside vmw_du_cursor_plane_atomic_update to actually
|
||||
* make the cursor-image live.
|
||||
*
|
||||
* @dev_priv: device to work with
|
||||
* @bo: BO for the MOB
|
||||
* @map: kmap obj for the BO
|
||||
* @vps: the plane state of the cursor plane
|
||||
* @image: cursor source data to fill the MOB with
|
||||
* @width: source data width
|
||||
* @height: source data height
|
||||
|
@ -117,8 +144,7 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv,
|
|||
* @hotspotY: cursor hotspot Y
|
||||
*/
|
||||
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *bo,
|
||||
struct ttm_bo_kmap_obj *map,
|
||||
struct vmw_plane_state *vps,
|
||||
u32 *image, u32 width, u32 height,
|
||||
u32 hotspotX, u32 hotspotY)
|
||||
{
|
||||
|
@ -127,11 +153,11 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
|
|||
const u32 image_size = width * height * sizeof(*image);
|
||||
bool dummy;
|
||||
|
||||
BUG_ON(!image);
|
||||
|
||||
header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy);
|
||||
header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
|
||||
alpha_header = &header->header.alphaHeader;
|
||||
|
||||
memset(header, 0, sizeof(*header));
|
||||
|
||||
header->type = SVGA_ALPHA_CURSOR;
|
||||
header->sizeInBytes = image_size;
|
||||
|
||||
|
@ -141,102 +167,161 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
|
|||
alpha_header->height = height;
|
||||
|
||||
memcpy(header + 1, image, image_size);
|
||||
|
||||
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start);
|
||||
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
|
||||
vps->cursor.bo->resource->start);
|
||||
}
|
||||
|
||||
void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) {
|
||||
if (vcp->cursor_mob[i] != NULL) {
|
||||
ttm_bo_unpin(vcp->cursor_mob[i]);
|
||||
ttm_bo_put(vcp->cursor_mob[i]);
|
||||
kfree(vcp->cursor_mob[i]);
|
||||
vcp->cursor_mob[i] = NULL;
|
||||
static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
|
||||
{
|
||||
return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_du_cursor_plane_acquire_image -- Acquire the image data
|
||||
* @vps: cursor plane state
|
||||
*/
|
||||
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
|
||||
{
|
||||
bool dummy;
|
||||
if (vps->surf) {
|
||||
if (vps->surf_mapped)
|
||||
return vmw_bo_map_and_cache(vps->surf->res.backup);
|
||||
return vps->surf->snooper.image;
|
||||
} else if (vps->bo)
|
||||
return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
|
||||
struct vmw_plane_state *new_vps)
|
||||
{
|
||||
void *old_image;
|
||||
void *new_image;
|
||||
u32 size;
|
||||
bool changed;
|
||||
|
||||
if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
|
||||
old_vps->base.crtc_h != new_vps->base.crtc_h)
|
||||
return true;
|
||||
|
||||
if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
|
||||
old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
|
||||
return true;
|
||||
|
||||
size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
|
||||
|
||||
old_image = vmw_du_cursor_plane_acquire_image(old_vps);
|
||||
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
|
||||
|
||||
changed = false;
|
||||
if (old_image && new_image)
|
||||
changed = memcmp(old_image, new_image, size) != 0;
|
||||
|
||||
return changed;
|
||||
}
|
||||
|
||||
static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
|
||||
{
|
||||
if (!(*bo))
|
||||
return;
|
||||
|
||||
ttm_bo_unpin(*bo);
|
||||
ttm_bo_put(*bo);
|
||||
kfree(*bo);
|
||||
*bo = NULL;
|
||||
}
|
||||
|
||||
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
|
||||
struct vmw_plane_state *vps)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
if (!vps->cursor.bo)
|
||||
return;
|
||||
|
||||
vmw_du_cursor_plane_unmap_cm(vps);
|
||||
|
||||
/* Look for a free slot to return this mob to the cache. */
|
||||
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
|
||||
if (!vcp->cursor_mobs[i]) {
|
||||
vcp->cursor_mobs[i] = vps->cursor.bo;
|
||||
vps->cursor.bo = NULL;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cache is full: See if this mob is bigger than an existing mob. */
|
||||
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
|
||||
if (vcp->cursor_mobs[i]->base.size <
|
||||
vps->cursor.bo->base.size) {
|
||||
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
|
||||
vcp->cursor_mobs[i] = vps->cursor.bo;
|
||||
vps->cursor.bo = NULL;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Destroy it if it's not worth caching. */
|
||||
vmw_du_destroy_cursor_mob(&vps->cursor.bo);
|
||||
}
|
||||
|
||||
#define CURSOR_MOB_SIZE(dimension) \
|
||||
((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader))
|
||||
|
||||
int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor)
|
||||
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
|
||||
struct vmw_plane_state *vps)
|
||||
{
|
||||
struct vmw_private *dev_priv = cursor->base.dev->dev_private;
|
||||
uint32_t cursor_max_dim, mob_max_size;
|
||||
int ret = 0;
|
||||
size_t i;
|
||||
struct vmw_private *dev_priv = vcp->base.dev->dev_private;
|
||||
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
|
||||
u32 i;
|
||||
u32 cursor_max_dim, mob_max_size;
|
||||
int ret;
|
||||
|
||||
if (!dev_priv->has_mob || (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
|
||||
return -ENOSYS;
|
||||
if (!dev_priv->has_mob ||
|
||||
(dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
|
||||
return -EINVAL;
|
||||
|
||||
mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
|
||||
cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
|
||||
|
||||
if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size)
|
||||
cursor_max_dim = 64; /* Mandatorily-supported cursor dimension */
|
||||
if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
|
||||
vps->base.crtc_h > cursor_max_dim)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) {
|
||||
struct ttm_buffer_object **const bo = &cursor->cursor_mob[i];
|
||||
|
||||
ret = vmw_bo_create_kernel(dev_priv,
|
||||
CURSOR_MOB_SIZE(cursor_max_dim),
|
||||
&vmw_mob_placement, bo);
|
||||
|
||||
if (ret != 0)
|
||||
goto teardown;
|
||||
|
||||
if ((*bo)->resource->mem_type != VMW_PL_MOB) {
|
||||
DRM_ERROR("Obtained buffer object is not a MOB.\n");
|
||||
ret = -ENOSYS;
|
||||
goto teardown;
|
||||
}
|
||||
|
||||
/* Fence the mob creation so we are guarateed to have the mob */
|
||||
ret = ttm_bo_reserve(*bo, false, false, NULL);
|
||||
|
||||
if (ret != 0)
|
||||
goto teardown;
|
||||
|
||||
vmw_bo_fence_single(*bo, NULL);
|
||||
|
||||
ttm_bo_unreserve(*bo);
|
||||
|
||||
drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n",
|
||||
(*bo)->resource->start, cursor_max_dim);
|
||||
if (vps->cursor.bo) {
|
||||
if (vps->cursor.bo->base.size >= size)
|
||||
return 0;
|
||||
vmw_du_put_cursor_mob(vcp, vps);
|
||||
}
|
||||
|
||||
/* Look for an unused mob in the cache. */
|
||||
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
|
||||
if (vcp->cursor_mobs[i] &&
|
||||
vcp->cursor_mobs[i]->base.size >= size) {
|
||||
vps->cursor.bo = vcp->cursor_mobs[i];
|
||||
vcp->cursor_mobs[i] = NULL;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
/* Create a new mob if we can't find an existing one. */
|
||||
ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
|
||||
&vps->cursor.bo);
|
||||
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
/* Fence the mob creation so we are guarateed to have the mob */
|
||||
ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
|
||||
if (ret != 0)
|
||||
goto teardown;
|
||||
|
||||
vmw_bo_fence_single(vps->cursor.bo, NULL);
|
||||
ttm_bo_unreserve(vps->cursor.bo);
|
||||
return 0;
|
||||
|
||||
teardown:
|
||||
vmw_du_destroy_cursor_mob_array(cursor);
|
||||
|
||||
vmw_du_destroy_cursor_mob(&vps->cursor.bo);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#undef CURSOR_MOB_SIZE
|
||||
|
||||
static void vmw_cursor_update_bo(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *cm_bo,
|
||||
struct ttm_bo_kmap_obj *cm_map,
|
||||
struct vmw_buffer_object *bo,
|
||||
u32 width, u32 height,
|
||||
u32 hotspotX, u32 hotspotY)
|
||||
{
|
||||
void *virtual;
|
||||
bool dummy;
|
||||
|
||||
virtual = ttm_kmap_obj_virtual(&bo->map, &dummy);
|
||||
if (virtual) {
|
||||
vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual,
|
||||
width, height,
|
||||
hotspotX, hotspotY);
|
||||
atomic_dec(&bo->base_mapped_count);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
|
||||
bool show, int x, int y)
|
||||
|
@ -287,7 +372,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
|
|||
|
||||
cmd = container_of(header, struct vmw_dma_cmd, header);
|
||||
|
||||
/* No snooper installed */
|
||||
/* No snooper installed, nothing to copy */
|
||||
if (!srf->snooper.image)
|
||||
return;
|
||||
|
||||
|
@ -387,15 +472,16 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
|
|||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
du = vmw_crtc_to_du(crtc);
|
||||
if (!du->cursor_surface ||
|
||||
du->cursor_age == du->cursor_surface->snooper.age)
|
||||
du->cursor_age == du->cursor_surface->snooper.age ||
|
||||
!du->cursor_surface->snooper.image)
|
||||
continue;
|
||||
|
||||
du->cursor_age = du->cursor_surface->snooper.age;
|
||||
vmw_cursor_update_image(dev_priv, NULL, NULL,
|
||||
du->cursor_surface->snooper.image,
|
||||
64, 64,
|
||||
du->hotspot_x + du->core_hotspot_x,
|
||||
du->hotspot_y + du->core_hotspot_y);
|
||||
vmw_send_define_cursor_cmd(dev_priv,
|
||||
du->cursor_surface->snooper.image,
|
||||
64, 64,
|
||||
du->hotspot_x + du->core_hotspot_x,
|
||||
du->hotspot_y + du->core_hotspot_y);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
@ -404,8 +490,14 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
|
|||
|
||||
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
|
||||
{
|
||||
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
|
||||
u32 i;
|
||||
|
||||
vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
|
||||
vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
|
||||
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
|
||||
|
||||
drm_plane_cleanup(plane);
|
||||
}
|
||||
|
||||
|
@ -462,6 +554,87 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
|
||||
*
|
||||
* @vps: plane_state
|
||||
*
|
||||
* Returns 0 on success
|
||||
*/
|
||||
|
||||
static int
|
||||
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
|
||||
{
|
||||
int ret;
|
||||
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
|
||||
struct ttm_buffer_object *bo = vps->cursor.bo;
|
||||
|
||||
if (!bo)
|
||||
return -EINVAL;
|
||||
|
||||
if (bo->base.size < size)
|
||||
return -EINVAL;
|
||||
|
||||
if (vps->cursor.mapped)
|
||||
return 0;
|
||||
|
||||
ret = ttm_bo_reserve(bo, false, false, NULL);
|
||||
|
||||
if (unlikely(ret != 0))
|
||||
return -ENOMEM;
|
||||
|
||||
ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
|
||||
|
||||
/*
|
||||
* We just want to try to get mob bind to finish
|
||||
* so that the first write to SVGA_REG_CURSOR_MOBID
|
||||
* is done with a buffer that the device has already
|
||||
* seen
|
||||
*/
|
||||
(void) ttm_bo_wait(bo, false, false);
|
||||
|
||||
ttm_bo_unreserve(bo);
|
||||
|
||||
if (unlikely(ret != 0))
|
||||
return -ENOMEM;
|
||||
|
||||
vps->cursor.mapped = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
|
||||
*
|
||||
* @vps: state of the cursor plane
|
||||
*
|
||||
* Returns 0 on success
|
||||
*/
|
||||
|
||||
static int
|
||||
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ttm_buffer_object *bo = vps->cursor.bo;
|
||||
|
||||
if (!vps->cursor.mapped)
|
||||
return 0;
|
||||
|
||||
if (!bo)
|
||||
return 0;
|
||||
|
||||
ret = ttm_bo_reserve(bo, true, false, NULL);
|
||||
if (likely(ret == 0)) {
|
||||
ttm_bo_kunmap(&vps->cursor.map);
|
||||
ttm_bo_unreserve(bo);
|
||||
vps->cursor.mapped = false;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
|
||||
*
|
||||
|
@ -476,10 +649,16 @@ void
|
|||
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *old_state)
|
||||
{
|
||||
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
|
||||
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
|
||||
bool dummy;
|
||||
|
||||
if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
|
||||
if (vps->surf_mapped) {
|
||||
vmw_bo_unmap(vps->surf->res.backup);
|
||||
vps->surf_mapped = false;
|
||||
}
|
||||
|
||||
if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
|
||||
const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
|
@ -489,14 +668,8 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
|
|||
}
|
||||
}
|
||||
|
||||
if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) {
|
||||
const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
ttm_bo_kunmap(&vps->cm_map);
|
||||
ttm_bo_unreserve(vps->cm_bo);
|
||||
}
|
||||
}
|
||||
vmw_du_cursor_plane_unmap_cm(vps);
|
||||
vmw_du_put_cursor_mob(vcp, vps);
|
||||
|
||||
vmw_du_plane_unpin_surf(vps, false);
|
||||
|
||||
|
@ -511,6 +684,7 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
|
||||
*
|
||||
|
@ -526,8 +700,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
|
|||
struct drm_framebuffer *fb = new_state->fb;
|
||||
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
|
||||
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
|
||||
struct ttm_buffer_object *cm_bo = NULL;
|
||||
bool dummy;
|
||||
int ret = 0;
|
||||
|
||||
if (vps->surf) {
|
||||
|
@ -550,13 +722,14 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
|
|||
}
|
||||
}
|
||||
|
||||
vps->cm_bo = NULL;
|
||||
|
||||
if (vps->surf == NULL && vps->bo != NULL) {
|
||||
if (!vps->surf && vps->bo) {
|
||||
const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
|
||||
|
||||
/* Not using vmw_bo_map_and_cache() helper here as we need to reserve
|
||||
the ttm_buffer_object first which wmw_bo_map_and_cache() omits. */
|
||||
/*
|
||||
* Not using vmw_bo_map_and_cache() helper here as we need to
|
||||
* reserve the ttm_buffer_object first which
|
||||
* vmw_bo_map_and_cache() omits.
|
||||
*/
|
||||
ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
|
||||
|
||||
if (unlikely(ret != 0))
|
||||
|
@ -571,69 +744,24 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
|
|||
|
||||
if (unlikely(ret != 0))
|
||||
return -ENOMEM;
|
||||
} else if (vps->surf && !vps->bo && vps->surf->res.backup) {
|
||||
|
||||
WARN_ON(vps->surf->snooper.image);
|
||||
ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
|
||||
NULL);
|
||||
if (unlikely(ret != 0))
|
||||
return -ENOMEM;
|
||||
vmw_bo_map_and_cache(vps->surf->res.backup);
|
||||
ttm_bo_unreserve(&vps->surf->res.backup->base);
|
||||
vps->surf_mapped = true;
|
||||
}
|
||||
|
||||
if (vps->surf || vps->bo) {
|
||||
unsigned cursor_mob_idx = vps->cursor_mob_idx;
|
||||
|
||||
/* Lazily set up cursor MOBs just once -- no reattempts. */
|
||||
if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL)
|
||||
if (vmw_du_create_cursor_mob_array(vcp) != 0)
|
||||
vps->cursor_mob_idx = cursor_mob_idx = -1U;
|
||||
|
||||
if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) {
|
||||
const u32 size = sizeof(SVGAGBCursorHeader) +
|
||||
new_state->crtc_w * new_state->crtc_h * sizeof(u32);
|
||||
|
||||
cm_bo = vcp->cursor_mob[cursor_mob_idx];
|
||||
|
||||
if (cm_bo->resource->num_pages * PAGE_SIZE < size) {
|
||||
ret = -EINVAL;
|
||||
goto error_bo_unmap;
|
||||
}
|
||||
|
||||
ret = ttm_bo_reserve(cm_bo, false, false, NULL);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
ret = -ENOMEM;
|
||||
goto error_bo_unmap;
|
||||
}
|
||||
|
||||
ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map);
|
||||
|
||||
/*
|
||||
* We just want to try to get mob bind to finish
|
||||
* so that the first write to SVGA_REG_CURSOR_MOBID
|
||||
* is done with a buffer that the device has already
|
||||
* seen
|
||||
*/
|
||||
(void) ttm_bo_wait(cm_bo, false, false);
|
||||
|
||||
ttm_bo_unreserve(cm_bo);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
ret = -ENOMEM;
|
||||
goto error_bo_unmap;
|
||||
}
|
||||
|
||||
vps->cursor_mob_idx = cursor_mob_idx ^ 1;
|
||||
vps->cm_bo = cm_bo;
|
||||
}
|
||||
vmw_du_get_cursor_mob(vcp, vps);
|
||||
vmw_du_cursor_plane_map_cm(vps);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error_bo_unmap:
|
||||
if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
|
||||
const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
|
||||
if (likely(ret == 0)) {
|
||||
atomic_dec(&vps->bo->base_mapped_count);
|
||||
ttm_bo_kunmap(&vps->bo->map);
|
||||
ttm_bo_unreserve(&vps->bo->base);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
@ -649,7 +777,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
|
|||
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
|
||||
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
|
||||
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
|
||||
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
|
||||
s32 hotspot_x, hotspot_y;
|
||||
bool dummy;
|
||||
|
||||
hotspot_x = du->hotspot_x;
|
||||
hotspot_y = du->hotspot_y;
|
||||
|
@ -662,25 +792,40 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
|
|||
du->cursor_surface = vps->surf;
|
||||
du->cursor_bo = vps->bo;
|
||||
|
||||
if (vps->surf) {
|
||||
du->cursor_age = du->cursor_surface->snooper.age;
|
||||
|
||||
vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map,
|
||||
vps->surf->snooper.image,
|
||||
new_state->crtc_w,
|
||||
new_state->crtc_h,
|
||||
hotspot_x, hotspot_y);
|
||||
} else if (vps->bo) {
|
||||
vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map,
|
||||
vps->bo,
|
||||
new_state->crtc_w,
|
||||
new_state->crtc_h,
|
||||
hotspot_x, hotspot_y);
|
||||
} else {
|
||||
if (!vps->surf && !vps->bo) {
|
||||
vmw_cursor_update_position(dev_priv, false, 0, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
vps->cursor.hotspot_x = hotspot_x;
|
||||
vps->cursor.hotspot_y = hotspot_y;
|
||||
|
||||
if (vps->surf) {
|
||||
du->cursor_age = du->cursor_surface->snooper.age;
|
||||
}
|
||||
|
||||
if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
|
||||
/*
|
||||
* If it hasn't changed, avoid making the device do extra
|
||||
* work by keeping the old cursor active.
|
||||
*/
|
||||
struct vmw_cursor_plane_state tmp = old_vps->cursor;
|
||||
old_vps->cursor = vps->cursor;
|
||||
vps->cursor = tmp;
|
||||
} else {
|
||||
void *image = vmw_du_cursor_plane_acquire_image(vps);
|
||||
if (image)
|
||||
vmw_cursor_update_image(dev_priv, vps, image,
|
||||
new_state->crtc_w,
|
||||
new_state->crtc_h,
|
||||
hotspot_x, hotspot_y);
|
||||
}
|
||||
|
||||
if (vps->bo) {
|
||||
if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
|
||||
atomic_dec(&vps->bo->base_mapped_count);
|
||||
}
|
||||
|
||||
du->cursor_x = new_state->crtc_x + du->set_gui_x;
|
||||
du->cursor_y = new_state->crtc_y + du->set_gui_y;
|
||||
|
||||
|
@ -778,12 +923,16 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!vmw_framebuffer_to_vfb(fb)->bo)
|
||||
if (!vmw_framebuffer_to_vfb(fb)->bo) {
|
||||
surface = vmw_framebuffer_to_vfbs(fb)->surface;
|
||||
|
||||
if (surface && !surface->snooper.image) {
|
||||
DRM_ERROR("surface not suitable for cursor\n");
|
||||
return -EINVAL;
|
||||
WARN_ON(!surface);
|
||||
|
||||
if (!surface ||
|
||||
(!surface->snooper.image && !surface->res.backup)) {
|
||||
DRM_ERROR("surface not suitable for cursor\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -831,15 +980,6 @@ void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
|
|||
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_pending_vblank_event *event = crtc->state->event;
|
||||
|
||||
if (event) {
|
||||
crtc->state->event = NULL;
|
||||
|
||||
spin_lock_irq(&crtc->dev->event_lock);
|
||||
drm_crtc_send_vblank_event(crtc, event);
|
||||
spin_unlock_irq(&crtc->dev->event_lock);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -943,6 +1083,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
|
|||
vps->pinned = 0;
|
||||
vps->cpp = 0;
|
||||
|
||||
memset(&vps->cursor, 0, sizeof(vps->cursor));
|
||||
|
||||
/* Each ref counted resource needs to be acquired again */
|
||||
if (vps->surf)
|
||||
(void) vmw_surface_reference(vps->surf);
|
||||
|
@ -997,7 +1139,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
|
|||
{
|
||||
struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
|
||||
|
||||
|
||||
/* Should have been freed by cleanup_fb */
|
||||
if (vps->surf)
|
||||
vmw_surface_unreference(&vps->surf);
|
||||
|
@ -2052,6 +2193,8 @@ int vmw_kms_init(struct vmw_private *dev_priv)
|
|||
dev->mode_config.min_height = 1;
|
||||
dev->mode_config.max_width = dev_priv->texture_max_width;
|
||||
dev->mode_config.max_height = dev_priv->texture_max_height;
|
||||
dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
|
||||
dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
|
||||
|
||||
drm_mode_create_suggested_offset_properties(dev);
|
||||
vmw_kms_create_hotplug_mode_update_property(dev_priv);
|
||||
|
@ -2093,7 +2236,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
|
|||
struct drm_crtc *crtc;
|
||||
int ret = 0;
|
||||
|
||||
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
|
||||
|
||||
|
@ -2155,30 +2297,6 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
|
|||
dev_priv->max_primary_mem : dev_priv->vram_size);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Function called by DRM code called with vbl_lock held.
|
||||
*/
|
||||
u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function called by DRM code called with vbl_lock held.
|
||||
*/
|
||||
int vmw_enable_vblank(struct drm_crtc *crtc)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function called by DRM code called with vbl_lock held.
|
||||
*/
|
||||
void vmw_disable_vblank(struct drm_crtc *crtc)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_du_update_layout - Update the display unit with topology from resolution
|
||||
* plugin and generate DRM uevent
|
||||
|
@ -2222,8 +2340,8 @@ retry:
|
|||
du->gui_x = rects[du->unit].x1;
|
||||
du->gui_y = rects[du->unit].y1;
|
||||
} else {
|
||||
du->pref_width = 800;
|
||||
du->pref_height = 600;
|
||||
du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
|
||||
du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
|
||||
du->pref_active = false;
|
||||
du->gui_x = 0;
|
||||
du->gui_y = 0;
|
||||
|
@ -2250,13 +2368,13 @@ retry:
|
|||
}
|
||||
con->status = vmw_du_connector_detect(con, true);
|
||||
}
|
||||
|
||||
drm_sysfs_hotplug_event(dev);
|
||||
out_fini:
|
||||
drm_modeset_drop_locks(&ctx);
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
drm_sysfs_hotplug_event(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2536,10 +2654,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
|
|||
int ret, i;
|
||||
|
||||
if (!arg->num_outputs) {
|
||||
struct drm_rect def_rect = {0, 0, 800, 600};
|
||||
VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
|
||||
def_rect.x1, def_rect.y1,
|
||||
def_rect.x2, def_rect.y2);
|
||||
struct drm_rect def_rect = {0, 0,
|
||||
VMWGFX_MIN_INITIAL_WIDTH,
|
||||
VMWGFX_MIN_INITIAL_HEIGHT};
|
||||
vmw_du_update_layout(dev_priv, 1, &def_rect);
|
||||
return 0;
|
||||
}
|
||||
|
@ -2834,68 +2951,6 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
|
||||
unsigned unit,
|
||||
u32 max_width,
|
||||
u32 max_height,
|
||||
struct drm_connector **p_con,
|
||||
struct drm_crtc **p_crtc,
|
||||
struct drm_display_mode **p_mode)
|
||||
{
|
||||
struct drm_connector *con;
|
||||
struct vmw_display_unit *du;
|
||||
struct drm_display_mode *mode;
|
||||
int i = 0;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&dev_priv->drm.mode_config.mutex);
|
||||
list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
|
||||
head) {
|
||||
if (i == unit)
|
||||
break;
|
||||
|
||||
++i;
|
||||
}
|
||||
|
||||
if (&con->head == &dev_priv->drm.mode_config.connector_list) {
|
||||
DRM_ERROR("Could not find initial display unit.\n");
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (list_empty(&con->modes))
|
||||
(void) vmw_du_connector_fill_modes(con, max_width, max_height);
|
||||
|
||||
if (list_empty(&con->modes)) {
|
||||
DRM_ERROR("Could not find initial display mode.\n");
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
du = vmw_connector_to_du(con);
|
||||
*p_con = con;
|
||||
*p_crtc = &du->crtc;
|
||||
|
||||
list_for_each_entry(mode, &con->modes, head) {
|
||||
if (mode->type & DRM_MODE_TYPE_PREFERRED)
|
||||
break;
|
||||
}
|
||||
|
||||
if (&mode->head == &con->modes) {
|
||||
WARN_ONCE(true, "Could not find initial preferred mode.\n");
|
||||
*p_mode = list_first_entry(&con->modes,
|
||||
struct drm_display_mode,
|
||||
head);
|
||||
} else {
|
||||
*p_mode = mode;
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&dev_priv->drm.mode_config.mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
|
||||
* property.
|
||||
|
|
|
@ -272,6 +272,14 @@ struct vmw_crtc_state {
|
|||
struct drm_crtc_state base;
|
||||
};
|
||||
|
||||
struct vmw_cursor_plane_state {
|
||||
struct ttm_buffer_object *bo;
|
||||
struct ttm_bo_kmap_obj map;
|
||||
bool mapped;
|
||||
s32 hotspot_x;
|
||||
s32 hotspot_y;
|
||||
};
|
||||
|
||||
/**
|
||||
* Derived class for plane state object
|
||||
*
|
||||
|
@ -295,13 +303,8 @@ struct vmw_plane_state {
|
|||
/* For CPU Blit */
|
||||
unsigned int cpp;
|
||||
|
||||
/* CursorMob flipping index; -1 if cursor mobs not used */
|
||||
unsigned int cursor_mob_idx;
|
||||
/* Currently-active CursorMob */
|
||||
struct ttm_buffer_object *cm_bo;
|
||||
/* CursorMob kmap_obj; expected valid at cursor_plane_atomic_update
|
||||
IFF currently-active CursorMob above is valid */
|
||||
struct ttm_bo_kmap_obj cm_map;
|
||||
bool surf_mapped;
|
||||
struct vmw_cursor_plane_state cursor;
|
||||
};
|
||||
|
||||
|
||||
|
@ -338,11 +341,12 @@ struct vmw_connector_state {
|
|||
* Derived class for cursor plane object
|
||||
*
|
||||
* @base DRM plane object
|
||||
* @cursor_mob array of two MOBs for CursorMob flipping
|
||||
* @cursor.cursor_mobs Cursor mobs available for re-use
|
||||
*/
|
||||
struct vmw_cursor_plane {
|
||||
struct drm_plane base;
|
||||
struct ttm_buffer_object *cursor_mob[2];
|
||||
|
||||
struct ttm_buffer_object *cursor_mobs[3];
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -458,13 +462,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
|
|||
struct vmw_surface *surface,
|
||||
bool only_2d,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd);
|
||||
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
|
||||
unsigned unit,
|
||||
u32 max_width,
|
||||
u32 max_height,
|
||||
struct drm_connector **p_con,
|
||||
struct drm_crtc **p_crtc,
|
||||
struct drm_display_mode **p_mode);
|
||||
void vmw_guess_mode_timing(struct drm_display_mode *mode);
|
||||
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
|
||||
void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
|
||||
|
@ -472,8 +469,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
|
|||
/* Universal Plane Helpers */
|
||||
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
|
||||
void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
|
||||
int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *vcp);
|
||||
void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp);
|
||||
|
||||
/* Atomic Helpers */
|
||||
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
|
@ -235,9 +234,6 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
|
|||
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
|
||||
.atomic_destroy_state = vmw_du_crtc_destroy_state,
|
||||
.set_config = drm_atomic_helper_set_config,
|
||||
.get_vblank_counter = vmw_get_vblank_counter,
|
||||
.enable_vblank = vmw_enable_vblank,
|
||||
.disable_vblank = vmw_disable_vblank,
|
||||
};
|
||||
|
||||
|
||||
|
@ -507,10 +503,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
|
|||
dev_priv->ldu_priv->last_num_active = 0;
|
||||
dev_priv->ldu_priv->fb = NULL;
|
||||
|
||||
ret = drm_vblank_init(dev, num_display_units);
|
||||
if (ret != 0)
|
||||
goto err_free;
|
||||
|
||||
vmw_kms_create_implicit_placement_property(dev_priv);
|
||||
|
||||
for (i = 0; i < num_display_units; ++i) {
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#define _VMWGFX_MKSSTAT_H_
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <linux/kconfig.h>
|
||||
|
||||
/* Reservation marker for mksstat pid's */
|
||||
#define MKSSTAT_PID_RESERVED -1
|
||||
|
@ -41,6 +42,7 @@
|
|||
|
||||
typedef enum {
|
||||
MKSSTAT_KERN_EXECBUF, /* vmw_execbuf_ioctl */
|
||||
MKSSTAT_KERN_COTABLE_RESIZE,
|
||||
|
||||
MKSSTAT_KERN_COUNT /* Reserved entry; always last */
|
||||
} mksstat_kern_stats_t;
|
||||
|
|
|
@ -85,7 +85,14 @@ struct rpc_channel {
|
|||
u32 cookie_low;
|
||||
};
|
||||
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
|
||||
/* Kernel mksGuestStats counter names and desciptions; same order as enum mksstat_kern_stats_t */
|
||||
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
|
||||
{
|
||||
{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
|
||||
{ "vmw_cotable_resize", "vmw_cotable_resize" },
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* vmw_open_channel
|
||||
|
@ -695,12 +702,6 @@ static inline void hypervisor_ppn_remove(PPN64 pfn)
|
|||
/* Header to the text description of mksGuestStat instance descriptor */
|
||||
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
|
||||
|
||||
/* Kernel mksGuestStats counter names and desciptions; same order as enum mksstat_kern_stats_t */
|
||||
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
|
||||
{
|
||||
{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
|
||||
};
|
||||
|
||||
/**
|
||||
* mksstat_init_record: Initializes an MKSGuestStatCounter-based record
|
||||
* for the respective mksGuestStat index.
|
||||
|
@ -786,6 +787,7 @@ static int mksstat_init_kern_id(struct page **ppage)
|
|||
/* Set up all kernel-internal counters and corresponding structures */
|
||||
pstrs_acc = pstrs;
|
||||
pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
|
||||
pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);
|
||||
|
||||
/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */
|
||||
|
||||
|
@ -1014,8 +1016,6 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
struct vmw_private *const dev_priv = vmw_priv(dev);
|
||||
|
||||
struct page *page;
|
||||
MKSGuestStatInstanceDescriptor *pdesc;
|
||||
const size_t num_pages_stat = PFN_UP(arg->stat_len);
|
||||
const size_t num_pages_info = PFN_UP(arg->info_len);
|
||||
const size_t num_pages_strs = PFN_UP(arg->strs_len);
|
||||
|
@ -1023,10 +1023,13 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
|
|||
long nr_pinned_stat;
|
||||
long nr_pinned_info;
|
||||
long nr_pinned_strs;
|
||||
struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)];
|
||||
struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)];
|
||||
struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)];
|
||||
MKSGuestStatInstanceDescriptor *pdesc;
|
||||
struct page *page = NULL;
|
||||
struct page **pages_stat = NULL;
|
||||
struct page **pages_info = NULL;
|
||||
struct page **pages_strs = NULL;
|
||||
size_t i, slot;
|
||||
int ret_err = -ENOMEM;
|
||||
|
||||
arg->id = -1;
|
||||
|
||||
|
@ -1054,13 +1057,23 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
BUG_ON(dev_priv->mksstat_user_pages[slot]);
|
||||
|
||||
/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
|
||||
pages_stat = (struct page **)kmalloc_array(
|
||||
ARRAY_SIZE(pdesc->statPPNs) +
|
||||
ARRAY_SIZE(pdesc->infoPPNs) +
|
||||
ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);
|
||||
|
||||
if (!pages_stat)
|
||||
goto err_nomem;
|
||||
|
||||
pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
|
||||
pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);
|
||||
|
||||
/* Allocate a page for the instance descriptor */
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
||||
|
||||
if (!page) {
|
||||
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (!page)
|
||||
goto err_nomem;
|
||||
|
||||
/* Set up the instance descriptor */
|
||||
pdesc = page_address(page);
|
||||
|
@ -1075,9 +1088,8 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
|
|||
ARRAY_SIZE(pdesc->description) - 1);
|
||||
|
||||
if (desc_len < 0) {
|
||||
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
|
||||
__free_page(page);
|
||||
return -EFAULT;
|
||||
ret_err = -EFAULT;
|
||||
goto err_nomem;
|
||||
}
|
||||
|
||||
reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
|
||||
|
@ -1118,6 +1130,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);
|
||||
|
||||
kfree(pages_stat);
|
||||
return 0;
|
||||
|
||||
err_pin_strs:
|
||||
|
@ -1132,9 +1145,13 @@ err_pin_stat:
|
|||
if (nr_pinned_stat > 0)
|
||||
unpin_user_pages(pages_stat, nr_pinned_stat);
|
||||
|
||||
err_nomem:
|
||||
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
|
||||
__free_page(page);
|
||||
return -ENOMEM;
|
||||
if (page)
|
||||
__free_page(page);
|
||||
kfree(pages_stat);
|
||||
|
||||
return ret_err;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_damage_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
|
@ -320,9 +319,6 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
|
|||
.atomic_destroy_state = vmw_du_crtc_destroy_state,
|
||||
.set_config = drm_atomic_helper_set_config,
|
||||
.page_flip = drm_atomic_helper_page_flip,
|
||||
.get_vblank_counter = vmw_get_vblank_counter,
|
||||
.enable_vblank = vmw_enable_vblank,
|
||||
.disable_vblank = vmw_disable_vblank,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -730,7 +726,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
|
|||
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
|
||||
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
|
||||
struct drm_crtc *crtc = new_state->crtc;
|
||||
struct drm_pending_vblank_event *event = NULL;
|
||||
struct vmw_fence_obj *fence = NULL;
|
||||
int ret;
|
||||
|
||||
|
@ -754,24 +749,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
|
|||
return;
|
||||
}
|
||||
|
||||
/* For error case vblank event is send from vmw_du_crtc_atomic_flush */
|
||||
event = crtc->state->event;
|
||||
if (event && fence) {
|
||||
struct drm_file *file_priv = event->base.file_priv;
|
||||
|
||||
ret = vmw_event_fence_action_queue(file_priv,
|
||||
fence,
|
||||
&event->base,
|
||||
&event->event.vbl.tv_sec,
|
||||
&event->event.vbl.tv_usec,
|
||||
true);
|
||||
|
||||
if (unlikely(ret != 0))
|
||||
DRM_ERROR("Failed to queue event on fence.\n");
|
||||
else
|
||||
crtc->state->event = NULL;
|
||||
}
|
||||
|
||||
if (fence)
|
||||
vmw_fence_obj_unreference(&fence);
|
||||
}
|
||||
|
@ -947,18 +924,12 @@ err_free:
|
|||
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int i, ret;
|
||||
int i;
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
ret = -ENOMEM;
|
||||
|
||||
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
|
||||
vmw_sou_init(dev_priv, i);
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_damage_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "vmwgfx_kms.h"
|
||||
#include "vmw_surface_cache.h"
|
||||
|
@ -925,9 +924,6 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
|
|||
.atomic_destroy_state = vmw_du_crtc_destroy_state,
|
||||
.set_config = drm_atomic_helper_set_config,
|
||||
.page_flip = drm_atomic_helper_page_flip,
|
||||
.get_vblank_counter = vmw_get_vblank_counter,
|
||||
.enable_vblank = vmw_enable_vblank,
|
||||
.disable_vblank = vmw_disable_vblank,
|
||||
};
|
||||
|
||||
|
||||
|
@ -1591,7 +1587,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
|
|||
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
|
||||
struct drm_crtc *crtc = new_state->crtc;
|
||||
struct vmw_screen_target_display_unit *stdu;
|
||||
struct drm_pending_vblank_event *event;
|
||||
struct vmw_fence_obj *fence = NULL;
|
||||
struct vmw_private *dev_priv;
|
||||
int ret;
|
||||
|
@ -1640,23 +1635,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
|
|||
return;
|
||||
}
|
||||
|
||||
/* In case of error, vblank event is send in vmw_du_crtc_atomic_flush */
|
||||
event = crtc->state->event;
|
||||
if (event && fence) {
|
||||
struct drm_file *file_priv = event->base.file_priv;
|
||||
|
||||
ret = vmw_event_fence_action_queue(file_priv,
|
||||
fence,
|
||||
&event->base,
|
||||
&event->event.vbl.tv_sec,
|
||||
&event->event.vbl.tv_usec,
|
||||
true);
|
||||
if (ret)
|
||||
DRM_ERROR("Failed to queue event on fence.\n");
|
||||
else
|
||||
crtc->state->event = NULL;
|
||||
}
|
||||
|
||||
if (fence)
|
||||
vmw_fence_obj_unreference(&fence);
|
||||
}
|
||||
|
@ -1883,10 +1861,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
|
|||
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
|
||||
return -ENOSYS;
|
||||
|
||||
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
dev_priv->active_display_unit = vmw_du_screen_target;
|
||||
|
||||
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -180,11 +180,16 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
|
|||
if (!ctx->merge_dups)
|
||||
return NULL;
|
||||
|
||||
if (ctx->ht) {
|
||||
if (ctx->sw_context) {
|
||||
struct vmwgfx_hash_item *hash;
|
||||
unsigned long key = (unsigned long) vbo;
|
||||
|
||||
if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
|
||||
bo_node = container_of(hash, typeof(*bo_node), hash);
|
||||
hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
|
||||
if (hash->key == key) {
|
||||
bo_node = container_of(hash, typeof(*bo_node), hash);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
struct vmw_validation_bo_node *entry;
|
||||
|
||||
|
@ -217,11 +222,16 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
|
|||
if (!ctx->merge_dups)
|
||||
return NULL;
|
||||
|
||||
if (ctx->ht) {
|
||||
if (ctx->sw_context) {
|
||||
struct vmwgfx_hash_item *hash;
|
||||
unsigned long key = (unsigned long) res;
|
||||
|
||||
if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
|
||||
res_node = container_of(hash, typeof(*res_node), hash);
|
||||
hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
|
||||
if (hash->key == key) {
|
||||
res_node = container_of(hash, typeof(*res_node), hash);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
struct vmw_validation_res_node *entry;
|
||||
|
||||
|
@ -269,20 +279,15 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
|
|||
}
|
||||
} else {
|
||||
struct ttm_validate_buffer *val_buf;
|
||||
int ret;
|
||||
|
||||
bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
|
||||
if (!bo_node)
|
||||
return -ENOMEM;
|
||||
|
||||
if (ctx->ht) {
|
||||
if (ctx->sw_context) {
|
||||
bo_node->hash.key = (unsigned long) vbo;
|
||||
ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to initialize a buffer "
|
||||
"validation entry.\n");
|
||||
return ret;
|
||||
}
|
||||
hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
|
||||
bo_node->hash.key);
|
||||
}
|
||||
val_buf = &bo_node->base;
|
||||
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
|
||||
|
@ -316,7 +321,6 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
|
|||
bool *first_usage)
|
||||
{
|
||||
struct vmw_validation_res_node *node;
|
||||
int ret;
|
||||
|
||||
node = vmw_validation_find_res_dup(ctx, res);
|
||||
if (node) {
|
||||
|
@ -330,14 +334,9 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (ctx->ht) {
|
||||
if (ctx->sw_context) {
|
||||
node->hash.key = (unsigned long) res;
|
||||
ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to initialize a resource validation "
|
||||
"entry.\n");
|
||||
return ret;
|
||||
}
|
||||
hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
|
||||
}
|
||||
node->res = vmw_resource_reference_unless_doomed(res);
|
||||
if (!node->res)
|
||||
|
@ -681,19 +680,19 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
|
|||
struct vmw_validation_bo_node *entry;
|
||||
struct vmw_validation_res_node *val;
|
||||
|
||||
if (!ctx->ht)
|
||||
if (!ctx->sw_context)
|
||||
return;
|
||||
|
||||
list_for_each_entry(entry, &ctx->bo_list, base.head)
|
||||
(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
|
||||
hash_del_rcu(&entry->hash.head);
|
||||
|
||||
list_for_each_entry(val, &ctx->resource_list, head)
|
||||
(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
|
||||
hash_del_rcu(&val->hash.head);
|
||||
|
||||
list_for_each_entry(val, &ctx->resource_ctx_list, head)
|
||||
(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
|
||||
hash_del_rcu(&entry->hash.head);
|
||||
|
||||
ctx->ht = NULL;
|
||||
ctx->sw_context = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
|
||||
* Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -29,12 +29,11 @@
|
|||
#define _VMWGFX_VALIDATION_H_
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/hashtable.h>
|
||||
#include <linux/ww_mutex.h>
|
||||
|
||||
#include <drm/ttm/ttm_execbuf_util.h>
|
||||
|
||||
#include "vmwgfx_hashtab.h"
|
||||
|
||||
#define VMW_RES_DIRTY_NONE 0
|
||||
#define VMW_RES_DIRTY_SET BIT(0)
|
||||
#define VMW_RES_DIRTY_CLEAR BIT(1)
|
||||
|
@ -59,7 +58,7 @@
|
|||
* @total_mem: Amount of reserved memory.
|
||||
*/
|
||||
struct vmw_validation_context {
|
||||
struct vmwgfx_open_hash *ht;
|
||||
struct vmw_sw_context *sw_context;
|
||||
struct list_head resource_list;
|
||||
struct list_head resource_ctx_list;
|
||||
struct list_head bo_list;
|
||||
|
@ -82,16 +81,16 @@ struct vmw_fence_obj;
|
|||
/**
|
||||
* DECLARE_VAL_CONTEXT - Declare a validation context with initialization
|
||||
* @_name: The name of the variable
|
||||
* @_ht: The hash table used to find dups or NULL if none
|
||||
* @_sw_context: Contains the hash table used to find dups or NULL if none
|
||||
* @_merge_dups: Whether to merge duplicate buffer object- or resource
|
||||
* entries. If set to true, ideally a hash table pointer should be supplied
|
||||
* as well unless the number of resources and buffer objects per validation
|
||||
* is known to be very small
|
||||
*/
|
||||
#endif
|
||||
#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups) \
|
||||
#define DECLARE_VAL_CONTEXT(_name, _sw_context, _merge_dups) \
|
||||
struct vmw_validation_context _name = \
|
||||
{ .ht = _ht, \
|
||||
{ .sw_context = _sw_context, \
|
||||
.resource_list = LIST_HEAD_INIT((_name).resource_list), \
|
||||
.resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
|
||||
.bo_list = LIST_HEAD_INIT((_name).bo_list), \
|
||||
|
@ -114,19 +113,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
|
|||
return !list_empty(&ctx->bo_list);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_validation_set_ht - Register a hash table for duplicate finding
|
||||
* @ctx: The validation context
|
||||
* @ht: Pointer to a hash table to use for duplicate finding
|
||||
* This function is intended to be used if the hash table wasn't
|
||||
* available at validation context declaration time
|
||||
*/
|
||||
static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
|
||||
struct vmwgfx_open_hash *ht)
|
||||
{
|
||||
ctx->ht = ht;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_validation_bo_reserve - Reserve buffer objects registered with a
|
||||
* validation context
|
||||
|
|
|
@ -1550,12 +1550,20 @@ struct drm_connector {
|
|||
struct drm_cmdline_mode cmdline_mode;
|
||||
/** @force: a DRM_FORCE_<foo> state for forced mode sets */
|
||||
enum drm_connector_force force;
|
||||
|
||||
/**
|
||||
* @override_edid: has the EDID been overwritten through debugfs for
|
||||
* testing? Do not modify outside of drm_edid_override_set() and
|
||||
* drm_edid_override_reset().
|
||||
* @edid_override: Override EDID set via debugfs.
|
||||
*
|
||||
* Do not modify or access outside of the drm_edid_override_* family of
|
||||
* functions.
|
||||
*/
|
||||
bool override_edid;
|
||||
const struct drm_edid *edid_override;
|
||||
|
||||
/**
|
||||
* @edid_override_mutex: Protect access to edid_override.
|
||||
*/
|
||||
struct mutex edid_override_mutex;
|
||||
|
||||
/** @epoch_counter: used to detect any other changes in connector, besides status */
|
||||
u64 epoch_counter;
|
||||
|
||||
|
|
|
@ -388,15 +388,8 @@ int drm_av_sync_delay(struct drm_connector *connector,
|
|||
const struct drm_display_mode *mode);
|
||||
|
||||
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
|
||||
struct edid *drm_load_edid_firmware(struct drm_connector *connector);
|
||||
int __drm_set_edid_firmware_path(const char *path);
|
||||
int __drm_get_edid_firmware_path(char *buf, size_t bufsize);
|
||||
#else
|
||||
static inline struct edid *
|
||||
drm_load_edid_firmware(struct drm_connector *connector)
|
||||
{
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
#endif
|
||||
|
||||
bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
|
||||
|
@ -577,7 +570,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
|
|||
struct i2c_adapter *adapter);
|
||||
struct edid *drm_edid_duplicate(const struct edid *edid);
|
||||
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
|
||||
int drm_add_override_edid_modes(struct drm_connector *connector);
|
||||
int drm_edid_override_connector_update(struct drm_connector *connector);
|
||||
|
||||
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
|
||||
bool drm_detect_hdmi_monitor(const struct edid *edid);
|
||||
|
@ -606,6 +599,7 @@ drm_display_mode_from_cea_vic(struct drm_device *dev,
|
|||
const struct drm_edid *drm_edid_alloc(const void *edid, size_t size);
|
||||
const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid);
|
||||
void drm_edid_free(const struct drm_edid *drm_edid);
|
||||
bool drm_edid_valid(const struct drm_edid *drm_edid);
|
||||
const struct edid *drm_edid_raw(const struct drm_edid *drm_edid);
|
||||
const struct drm_edid *drm_edid_read(struct drm_connector *connector);
|
||||
const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
|
||||
|
|
Loading…
Reference in New Issue