Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This is probably a bit big, but just because I fell behind last week
  and didn't get to doing any pulls, so stuff backed up behind me, I
  actually should have sent this for -rc3 but failed to even manage
  that.

  So this has radeon, intel, nouveau, vmware, exynos and tegra fixes in
  it, and the line count isn't all that bad in the end"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (50 commits)
  drm: fix the addition of the side-by-side (half) flag for extra 3D modes
  drm/edid: fix length check when adding extra 3D modes
  drm/radeon/atom: fix bus probes when hw_i2c is set (v2)
  drm/radeon: fix null pointer dereference in dce6+ audio code
  drm/radeon: fixup bad vram size on SI
  udl: fix issue with imported prime buffers
  drm/vmwgfx: Add our connectors to sysfs
  drm/vmwgfx: Fix dma buffer memory size accounting
  drm/vmwgfx: Fix up and comment the dumb buffer implementation
  drm/vmwgfx: Correctly set the enabled state on crtcs
  drm/nv50/disp: min/max are reversed in nv50_crtc_gamma_set()
  drm/nouveau/sw: fix oops if gpu has its display block disabled
  drm/nouveau: unreference fence after syncing
  drm/nouveau/kms: send timestamp data for correct head in flip completion events
  drm/nouveau/clk: Add support for NVAA/NVAC
  drm/nouveau/fifo: Hook up pause and resume for NV50 and NV84+
  drm/nv10/plane: some chipsets don't support NV12
  drm/nv10/plane: add downscaling restrictions
  drm/nv10/plane: fix format computation
  drm/nv04-nv30/clk: provide an empty domain list
  ...
This commit is contained in:
Linus Torvalds 2013-12-09 09:43:07 -08:00
commit 78fd82238d
53 changed files with 849 additions and 267 deletions

View File

@ -2674,7 +2674,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
int modes = 0; int modes = 0;
u8 cea_mode; u8 cea_mode;
if (video_db == NULL || video_index > video_len) if (video_db == NULL || video_index >= video_len)
return 0; return 0;
/* CEA modes are numbered 1..127 */ /* CEA modes are numbered 1..127 */
@ -2701,7 +2701,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
if (structure & (1 << 8)) { if (structure & (1 << 8)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) { if (newmode) {
newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode); drm_mode_probed_add(connector, newmode);
modes++; modes++;
} }

View File

@ -173,29 +173,38 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
static void exynos_drm_preclose(struct drm_device *dev, static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file) struct drm_file *file)
{ {
struct exynos_drm_private *private = dev->dev_private;
struct drm_pending_vblank_event *e, *t;
unsigned long flags;
/* release events of current file */
spin_lock_irqsave(&dev->event_lock, flags);
list_for_each_entry_safe(e, t, &private->pageflip_event_list,
base.link) {
if (e->base.file_priv == file) {
list_del(&e->base.link);
e->base.destroy(&e->base);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
exynos_drm_subdrv_close(dev, file); exynos_drm_subdrv_close(dev, file);
} }
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{ {
struct exynos_drm_private *private = dev->dev_private;
struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
if (!file->driver_priv) if (!file->driver_priv)
return; return;
/* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
base.link) {
if (v->base.file_priv == file) {
list_del(&v->base.link);
drm_vblank_put(dev, v->pipe);
v->base.destroy(&v->base);
}
}
/* Release all events handled by page flip handler but not freed. */
list_for_each_entry_safe(e, et, &file->event_list, link) {
list_del(&e->link);
e->destroy(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(file->driver_priv); kfree(file->driver_priv);
file->driver_priv = NULL; file->driver_priv = NULL;
} }

View File

@ -31,7 +31,7 @@
#include "exynos_drm_iommu.h" #include "exynos_drm_iommu.h"
/* /*
* FIMD is stand for Fully Interactive Mobile Display and * FIMD stands for Fully Interactive Mobile Display and
* as a display controller, it transfers contents drawn on memory * as a display controller, it transfers contents drawn on memory
* to a LCD Panel through Display Interfaces such as RGB or * to a LCD Panel through Display Interfaces such as RGB or
* CPU Interface. * CPU Interface.

View File

@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
* Disable CRTCs directly since we want to preserve sw state * Disable CRTCs directly since we want to preserve sw state
* for _thaw. * for _thaw.
*/ */
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc); dev_priv->display.crtc_disable(crtc);
mutex_unlock(&dev->mode_config.mutex);
intel_modeset_suspend_hw(dev); intel_modeset_suspend_hw(dev);
} }

View File

@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size) if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HSW_GT3(dev)) if (IS_HASWELL(dev))
I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
else LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) { if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL); u32 temp = I915_READ(GEN7_MSG_CTL);

View File

@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_get_pages(obj);
if (ret) if (ret)
goto error; goto err;
i915_gem_object_pin_pages(obj);
ret = -ENOMEM; ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL) if (pages == NULL)
goto error; goto err_unpin;
i = 0; i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
drm_free_large(pages); drm_free_large(pages);
if (!obj->dma_buf_vmapping) if (!obj->dma_buf_vmapping)
goto error; goto err_unpin;
obj->vmapping_count = 1; obj->vmapping_count = 1;
i915_gem_object_pin_pages(obj);
out_unlock: out_unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping; return obj->dma_buf_vmapping;
error: err_unpin:
i915_gem_object_unpin_pages(obj);
err:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret); return ERR_PTR(ret);
} }

View File

@ -33,6 +33,9 @@
#include "intel_drv.h" #include "intel_drv.h"
#include <linux/dma_remapping.h> #include <linux/dma_remapping.h>
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
struct eb_vmas { struct eb_vmas {
struct list_head vmas; struct list_head vmas;
int and; int and;
@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
} }
} }
static void eb_destroy(struct eb_vmas *eb) { static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
struct drm_i915_gem_object *obj = vma->obj;
if (!drm_mm_node_allocated(&vma->node))
return;
entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static void eb_destroy(struct eb_vmas *eb)
{
while (!list_empty(&eb->vmas)) { while (!list_empty(&eb->vmas)) {
struct i915_vma *vma; struct i915_vma *vma;
@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
struct i915_vma, struct i915_vma,
exec_list); exec_list);
list_del_init(&vma->exec_list); list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base); drm_gem_object_unreference(&vma->obj->base);
} }
kfree(eb); kfree(eb);
@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
return ret; return ret;
} }
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int static int
need_reloc_mappable(struct i915_vma *vma) need_reloc_mappable(struct i915_vma *vma)
{ {
@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0; return 0;
} }
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
struct drm_i915_gem_object *obj = vma->obj;
if (!drm_mm_node_allocated(&vma->node))
return;
entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static int static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas, struct list_head *vmas,
@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
goto err; goto err;
} }
err: /* Decrement pin count for bound objects */ err:
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);
if (ret != -ENOSPC || retry++) if (ret != -ENOSPC || retry++)
return ret; return ret;
/* Decrement pin count for bound objects */
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);
ret = i915_gem_evict_vm(vm, true); ret = i915_gem_evict_vm(vm, true);
if (ret) if (ret)
return ret; return ret;
@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
while (!list_empty(&eb->vmas)) { while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list); list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base); drm_gem_object_unreference(&vma->obj->base);
} }

View File

@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) #define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) #define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) #define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
case I915_CACHE_NONE: case I915_CACHE_NONE:
break; break;
case I915_CACHE_WT: case I915_CACHE_WT:
pte |= HSW_WT_ELLC_LLC_AGE0; pte |= HSW_WT_ELLC_LLC_AGE3;
break; break;
default: default:
pte |= HSW_WB_ELLC_LLC_AGE0; pte |= HSW_WB_ELLC_LLC_AGE3;
break; break;
} }

View File

@ -235,6 +235,7 @@
*/ */
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21) #define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18) #define MI_INVALIDATE_TLB (1<<18)

View File

@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp; ddi_translations = ddi_translations_dp;
break; break;
case PORT_D: case PORT_D:
if (intel_dpd_is_edp(dev)) if (intel_dp_is_edp(dev, PORT_D))
ddi_translations = ddi_translations_edp; ddi_translations = ddi_translations_edp;
else else
ddi_translations = ddi_translations_dp; ddi_translations = ddi_translations_dp;
@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait) if (wait)
intel_wait_ddi_buf_idle(dev_priv, port); intel_wait_ddi_buf_idle(dev_priv, port);
if (type == INTEL_OUTPUT_EDP) { if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp); ironlake_edp_panel_off(intel_dp);
} }

View File

@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
uint16_t postoff = 0; uint16_t postoff = 0;
if (intel_crtc->config.limited_color_range) if (intel_crtc->config.limited_color_range)
postoff = (16 * (1 << 13) / 255) & 0x1fff; postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/* Make sure we're not on PC8 state before disabling PC8, otherwise /* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */ * we'll hang the machine! */
dev_priv->uncore.funcs.force_wake_get(dev_priv); gen6_gt_force_wake_get(dev_priv);
if (val & LCPLL_POWER_DOWN_ALLOW) { if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW; val &= ~LCPLL_POWER_DOWN_ALLOW;
@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n"); DRM_ERROR("Switching back to LCPLL failed\n");
} }
dev_priv->uncore.funcs.force_wake_put(dev_priv); gen6_gt_force_wake_put(dev_priv);
} }
void hsw_enable_pc8_work(struct work_struct *__work) void hsw_enable_pc8_work(struct work_struct *__work)
@ -8354,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE)); DERRMR_PIPEC_PRI_FLIP_DONE));
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR); intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256); intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
} }
@ -10049,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D); intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) { } else if (HAS_PCH_SPLIT(dev)) {
int found; int found;
dpd_is_edp = intel_dpd_is_edp(dev); dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
if (has_edp_a(dev)) if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A); intel_dp_init(dev, DP_A, PORT_A);
@ -10086,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C); PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
PORT_C);
} }
intel_dsi_init(dev); intel_dsi_init(dev);

View File

@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
} }
/* check the VBT to see whether the eDP is on DP-D port */ /* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev) bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child; union child_device_config *p_child;
int i; int i;
static const short port_mapping[] = {
[PORT_B] = PORT_IDPB,
[PORT_C] = PORT_IDPC,
[PORT_D] = PORT_IDPD,
};
if (port == PORT_A)
return true;
if (!dev_priv->vbt.child_dev_num) if (!dev_priv->vbt.child_dev_num)
return false; return false;
@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i; p_child = dev_priv->vbt.child_dev + i;
if (p_child->common.dvo_port == PORT_IDPD && if (p_child->common.dvo_port == port_mapping[port] &&
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true; return true;
@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg); intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector; intel_dp->attached_connector = intel_connector;
type = DRM_MODE_CONNECTOR_DisplayPort; if (intel_dp_is_edp(dev, port))
/*
* FIXME : We need to initialize built-in panels before external panels.
* For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
*/
switch (port) {
case PORT_A:
type = DRM_MODE_CONNECTOR_eDP; type = DRM_MODE_CONNECTOR_eDP;
break; else
case PORT_C: type = DRM_MODE_CONNECTOR_DisplayPort;
if (IS_VALLEYVIEW(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
case PORT_D:
if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
default: /* silence GCC warning */
break;
}
/* /*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but

View File

@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp); void intel_dp_check_link_status(struct intel_dp *intel_dp);
bool intel_dp_compute_config(struct intel_encoder *encoder, bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config); struct intel_crtc_config *pipe_config);
bool intel_dpd_is_edp(struct drm_device *dev); bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void ironlake_edp_backlight_on(struct intel_dp *intel_dp); void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
void ironlake_edp_backlight_off(struct intel_dp *intel_dp); void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_on(struct intel_dp *intel_dp); void ironlake_edp_panel_on(struct intel_dp *intel_dp);

View File

@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock; clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->htotal; htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8; pixel_size = crtc->fb->bits_per_pixel / 8;
@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
crtc = intel_get_crtc_for_plane(dev, plane); crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock; clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->htotal; htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8; pixel_size = crtc->fb->bits_per_pixel / 8;
@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode = const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(crtc)->config.adjusted_mode; &to_intel_crtc(crtc)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock; int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->htotal; int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8; int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us; unsigned long line_time_us;
@ -1624,7 +1624,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode = const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(enabled)->config.adjusted_mode; &to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock; int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->htotal; int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8; int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us; unsigned long line_time_us;
@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
crtc = intel_get_crtc_for_plane(dev, plane); crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock; clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->htotal; htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8; pixel_size = crtc->fb->bits_per_pixel / 8;
@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
/* The WM are computed with base on how long it takes to fill a single /* The WM are computed with base on how long it takes to fill a single
* row at the given clock rate, multiplied by 8. * row at the given clock rate, multiplied by 8.
* */ * */
linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv)); intel_ddi_get_cdclk_freq(dev_priv));
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |

View File

@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o nouveau-y += core/subdev/clock/nv50.o
nouveau-y += core/subdev/clock/nv84.o nouveau-y += core/subdev/clock/nv84.o
nouveau-y += core/subdev/clock/nva3.o nouveau-y += core/subdev/clock/nva3.o
nouveau-y += core/subdev/clock/nvaa.o
nouveau-y += core/subdev/clock/nvc0.o nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/nve0.o nouveau-y += core/subdev/clock/nve0.o
nouveau-y += core/subdev/clock/pllnv04.o nouveau-y += core/subdev/clock/pllnv04.o

View File

@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;

View File

@ -33,6 +33,7 @@
#include <engine/dmaobj.h> #include <engine/dmaobj.h>
#include <engine/fifo.h> #include <engine/fifo.h>
#include "nv04.h"
#include "nv50.h" #include "nv50.h"
/******************************************************************************* /*******************************************************************************
@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr; nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv50_fifo_cclass; nv_engine(priv)->cclass = &nv50_fifo_cclass;
nv_engine(priv)->sclass = nv50_fifo_sclass; nv_engine(priv)->sclass = nv50_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
return 0; return 0;
} }

View File

@ -35,6 +35,7 @@
#include <engine/dmaobj.h> #include <engine/dmaobj.h>
#include <engine/fifo.h> #include <engine/fifo.h>
#include "nv04.h"
#include "nv50.h" #include "nv50.h"
/******************************************************************************* /*******************************************************************************
@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr; nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass; nv_engine(priv)->cclass = &nv84_fifo_cclass;
nv_engine(priv)->sclass = nv84_fifo_sclass; nv_engine(priv)->sclass = nv84_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
return 0; return 0;
} }

View File

@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
if (ret) if (ret)
return ret; return ret;
chan->vblank.nr_event = pdisp->vblank->index_nr; chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
chan->vblank.event = kzalloc(chan->vblank.nr_event * chan->vblank.event = kzalloc(chan->vblank.nr_event *
sizeof(*chan->vblank.event), GFP_KERNEL); sizeof(*chan->vblank.event), GFP_KERNEL);
if (!chan->vblank.event) if (!chan->vblank.event)

View File

@ -14,6 +14,9 @@ enum nv_clk_src {
nv_clk_src_hclk, nv_clk_src_hclk,
nv_clk_src_hclkm3, nv_clk_src_hclkm3,
nv_clk_src_hclkm3d2, nv_clk_src_hclkm3d2,
nv_clk_src_hclkm2d3, /* NVAA */
nv_clk_src_hclkm4, /* NVAA */
nv_clk_src_cclk, /* NVAA */
nv_clk_src_host, nv_clk_src_host,
@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass; extern struct nouveau_oclass nv40_clock_oclass;
extern struct nouveau_oclass *nv50_clock_oclass; extern struct nouveau_oclass *nv50_clock_oclass;
extern struct nouveau_oclass *nv84_clock_oclass; extern struct nouveau_oclass *nv84_clock_oclass;
extern struct nouveau_oclass *nvaa_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass; extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass; extern struct nouveau_oclass nvc0_clock_oclass;
extern struct nouveau_oclass nve0_clock_oclass; extern struct nouveau_oclass nve0_clock_oclass;

View File

@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
return 0; return 0;
} }
static struct nouveau_clocks
nv04_domain[] = {
{ nv_clk_src_max }
};
static int static int
nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_oclass *oclass, void *data, u32 size,
@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv; struct nv04_clock_priv *priv;
int ret; int ret;
ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
*pobject = nv_object(priv); *pobject = nv_object(priv);
if (ret) if (ret)
return ret; return ret;

View File

@ -0,0 +1,445 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
#include <subdev/clock.h>
#include "pll.h"
struct nvaa_clock_priv {
struct nouveau_clock base;
enum nv_clk_src csrc, ssrc, vsrc;
u32 cctrl, sctrl;
u32 ccoef, scoef;
u32 cpost, spost;
u32 vdiv;
};
static u32
read_div(struct nouveau_clock *clk)
{
return nv_rd32(clk, 0x004600);
}
static u32
read_pll(struct nouveau_clock *clk, u32 base)
{
u32 ctrl = nv_rd32(clk, base + 0);
u32 coef = nv_rd32(clk, base + 4);
u32 ref = clk->read(clk, nv_clk_src_href);
u32 post_div = 0;
u32 clock = 0;
int N1, M1;
switch (base){
case 0x4020:
post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
break;
case 0x4028:
post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
break;
default:
break;
}
N1 = (coef & 0x0000ff00) >> 8;
M1 = (coef & 0x000000ff);
if ((ctrl & 0x80000000) && M1) {
clock = ref * N1 / M1;
clock = clock / post_div;
}
return clock;
}
/* Report the current frequency (kHz) of clock source 'src' by decoding the
 * master clock-source select register (0x00c054) and the relevant PLL and
 * divider registers.  Unknown/unused sources return 0. */
static int
nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
{
	struct nvaa_clock_priv *priv = (void *)clk;
	u32 mast = nv_rd32(clk, 0x00c054);	/* master source select */
	u32 P = 0;	/* right-shift post-divider */
	switch (src) {
	case nv_clk_src_crystal:
		return nv_device(priv)->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		return clk->read(clk, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		return clk->read(clk, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		/* host clock mux; 0x00040000 selects an option this code
		 * doesn't decode — falls through to "unknown" below */
		switch (mast & 0x000c0000) {
		case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
		case 0x00040000: break;
		case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
		case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* post-div lives in core PLL ctrl reg bits 16-18 */
		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
		switch (mast & 0x00000003) {
		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk only differs from core when both gates are set */
		if ((mast & 0x03000000) != 0x03000000)
			return clk->read(clk, nv_clk_src_core);
		if ((mast & 0x00000200) == 0x00000000)
			return clk->read(clk, nv_clk_src_core);
		switch (mast & 0x00000c00) {
		case 0x00000000: return clk->read(clk, nv_clk_src_href);
		case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
		case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return clk->read(clk, nv_clk_src_href) >> P;
			return clk->read(clk, nv_clk_src_crystal) >> P;
		case 0x00000010: break;	/* undecoded option -> "unknown" */
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		/* memory clock not handled on this chipset */
		return 0;
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;
		switch (mast & 0x00400000) {
		case 0x00400000:
			return clk->read(clk, nv_clk_src_core) >> P;
			break;
		default:
			/* fixed 500 MHz source, divided down */
			return 500000 >> P;
			break;
		}
		break;
	default:
		break;
	}
	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
	return 0;
}
/* Compute PLL coefficients (N, M, P) that best hit 'clock' (kHz) for the
 * PLL described by the VBIOS entry for register 'reg'.  Returns the
 * frequency actually achievable, or 0 on failure. */
static u32
calc_pll(struct nvaa_clock_priv *priv, u32 reg,
	 u32 clock, int *N, int *M, int *P)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nouveau_clock *clk = &priv->base;
	struct nvbios_pll pll;

	if (nvbios_pll_parse(bios, reg, &pll))
		return 0;

	/* single-stage only; reference is the PCIE (href) clock */
	pll.vco2.max_freq = 0;
	pll.refclk = clk->read(clk, nv_clk_src_href);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
}
/* Choose a right-shift divider (0..7, stored in *div) bringing 'src' as
 * close as possible to 'target'; returns the resulting frequency.  Picks
 * between the first value at/below target and its double (one shift less),
 * whichever is nearer. */
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 below = src, above = src;
	int p;

	/* Find the smallest shift that puts src at or below the target. */
	for (p = 0; p <= 7; p++) {
		if (below <= target) {
			above = p ? below << 1 : below;
			break;
		}
		below >>= 1;
	}
	*div = p;

	/* Unsigned wrap when below > target intentionally favours 'above'. */
	if (target - below <= above - target)
		return below;

	(*div)--;
	return above;
}
/* Work out a programming strategy for the core (cclk), shader (sclk) and
 * vdec clocks requested by 'cstate', storing the chosen source and register
 * values in priv->{c,s,v}* for nvaa_clock_prog() to apply.  For each clock
 * it compares a divided fixed source against a PLL and keeps whichever
 * lands closer to the request. */
static int
nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
{
	struct nvaa_clock_priv *priv = (void *)clk;
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;
	/* cclk: find suitable source, disable PLL if we can */
	if (core < clk->read(clk, nv_clk_src_hclkm4))
		out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
	/* Calculate clock * 2, so shader clock can use it too */
	clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
	if (abs(core - out) <=
	    abs(core - (clock >> 1))) {
		/* divided hclkm4 is at least as close as the PLL */
		priv->csrc = nv_clk_src_hclkm4;
		priv->cctrl = divs << 16;
	} else {
		/* NVCTRL is actually used _after_ NVPOST, and after what we
		 * call NVPLL. To make matters worse, NVPOST is an integer
		 * divider instead of a right-shift number. */
		if(P1 > 2) {
			/* split the shift across the two divider stages */
			P2 = P1 - 2;
			P1 = 2;
		}
		priv->csrc = nv_clk_src_core;
		priv->ccoef = (N << 8) | M;
		priv->cctrl = (P2 + 1) << 16;
		priv->cpost = (1 << P1) << 16;
	}
	/* sclk: nvpll + divisor, href or spll */
	out = 0;
	if (shader == clk->read(clk, nv_clk_src_href)) {
		/* exact match with the href reference: cheapest option */
		priv->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
		if (priv->csrc == nv_clk_src_core) {
			/* core PLL runs at 2*core; try dividing it down */
			out = calc_P((core << 1), shader, &divs);
		}
		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			/* feed shader from the core PLL via divider */
			priv->ssrc = nv_clk_src_core;
			priv->sctrl = (divs + P2) << 16;
		} else {
			/* dedicated shader PLL */
			priv->ssrc = nv_clk_src_shader;
			priv->scoef = (N << 8) | M;
			priv->sctrl = P1 << 16;
		}
	}
	/* vclk */
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if(abs(vdec - out) <=
	   abs(vdec - clock)) {
		/* divide down the core clock */
		priv->vsrc = nv_clk_src_cclk;
		priv->vdiv = divs << 16;
	} else {
		/* divide down the fixed 500 MHz source */
		priv->vsrc = nv_clk_src_vdec;
		priv->vdiv = P1 << 16;
	}
	/* Print strategy! */
	nv_debug(priv, "nvpll: %08x %08x %08x\n",
			priv->ccoef, priv->cpost, priv->cctrl);
	nv_debug(priv, " spll: %08x %08x %08x\n",
			priv->scoef, priv->spost, priv->sctrl);
	nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
	if (priv->csrc == nv_clk_src_hclkm4)
		nv_debug(priv, "core: hrefm4\n");
	else
		nv_debug(priv, "core: nvpll\n");
	if (priv->ssrc == nv_clk_src_hclkm4)
		nv_debug(priv, "shader: hrefm4\n");
	else if (priv->ssrc == nv_clk_src_core)
		nv_debug(priv, "shader: nvpll\n");
	else
		nv_debug(priv, "shader: spll\n");
	if (priv->vsrc == nv_clk_src_hclkm4)
		nv_debug(priv, "vdec: 500MHz\n");
	else
		nv_debug(priv, "vdec: core\n");
	return 0;
}
/* Apply the strategy computed by nvaa_clock_calc(): idle the chip (pausing
 * PFIFO), drop to safe href-derived clocks, program the PLLs/dividers for
 * the chosen sources, wait for PLL lock, then switch the master mux.
 * Returns 0 on success or -EBUSY if the chip never went idle / PLLs never
 * locked; the 'resume' path always restarts PFIFO and restores gating. */
static int
nvaa_clock_prog(struct nouveau_clock *clk)
{
	struct nvaa_clock_priv *priv = (void *)clk;
	struct nouveau_fifo *pfifo = nouveau_fifo(clk);
	unsigned long flags;
	u32 pllmask = 0, mast, ptherm_gate;
	int ret = -EBUSY;
	/* halt and idle execution engines */
	ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
	nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
	/* Wait until the interrupt handler is finished */
	if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
		goto resume;
	if (pfifo)
		pfifo->pause(pfifo, &flags);
	if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
		goto resume;
	if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
		goto resume;
	/* First switch to safe clocks: href */
	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;
	switch (priv->csrc) {
	case nv_clk_src_hclkm4:
		nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nv_wr32(clk, 0x402c, priv->ccoef);
		nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
		nv_wr32(clk, 0x4040, priv->cpost);
		pllmask |= (0x3 << 8);	/* expect core PLL lock bits */
		mast |= 0x00000003;
		break;
	default:
		nv_warn(priv,"Reclocking failed: unknown core clock\n");
		goto resume;
	}
	switch (priv->ssrc) {
	case nv_clk_src_href:
		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nv_wr32(clk, 0x4024, priv->scoef);
		nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
		nv_wr32(clk, 0x4070, priv->spost);
		pllmask |= (0x3 << 12);	/* expect shader PLL lock bits */
		mast |= 0x00000030;
		break;
	default:
		nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
		goto resume;
	}
	/* wait for all programmed PLLs to report lock */
	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
		nv_warn(priv,"Reclocking failed: unstable PLLs\n");
		goto resume;
	}
	switch (priv->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fallthrough: divider is written for either source */
	default:
		nv_wr32(clk, 0x4600, priv->vdiv);
	}
	/* commit the new master source selection */
	nv_wr32(clk, 0xc054, mast);
	ret = 0;
resume:
	if (pfifo)
		pfifo->start(pfifo, &flags);
	nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
	nv_wr32(clk, 0x020060, ptherm_gate);
	/* Disable some PLLs and dividers when unused */
	if (priv->csrc != nv_clk_src_core) {
		nv_wr32(clk, 0x4040, 0x00000000);
		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
	}
	if (priv->ssrc != nv_clk_src_shader) {
		nv_wr32(clk, 0x4070, 0x00000000);
		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
	}
	return ret;
}
/* No post-reclock cleanup is required on this chipset. */
static void
nvaa_clock_tidy(struct nouveau_clock *clk)
{
}
/* Clock domains exposed by this chipset; core/shader/vdec are reported in
 * kHz (divider 1000).  NOTE(review): the 0xff flag value matches other
 * nouveau clock drivers — confirm its meaning against nouveau_clocks. */
static struct nouveau_clocks
nvaa_domains[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
	{ nv_clk_src_max }
};
/* Constructor: create the base clock object with this chipset's domain
 * table and hook up the read/calc/prog/tidy operations.  *pobject is set
 * before the error check on purpose — the caller destroys a partially
 * constructed object through it (standard nouveau ctor pattern). */
static int
nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nvaa_clock_priv *priv;
	int ret;
	ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;
	priv->base.read = nvaa_clock_read;
	priv->base.calc = nvaa_clock_calc;
	priv->base.prog = nvaa_clock_prog;
	priv->base.tidy = nvaa_clock_tidy;
	return 0;
}
/* Object class registration for the 0xaa (MCP77/MCP79) clock subdev;
 * dtor/init/fini use the generic clock implementations. */
struct nouveau_oclass *
nvaa_clock_oclass = &(struct nouveau_oclass) {
	.handle = NV_SUBDEV(CLOCK, 0xaa),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvaa_clock_ctor,
		.dtor = _nouveau_clock_dtor,
		.init = _nouveau_clock_init,
		.fini = _nouveau_clock_fini,
	},
};

View File

@ -58,8 +58,8 @@ struct nouveau_plane {
}; };
static uint32_t formats[] = { static uint32_t formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_UYVY, DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
}; };
/* Sine can be approximated with /* Sine can be approximated with
@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur; struct nouveau_bo *cur = nv_plane->cur;
bool flip = nv_plane->flip; bool flip = nv_plane->flip;
int format = ALIGN(src_w * 4, 0x100);
int soff = NV_PCRTC0_SIZE * nv_crtc->index; int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
int ret; int format, ret;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
src_y >>= 16;
src_w >>= 16;
src_h >>= 16;
format = ALIGN(src_w * 4, 0x100);
if (format > 0xffff) if (format > 0xffff)
return -EINVAL; return -ERANGE;
if (dev->chipset >= 0x30) {
if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
return -ERANGE;
} else {
if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
return -ERANGE;
}
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
if (ret) if (ret)
@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo; nv_plane->cur = nv_fb->nvbo;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
{ {
struct nouveau_device *dev = nouveau_dev(device); struct nouveau_device *dev = nouveau_dev(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
int num_formats = ARRAY_SIZE(formats);
int ret; int ret;
if (!plane) if (!plane)
return; return;
switch (dev->chipset) {
case 0x10:
case 0x11:
case 0x15:
case 0x1a:
case 0x20:
num_formats = 1;
break;
}
ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
&nv10_plane_funcs, &nv10_plane_funcs,
formats, ARRAY_SIZE(formats), false); formats, num_formats, false);
if (ret) if (ret)
goto err; goto err;

View File

@ -608,6 +608,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fence = nouveau_fence_ref(new_bo->bo.sync_obj); fence = nouveau_fence_ref(new_bo->bo.sync_obj);
spin_unlock(&new_bo->bo.bdev->fence_lock); spin_unlock(&new_bo->bo.bdev->fence_lock);
ret = nouveau_fence_sync(fence, chan); ret = nouveau_fence_sync(fence, chan);
nouveau_fence_unref(&fence);
if (ret) if (ret)
return ret; return ret;
@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) if (s->event)
drm_send_vblank_event(dev, -1, s->event); drm_send_vblank_event(dev, s->crtc, s->event);
list_del(&s->head); list_del(&s->head);
if (ps) if (ps)

View File

@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size) uint32_t start, uint32_t size)
{ {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 end = max(start + size, (u32)256); u32 end = min_t(u32, start + size, 256);
u32 i; u32 i;
for (i = start; i < end; i++) { for (i = start; i < end; i++) {

View File

@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base; unsigned char *base;
u16 out; u16 out = cpu_to_le16(0);
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL; return -EINVAL;
} }
args.ucRegIndex = buf[0]; if (buf == NULL)
if (num > 1) { args.ucRegIndex = 0;
else
args.ucRegIndex = buf[0];
if (num)
num--; num--;
if (num)
memcpy(&out, &buf[1], num); memcpy(&out, &buf[1], num);
}
args.lpI2CDataOut = cpu_to_le16(out); args.lpI2CDataOut = cpu_to_le16(out);
} else { } else {
if (num > ATOM_MAX_HW_I2C_READ) { if (num > ATOM_MAX_HW_I2C_READ) {
@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p; struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret; int i, remaining, current_count, buffer_offset, max_bytes, ret;
u8 buf = 0, flags; u8 flags;
/* check for bus probe */ /* check for bus probe */
p = &msgs[0]; p = &msgs[0];
if ((num == 1) && (p->len == 0)) { if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c, ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE, p->addr, HW_I2C_WRITE,
&buf, 1); NULL, 0);
if (ret) if (ret)
return ret; return ret;
else else

View File

@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset = dig->afmt->offset; u32 offset;
if (!dig->afmt->pin) if (!dig || !dig->afmt || !dig->afmt->pin)
return; return;
offset = dig->afmt->offset;
WREG32(AFMT_AUDIO_SRC_CONTROL + offset, WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
} }
@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct radeon_connector *radeon_connector = NULL; struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0, offset; u32 tmp = 0, offset;
if (!dig->afmt->pin) if (!dig || !dig->afmt || !dig->afmt->pin)
return; return;
offset = dig->afmt->pin->offset; offset = dig->afmt->pin->offset;
@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb; u8 *sadb;
int sad_count; int sad_count;
if (!dig->afmt->pin) if (!dig || !dig->afmt || !dig->afmt->pin)
return; return;
offset = dig->afmt->pin->offset; offset = dig->afmt->pin->offset;
@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
}; };
if (!dig->afmt->pin) if (!dig || !dig->afmt || !dig->afmt->pin)
return; return;
offset = dig->afmt->pin->offset; offset = dig->afmt->pin->offset;

View File

@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
struct ni_ps *ps = ni_get_ps(rps); struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits; struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching; bool disable_mclk_switching;
u32 mclk, sclk; u32 mclk;
u16 vddc, vddci; u16 vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i; int i;
@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
/* XXX validate the min clocks required for display */ /* XXX validate the min clocks required for display */
/* adjust low state */
if (disable_mclk_switching) { if (disable_mclk_switching) {
mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; ps->performance_levels[0].mclk =
sclk = ps->performance_levels[0].sclk; ps->performance_levels[ps->performance_level_count - 1].mclk;
vddc = ps->performance_levels[0].vddc; ps->performance_levels[0].vddci =
vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; ps->performance_levels[ps->performance_level_count - 1].vddci;
} else {
sclk = ps->performance_levels[0].sclk;
mclk = ps->performance_levels[0].mclk;
vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[0].vddci;
} }
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
ps->performance_levels[0].vddc = vddc;
ps->performance_levels[0].vddci = vddci;
btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
&ps->performance_levels[0].sclk, &ps->performance_levels[0].sclk,
&ps->performance_levels[0].mclk); &ps->performance_levels[0].mclk);
@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
} }
/* adjust remaining states */
if (disable_mclk_switching) { if (disable_mclk_switching) {
mclk = ps->performance_levels[0].mclk; mclk = ps->performance_levels[0].mclk;
vddci = ps->performance_levels[0].vddci;
for (i = 1; i < ps->performance_level_count; i++) { for (i = 1; i < ps->performance_level_count; i++) {
if (mclk < ps->performance_levels[i].mclk) if (mclk < ps->performance_levels[i].mclk)
mclk = ps->performance_levels[i].mclk; mclk = ps->performance_levels[i].mclk;
if (vddci < ps->performance_levels[i].vddci)
vddci = ps->performance_levels[i].vddci;
} }
for (i = 0; i < ps->performance_level_count; i++) { for (i = 0; i < ps->performance_level_count; i++) {
ps->performance_levels[i].mclk = mclk; ps->performance_levels[i].mclk = mclk;

View File

@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
} }
} else if (ASIC_IS_DCE3(rdev)) { } else {
/* according to the reg specs, this should DCE3.2 only, but in /* according to the reg specs, this should DCE3.2 only, but in
* practice it seems to cover DCE3.0/3.1 as well. * practice it seems to cover DCE2.0/3.0/3.1 as well.
*/ */
if (dig->dig_encoder == 0) { if (dig->dig_encoder == 0) {
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
} }
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
AUDIO_DTO_MODULE(clock / 10));
} }
} }

View File

@ -2710,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_vm *vm,
struct radeon_fence *fence); struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_bo_update_pte(struct radeon_device *rdev, int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_vm *vm,
struct radeon_bo *bo, struct radeon_bo *bo,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev, void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo); struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,

View File

@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
mpll_param->dll_speed = args.ucDllSpeed; mpll_param->dll_speed = args.ucDllSpeed;
mpll_param->bwcntl = args.ucBWCntl; mpll_param->bwcntl = args.ucBWCntl;
mpll_param->vco_mode = mpll_param->vco_mode =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
mpll_param->yclk_sel = mpll_param->yclk_sel =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
mpll_param->qdr = mpll_param->qdr =

View File

@ -360,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
struct radeon_bo *bo; struct radeon_bo *bo;
int r; int r;
r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
if (r) { if (r) {
return r; return r;
} }
list_for_each_entry(lobj, &parser->validated, tv.head) { list_for_each_entry(lobj, &parser->validated, tv.head) {
bo = lobj->bo; bo = lobj->bo;
r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
if (r) { if (r) {
return r; return r;
} }

View File

@ -108,9 +108,10 @@
* 1.31- Add support for num Z pipes from GET_PARAM * 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup * 1.32- fixes for rv740 setup
* 1.33- Add r6xx/r7xx const buffer support * 1.33- Add r6xx/r7xx const buffer support
* 1.34- fix evergreen/cayman GS register
*/ */
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 33 #define DRIVER_MINOR 34
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
long radeon_drm_ioctl(struct file *filp, long radeon_drm_ioctl(struct file *filp,

View File

@ -29,6 +29,7 @@
#include <drm/radeon_drm.h> #include <drm/radeon_drm.h>
#include "radeon.h" #include "radeon.h"
#include "radeon_reg.h" #include "radeon_reg.h"
#include "radeon_trace.h"
/* /*
* GART * GART
@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
for (i = 0; i < 2; ++i) { for (i = 0; i < 2; ++i) {
if (choices[i]) { if (choices[i]) {
vm->id = choices[i]; vm->id = choices[i];
trace_radeon_vm_grab_id(vm->id, ring);
return rdev->vm_manager.active[choices[i]]; return rdev->vm_manager.active[choices[i]];
} }
} }
@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
} }
/** /**
* radeon_vm_bo_update_pte - map a bo into the vm page table * radeon_vm_bo_update - map a bo into the vm page table
* *
* @rdev: radeon_device pointer * @rdev: radeon_device pointer
* @vm: requested vm * @vm: requested vm
@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* *
* Object have to be reserved & global and local mutex must be locked! * Object have to be reserved & global and local mutex must be locked!
*/ */
int radeon_vm_bo_update_pte(struct radeon_device *rdev, int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_vm *vm,
struct radeon_bo *bo, struct radeon_bo *bo,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
struct radeon_ib ib; struct radeon_ib ib;
struct radeon_bo_va *bo_va; struct radeon_bo_va *bo_va;
@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false; bo_va->valid = false;
} }
trace_radeon_vm_bo_update(bo_va);
nptes = radeon_bo_ngpu_pages(bo); nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */ /* assume two extra pdes in case the mapping overlaps the borders */
@ -1257,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&rdev->vm_manager.lock); mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex); mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) { if (bo_va->soffset) {
r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
} }
mutex_unlock(&rdev->vm_manager.lock); mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list); list_del(&bo_va->vm_list);

View File

@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int temp; int temp;
if (rdev->asic->pm.get_temperature) if (rdev->asic->pm.get_temperature)
@ -566,23 +565,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp); return snprintf(buf, PAGE_SIZE, "%d\n", temp);
} }
static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "radeon\n");
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = { static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL NULL
}; };
@ -607,11 +597,15 @@ static const struct attribute_group hwmon_attrgroup = {
.is_visible = hwmon_attributes_visible, .is_visible = hwmon_attributes_visible,
}; };
static const struct attribute_group *hwmon_groups[] = {
&hwmon_attrgroup,
NULL
};
static int radeon_hwmon_init(struct radeon_device *rdev) static int radeon_hwmon_init(struct radeon_device *rdev)
{ {
int err = 0; int err = 0;
struct device *hwmon_dev;
rdev->pm.int_hwmon_dev = NULL;
switch (rdev->pm.int_thermal_type) { switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX: case THERMAL_TYPE_RV6XX:
@ -624,20 +618,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_KV: case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL) if (rdev->asic->pm.get_temperature == NULL)
return err; return err;
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
if (IS_ERR(rdev->pm.int_hwmon_dev)) { "radeon", rdev,
err = PTR_ERR(rdev->pm.int_hwmon_dev); hwmon_groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
dev_err(rdev->dev, dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err); "Unable to register hwmon device: %d\n", err);
break;
}
dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
&hwmon_attrgroup);
if (err) {
dev_err(rdev->dev,
"Unable to create hwmon sysfs file: %d\n", err);
hwmon_device_unregister(rdev->dev);
} }
break; break;
default: default:
@ -647,14 +634,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
return err; return err;
} }
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
if (rdev->pm.int_hwmon_dev) {
sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}
}
static void radeon_dpm_thermal_work_handler(struct work_struct *work) static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{ {
struct radeon_device *rdev = struct radeon_device *rdev =
@ -1337,8 +1316,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
if (rdev->pm.power_state) if (rdev->pm.power_state)
kfree(rdev->pm.power_state); kfree(rdev->pm.power_state);
radeon_hwmon_fini(rdev);
} }
static void radeon_pm_fini_dpm(struct radeon_device *rdev) static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@ -1358,8 +1335,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
if (rdev->pm.power_state) if (rdev->pm.power_state)
kfree(rdev->pm.power_state); kfree(rdev->pm.power_state);
radeon_hwmon_fini(rdev);
} }
void radeon_pm_fini(struct radeon_device *rdev) void radeon_pm_fini(struct radeon_device *rdev)

View File

@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
__entry->fences) __entry->fences)
); );
TRACE_EVENT(radeon_vm_grab_id,
TP_PROTO(unsigned vmid, int ring),
TP_ARGS(vmid, ring),
TP_STRUCT__entry(
__field(u32, vmid)
__field(u32, ring)
),
TP_fast_assign(
__entry->vmid = vmid;
__entry->ring = ring;
),
TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);
TRACE_EVENT(radeon_vm_bo_update,
TP_PROTO(struct radeon_bo_va *bo_va),
TP_ARGS(bo_va),
TP_STRUCT__entry(
__field(u64, soffset)
__field(u64, eoffset)
__field(u32, flags)
),
TP_fast_assign(
__entry->soffset = bo_va->soffset;
__entry->eoffset = bo_va->eoffset;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
__entry->soffset, __entry->eoffset, __entry->flags)
);
TRACE_EVENT(radeon_vm_set_page, TRACE_EVENT(radeon_vm_set_page,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags), uint32_t incr, uint32_t flags),

View File

@ -21,7 +21,7 @@ cayman 0x9400
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM 0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE 0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE 0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE 0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE 0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@ -532,7 +532,7 @@ cayman 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B74 VGT_GS_INSTANCE_CNT 0x00028B90 VGT_GS_INSTANCE_CNT
0x00028BD4 PA_SC_CENTROID_PRIORITY_0 0x00028BD4 PA_SC_CENTROID_PRIORITY_0
0x00028BD8 PA_SC_CENTROID_PRIORITY_1 0x00028BD8 PA_SC_CENTROID_PRIORITY_1
0x00028BDC PA_SC_LINE_CNTL 0x00028BDC PA_SC_LINE_CNTL

View File

@ -22,7 +22,7 @@ evergreen 0x9400
0x000089A4 VGT_COMPUTE_START_Z 0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE 0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE 0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE 0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE 0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@ -545,7 +545,7 @@ evergreen 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B74 VGT_GS_INSTANCE_CNT 0x00028B90 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL 0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL 0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ

View File

@ -3882,8 +3882,15 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */ /* size in MB on si */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; tmp = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; /* some boards may have garbage in the upper 16 bits */
if (tmp & 0xffff0000) {
DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
if (tmp & 0xffff)
tmp &= 0xffff;
}
rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size; rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc); si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev); radeon_update_bandwidth_info(rdev);

View File

@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_relocs = args->num_relocs; unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks; unsigned int num_waitchks = args->num_waitchks;
struct drm_tegra_cmdbuf __user *cmdbufs = struct drm_tegra_cmdbuf __user *cmdbufs =
(void * __user)(uintptr_t)args->cmdbufs; (void __user *)(uintptr_t)args->cmdbufs;
struct drm_tegra_reloc __user *relocs = struct drm_tegra_reloc __user *relocs =
(void * __user)(uintptr_t)args->relocs; (void __user *)(uintptr_t)args->relocs;
struct drm_tegra_waitchk __user *waitchks = struct drm_tegra_waitchk __user *waitchks =
(void * __user)(uintptr_t)args->waitchks; (void __user *)(uintptr_t)args->waitchks;
struct drm_tegra_syncpt syncpt; struct drm_tegra_syncpt syncpt;
struct host1x_job *job; struct host1x_job *job;
int err; int err;
@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_cmdbuf cmdbuf; struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo; struct host1x_bo *bo;
err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
if (err) err = -EFAULT;
goto fail; goto fail;
}
bo = host1x_bo_lookup(drm, file, cmdbuf.handle); bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
if (!bo) { if (!bo) {
@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++; cmdbufs++;
} }
err = copy_from_user(job->relocarray, relocs, if (copy_from_user(job->relocarray, relocs,
sizeof(*relocs) * num_relocs); sizeof(*relocs) * num_relocs)) {
if (err) err = -EFAULT;
goto fail; goto fail;
}
while (num_relocs--) { while (num_relocs--) {
struct host1x_reloc *reloc = &job->relocarray[num_relocs]; struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
} }
} }
err = copy_from_user(job->waitchk, waitchks, if (copy_from_user(job->waitchk, waitchks,
sizeof(*waitchks) * num_waitchks); sizeof(*waitchks) * num_waitchks)) {
if (err) err = -EFAULT;
goto fail; goto fail;
}
err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
sizeof(syncpt)); sizeof(syncpt))) {
if (err) err = -EFAULT;
goto fail; goto fail;
}
job->is_addr_reg = context->client->ops->is_addr_reg; job->is_addr_reg = context->client->ops->is_addr_reg;
job->syncpt_incrs = syncpt.incrs; job->syncpt_incrs = syncpt.incrs;
@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
} }
#endif #endif
struct drm_driver tegra_drm_driver = { static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM, .driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load, .load = tegra_drm_load,
.unload = tegra_drm_unload, .unload = tegra_drm_unload,

View File

@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
{ {
return container_of(crtc, struct tegra_dc, base); return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
} }
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,

View File

@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->var.yoffset * fb->pitches[0]; info->var.yoffset * fb->pitches[0];
drm->mode_config.fb_base = (resource_size_t)bo->paddr; drm->mode_config.fb_base = (resource_size_t)bo->paddr;
info->screen_base = bo->vaddr + offset; info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size; info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->paddr + offset); info->fix.smem_start = (unsigned long)(bo->paddr + offset);
info->fix.smem_len = size; info->fix.smem_len = size;

View File

@ -14,6 +14,8 @@
struct tegra_rgb { struct tegra_rgb {
struct tegra_output output; struct tegra_output output;
struct tegra_dc *dc;
struct clk *clk_parent; struct clk *clk_parent;
struct clk *clk; struct clk *clk;
}; };
@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
static int tegra_output_rgb_enable(struct tegra_output *output) static int tegra_output_rgb_enable(struct tegra_output *output)
{ {
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); struct tegra_rgb *rgb = to_rgb(output);
tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
return 0; return 0;
} }
static int tegra_output_rgb_disable(struct tegra_output *output) static int tegra_output_rgb_disable(struct tegra_output *output)
{ {
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); struct tegra_rgb *rgb = to_rgb(output);
tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
return 0; return 0;
} }
@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->output.dev = dc->dev; rgb->output.dev = dc->dev;
rgb->output.of_node = np; rgb->output.of_node = np;
rgb->dc = dc;
err = tegra_output_probe(&rgb->output); err = tegra_output_probe(&rgb->output);
if (err < 0) if (err < 0)

View File

@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
static void udl_gem_put_pages(struct udl_gem_object *obj) static void udl_gem_put_pages(struct udl_gem_object *obj)
{ {
if (obj->base.import_attach) {
drm_free_large(obj->pages);
obj->pages = NULL;
return;
}
drm_gem_put_pages(&obj->base, obj->pages, false, false); drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL; obj->pages = NULL;
} }

View File

@ -150,6 +150,8 @@ struct vmw_ttm_tt {
bool mapped; bool mapped;
}; };
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/** /**
* Helper functions to advance a struct vmw_piter iterator. * Helper functions to advance a struct vmw_piter iterator.
* *

View File

@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
* TTM buffer object driver - vmwgfx_buffer.c * TTM buffer object driver - vmwgfx_buffer.c
*/ */
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement; extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement; extern struct ttm_placement vmw_vram_sys_placement;

View File

@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
vmw_surface_unreference(&du->cursor_surface); vmw_surface_unreference(&du->cursor_surface);
if (du->cursor_dmabuf) if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf); vmw_dmabuf_unreference(&du->cursor_dmabuf);
drm_sysfs_connector_remove(&du->connector);
drm_crtc_cleanup(&du->crtc); drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder); drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector); drm_connector_cleanup(&du->connector);

View File

@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL; connector->encoder = NULL;
encoder->crtc = NULL; encoder->crtc = NULL;
crtc->fb = NULL; crtc->fb = NULL;
crtc->enabled = false;
vmw_ldu_del_active(dev_priv, ldu); vmw_ldu_del_active(dev_priv, ldu);
@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
crtc->x = set->x; crtc->x = set->x;
crtc->y = set->y; crtc->y = set->y;
crtc->mode = *mode; crtc->mode = *mode;
crtc->enabled = true;
vmw_ldu_add_active(dev_priv, ldu, vfb); vmw_ldu_add_active(dev_priv, ldu, vfb);
@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit); encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0; encoder->possible_clones = 0;
(void) drm_sysfs_connector_add(connector);
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256); drm_mode_crtc_set_gamma_size(crtc, 256);

View File

@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
/** /**
* Buffer management. * Buffer management.
*/ */
/**
* vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @size: The requested buffer size.
* @user: Whether this is an ordinary dma buffer or a user dma buffer.
*/
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
bool user)
{
static size_t struct_size, user_struct_size;
size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
if (unlikely(struct_size == 0)) {
size_t backend_size = ttm_round_pot(vmw_tt_size);
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_dma_buffer));
user_struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
page_array_size +=
ttm_round_pot(num_pages * sizeof(dma_addr_t));
return ((user) ? user_struct_size : struct_size) +
page_array_size;
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{ {
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
kfree(vmw_bo); kfree(vmw_bo);
} }
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv, int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo, struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement, size_t size, struct ttm_placement *placement,
@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct ttm_bo_device *bdev = &dev_priv->bdev; struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size; size_t acc_size;
int ret; int ret;
bool user = (bo_free == &vmw_user_dmabuf_destroy);
BUG_ON(!bo_free); BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo)); memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list); INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size, ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement, (user) ? ttm_bo_type_device :
ttm_bo_type_kernel, placement,
0, interruptible, 0, interruptible,
NULL, acc_size, NULL, bo_free); NULL, acc_size, NULL, bo_free);
return ret; return ret;
} }
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{ {
struct vmw_user_dma_buffer *vmw_user_bo; struct vmw_user_dma_buffer *vmw_user_bo;
@ -781,54 +815,55 @@ err_ref:
} }
/**
* vmw_dumb_create - Create a dumb kms buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @args: Pointer to a struct drm_mode_create_dumb structure
*
* This is a driver callback for the core drm create_dumb functionality.
* Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
* that the arguments have a different format.
*/
int vmw_dumb_create(struct drm_file *file_priv, int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev, struct drm_device *dev,
struct drm_mode_create_dumb *args) struct drm_mode_create_dumb *args)
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master); struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_user_dma_buffer *vmw_user_bo; struct vmw_dma_buffer *dma_buf;
struct ttm_buffer_object *tmp;
int ret; int ret;
args->pitch = args->width * ((args->bpp + 7) / 8); args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height; args->size = args->pitch * args->height;
vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
if (vmw_user_bo == NULL)
return -ENOMEM;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&vmaster->lock, true);
if (ret != 0) { if (unlikely(ret != 0))
kfree(vmw_user_bo);
return ret; return ret;
}
ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
&vmw_vram_sys_placement, true, args->size, false, &args->handle,
&vmw_user_dmabuf_destroy); &dma_buf);
if (ret != 0) if (unlikely(ret != 0))
goto out_no_dmabuf; goto out_no_dmabuf;
tmp = ttm_bo_reference(&vmw_user_bo->dma.base); vmw_dmabuf_unreference(&dma_buf);
ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
args->size,
&vmw_user_bo->prime,
false,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0))
goto out_no_base_object;
args->handle = vmw_user_bo->prime.base.hash.key;
out_no_base_object:
ttm_bo_unref(&tmp);
out_no_dmabuf: out_no_dmabuf:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&vmaster->lock);
return ret; return ret;
} }
/**
* vmw_dumb_map_offset - Return the address space offset of a dumb buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @handle: Handle identifying the dumb buffer.
* @offset: The address space offset returned.
*
* This is a driver callback for the core drm dumb_map_offset functionality.
*/
int vmw_dumb_map_offset(struct drm_file *file_priv, int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle, struct drm_device *dev, uint32_t handle,
uint64_t *offset) uint64_t *offset)
@ -846,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
return 0; return 0;
} }
/**
* vmw_dumb_destroy - Destroy a dumb boffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @handle: Handle identifying the dumb buffer.
*
* This is a driver callback for the core drm dumb_destroy functionality.
*/
int vmw_dumb_destroy(struct drm_file *file_priv, int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev, struct drm_device *dev,
uint32_t handle) uint32_t handle)

View File

@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL; crtc->fb = NULL;
crtc->x = 0; crtc->x = 0;
crtc->y = 0; crtc->y = 0;
crtc->enabled = false;
vmw_sou_del_active(dev_priv, sou); vmw_sou_del_active(dev_priv, sou);
@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL; crtc->fb = NULL;
crtc->x = 0; crtc->x = 0;
crtc->y = 0; crtc->y = 0;
crtc->enabled = false;
return ret; return ret;
} }
@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = fb; crtc->fb = fb;
crtc->x = set->x; crtc->x = set->x;
crtc->y = set->y; crtc->y = set->y;
crtc->enabled = true;
return 0; return 0;
} }
@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit); encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0; encoder->possible_clones = 0;
(void) drm_sysfs_connector_add(connector);
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256); drm_mode_crtc_set_gamma_size(crtc, 256);

View File

@ -19,6 +19,7 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/slab.h> #include <linux/slab.h>
#include "bus.h"
#include "dev.h" #include "dev.h"
static DEFINE_MUTEX(clients_lock); static DEFINE_MUTEX(clients_lock);
@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x,
return -ENODEV; return -ENODEV;
} }
struct bus_type host1x_bus_type = { static struct bus_type host1x_bus_type = {
.name = "host1x", .name = "host1x",
}; };
@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask; device->dev.dma_mask = &device->dev.coherent_dma_mask;
device->dev.release = host1x_device_release; device->dev.release = host1x_device_release;
dev_set_name(&device->dev, driver->name); dev_set_name(&device->dev, "%s", driver->name);
device->dev.bus = &host1x_bus_type; device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev; device->dev.parent = host1x->dev;

View File

@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
u32 *p = (u32 *)((u32)pb->mapped + getptr); u32 *p = (u32 *)((u32)pb->mapped + getptr);
*(p++) = HOST1X_OPCODE_NOP; *(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP; *(p++) = HOST1X_OPCODE_NOP;
dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
pb->phys + getptr); (u64)pb->phys + getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1); getptr = (getptr + 8) & (pb->size_bytes - 1);
} }
wmb(); wmb();

View File

@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
continue; continue;
} }
host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n", host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
g->base, g->offset, g->words); (u64)g->base, g->offset, g->words);
show_gather(o, g->base + g->offset, g->words, cdma, show_gather(o, g->base + g->offset, g->words, cdma,
g->base, mapped); g->base, mapped);