diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index cdd8b24db68d..cc303a2f641c 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -229,6 +229,7 @@ X!Isound/sound_firmware.c !Iinclude/media/v4l2-dv-timings.h !Iinclude/media/v4l2-event.h !Iinclude/media/v4l2-flash-led-class.h +!Iinclude/media/v4l2-mc.h !Iinclude/media/v4l2-mediabus.h !Iinclude/media/v4l2-mem2mem.h !Iinclude/media/v4l2-of.h
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index fe6b36a2fd98..1692c4dd5487 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -1816,7 +1816,7 @@ void intel_crt_init(struct drm_device *dev) Description/Restrictions - DRM + DRM Generic “rotation” BITMASK @@ -2068,7 +2068,7 @@ void intel_crt_init(struct drm_device *dev) property to suggest an Y offset for a connector - Optional + Optional “scaling mode” ENUM { "None", "Full", "Center", "Full aspect" } @@ -2092,6 +2092,61 @@ void intel_crt_init(struct drm_device *dev) TBD + “DEGAMMA_LUT” + BLOB + 0 + CRTC + DRM property to set the degamma lookup table + (LUT) mapping pixel data from the framebuffer before it is + given to the transformation matrix. The data is interpreted + as an array of struct drm_color_lut elements. Hardware might + choose not to use the full precision of the LUT elements nor + use all the elements of the LUT (for example the hardware + might choose to interpolate between LUT[0] and LUT[4]). + + + “DEGAMMA_LUT_SIZE” + RANGE | IMMUTABLE + Min=0, Max=UINT_MAX + CRTC + DRM property that gives the size of the lookup + table to be set on the DEGAMMA_LUT property (the size depends + on the underlying hardware). + + + “CTM” + BLOB + 0 + CRTC + DRM property to set the current + transformation matrix (CTM) applied to pixel data after the + lookup through the degamma LUT and before the lookup through + the gamma LUT. The data is interpreted as a struct + drm_color_ctm. + + + “GAMMA_LUT” + BLOB + 0 + CRTC + DRM property to set the gamma lookup table + (LUT) mapping pixel data after the transformation matrix to + the data sent to the connector. The data is interpreted as an + array of struct drm_color_lut elements. Hardware might choose + not to use the full precision of the LUT elements nor use all + the elements of the LUT (for example the hardware might choose + to interpolate between LUT[0] and LUT[4]). + + + “GAMMA_LUT_SIZE” + RANGE | IMMUTABLE + Min=0, Max=UINT_MAX + CRTC + DRM property that gives the size of the lookup + table to be set on the GAMMA_LUT property (the size depends on + the underlying hardware). + + i915 Generic "Broadcast RGB"
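Userspace programs these color management properties by wrapping the table in a property blob and attaching the blob id to the CRTC. A minimal sketch, assuming the struct drm_color_lut layout documented above and libdrm's blob/atomic helpers; the CRTC id, the GAMMA_LUT property id and the lut_size read from GAMMA_LUT_SIZE are assumed to have been looked up beforehand:

    #include <stdint.h>
    #include <stdlib.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Upload an identity ramp to GAMMA_LUT. The hardware may ignore
     * precision or entries beyond what it supports, as noted above. */
    static int set_identity_gamma(int fd, drmModeAtomicReq *req,
                                  uint32_t crtc_id, uint32_t gamma_lut_prop,
                                  uint32_t lut_size)
    {
            struct drm_color_lut *lut;
            uint32_t blob_id = 0;
            uint32_t i;
            int ret;

            if (lut_size < 2)
                    return -1;
            lut = calloc(lut_size, sizeof(*lut));
            if (!lut)
                    return -1;
            for (i = 0; i < lut_size; i++) {
                    uint16_t v = (uint16_t)(0xffffULL * i / (lut_size - 1));
                    lut[i].red = lut[i].green = lut[i].blue = v;
            }
            ret = drmModeCreatePropertyBlob(fd, lut, lut_size * sizeof(*lut),
                                            &blob_id);
            free(lut);
            if (ret)
                    return ret;
            /* The blob id is the property value; the change takes effect
             * on the following drmModeAtomicCommit(). */
            return drmModeAtomicAddProperty(req, crtc_id, gamma_lut_prop,
                                            blob_id) < 0 ? -1 : 0;
    }

An identity ramp like this is also the natural value to restore after experimenting with non-linear curves.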
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml index f13a429093f1..361040e6b0f4 100644 --- a/Documentation/DocBook/media/v4l/controls.xml +++ b/Documentation/DocBook/media/v4l/controls.xml @@ -2329,6 +2329,14 @@ to search and match for the present Macroblock (MB) in the reference picture. Th vertical search range for motion estimation module in video encoder. + + + V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME + button + Force a key frame for the next queued buffer. Applicable to encoders. +This is a general, codec-agnostic keyframe control. + + V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE @@ -5069,6 +5077,46 @@ interface and may change in the future. This control is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors. + + V4L2_CID_DV_TX_IT_CONTENT_TYPE + enum v4l2_dv_it_content_type + + Configures the IT Content Type + of the transmitted video. This information is sent over HDMI and DisplayPort connectors + as part of the AVI InfoFrame. The term 'IT Content' is used for content that originates + from a computer as opposed to content from a TV broadcast or an analog source. The + enum v4l2_dv_it_content_type defines the possible content types: + + + + + + V4L2_DV_IT_CONTENT_TYPE_GRAPHICS + Graphics content. Pixel data should be passed unfiltered and without + analog reconstruction. + + + V4L2_DV_IT_CONTENT_TYPE_PHOTO + Photo content. The content is derived from digital still pictures. + The content should be passed through with minimal scaling and picture + enhancements. + + + V4L2_DV_IT_CONTENT_TYPE_CINEMA + Cinema content. + + + V4L2_DV_IT_CONTENT_TYPE_GAME + Game content. Audio and video latency should be minimized. + + + V4L2_DV_IT_CONTENT_TYPE_NO_ITC + No IT Content information is available and the ITC bit in the AVI + InfoFrame is set to 0. + + + + V4L2_CID_DV_RX_POWER_PRESENT bitmask @@ -5098,6 +5146,16 @@ interface and may change in the future. This control is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors. + + V4L2_CID_DV_RX_IT_CONTENT_TYPE + enum v4l2_dv_it_content_type + + Reads the IT Content Type + of the received video. This information is sent over HDMI and DisplayPort connectors + as part of the AVI InfoFrame. The term 'IT Content' is used for content that originates + from a computer as opposed to content from a TV broadcast or an analog source. See + V4L2_CID_DV_TX_IT_CONTENT_TYPE for the available content types. +
diff --git a/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml b/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml index 63152ab9efba..e0d49fa329f0 100644 --- a/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml +++ b/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml @@ -48,9 +48,6 @@ Description - - NOTE: This new ioctl is programmed to be added on Kernel 4.6. Its definition/arguments may change until its final version. - The typical usage of this ioctl is to call it twice. On the first call, the structure defined at &media-v2-topology; should be zeroed. At return, if no errors happen, this ioctl will return the
diff --git a/Documentation/DocBook/media/v4l/media-types.xml b/Documentation/DocBook/media/v4l/media-types.xml index 1af384250910..8b4fa39cf611 100644 --- a/Documentation/DocBook/media/v4l/media-types.xml +++ b/Documentation/DocBook/media/v4l/media-types.xml @@ -56,10 +56,6 @@ MEDIA_ENT_F_CONN_COMPOSITE Connector for a RGB composite signal. - - MEDIA_ENT_F_CONN_TEST - Connector for a test generator. - MEDIA_ENT_F_CAM_SENSOR Camera video sensor entity. @@ -84,7 +80,34 @@ MEDIA_ENT_F_TUNER - Digital TV, analog TV, radio and/or software radio tuner. + Digital TV, analog TV, radio and/or software radio tuner, + which consists of a PLL tuning stage that converts the radio + frequency (RF) signal into an intermediate frequency (IF). + Modern tuners have internal IF-PLL decoders for audio + and video, but older models have those stages implemented + as separate entities. + + + + MEDIA_ENT_F_IF_VID_DECODER + IF-PLL video decoder. It receives the IF from a PLL + and decodes the analog TV video signal. This is commonly + found on some very old analog tuners, like Philips MK3 + designs. They all contain a tda9887 (or a similar + software-compatible chip, like the tda9885). Those devices + use a different I2C address than the tuner PLL.
+ + + + MEDIA_ENT_F_IF_AUD_DECODER + IF-PLL sound decoder. It receives the IF from a PLL + and decodes the analog TV audio signal. This is commonly + found on some very old analog hardware, like Micronas + msp3400, Philips tda9840, tda985x, etc. Those devices + use a different I2C address than the tuner PLL and + should be controlled together with the IF-PLL video + decoder. + diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml index e781cc61786c..7d13fe96657d 100644 --- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml +++ b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml @@ -1,35 +1,43 @@ - + - V4L2_PIX_FMT_YUV420M ('YM12') + V4L2_PIX_FMT_YUV420M ('YM12'), V4L2_PIX_FMT_YVU420M ('YM21') &manvol; - V4L2_PIX_FMT_YUV420M - Variation of V4L2_PIX_FMT_YUV420 - with planes non contiguous in memory. + V4L2_PIX_FMT_YUV420M + V4L2_PIX_FMT_YVU420M + Variation of V4L2_PIX_FMT_YUV420 and + V4L2_PIX_FMT_YVU420 with planes non contiguous + in memory. Description This is a multi-planar format, as opposed to a packed format. -The three components are separated into three sub- images or planes. +The three components are separated into three sub-images or planes. -The Y plane is first. The Y plane has one byte per pixel. The Cb data + The Y plane is first. The Y plane has one byte per pixel. +For V4L2_PIX_FMT_YUV420M the Cb data constitutes the second plane which is half the width and half the height of the Y plane (and of the image). Each Cb belongs to four pixels, a two-by-two square of the image. For example, Cb0 belongs to Y'00, Y'01, Y'10, and Y'11. The Cr data, just like the Cb plane, is -in the third plane. +in the third plane. + + V4L2_PIX_FMT_YVU420M is the same except +the Cr data is stored in the second plane and the Cb data in the third plane. + If the Y plane has pad bytes after each row, then the Cb and Cr planes have half as many pad bytes after their rows. In other words, two Cx rows (including padding) is exactly as long as one Y row (including padding). - V4L2_PIX_FMT_YUV420M is intended to be + V4L2_PIX_FMT_YUV420M and +V4L2_PIX_FMT_YVU420M are intended to be used only in drivers and applications that support the multi-planar API, described in . diff --git a/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml similarity index 58% rename from Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml rename to Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml index 2330667907c7..dd502802cb75 100644 --- a/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml +++ b/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml @@ -1,40 +1,45 @@ - + - V4L2_PIX_FMT_YVU420M ('YM21') + V4L2_PIX_FMT_YUV422M ('YM16'), V4L2_PIX_FMT_YVU422M ('YM61') &manvol; - V4L2_PIX_FMT_YVU420M - Variation of V4L2_PIX_FMT_YVU420 - with planes non contiguous in memory. + V4L2_PIX_FMT_YUV422M + V4L2_PIX_FMT_YVU422M + Planar formats with ½ horizontal resolution, also + known as YUV and YVU 4:2:2 Description This is a multi-planar format, as opposed to a packed format. -The three components are separated into three sub-images or planes. +The three components are separated into three sub-images or planes. -The Y plane is first. The Y plane has one byte per pixel. The Cr data -constitutes the second plane which is half the width and half -the height of the Y plane (and of the image). Each Cr belongs to four -pixels, a two-by-two square of the image. For example, -Cr0 belongs to Y'00, -Y'01, Y'10, and -Y'11. 
The Cb data, just like the Cr plane, constitutes -the third plane. + The Y plane is first. The Y plane has one byte per pixel. +For V4L2_PIX_FMT_YUV422M the Cb data +constitutes the second plane which is half the width of the Y plane (and of the +image). Each Cb belongs to two pixels. For example, +Cb0 belongs to Y'00, +Y'01. The Cr data, just like the Cb plane, is +in the third plane. - If the Y plane has pad bytes after each row, then the Cr -and Cb planes have half as many pad bytes after their rows. In other + V4L2_PIX_FMT_YVU422M is the same except +the Cr data is stored in the second plane and the Cb data in the third plane. + + + If the Y plane has pad bytes after each row, then the Cb +and Cr planes have half as many pad bytes after their rows. In other words, two Cx rows (including padding) is exactly as long as one Y row (including padding). - V4L2_PIX_FMT_YVU420M is intended to be + V4L2_PIX_FMT_YUV422M and +V4L2_PIX_FMT_YVU422M are intended to be used only in drivers and applications that support the multi-planar API, described in . - <constant>V4L2_PIX_FMT_YVU420M</constant> 4 × 4 + <title><constant>V4L2_PIX_FMT_YUV422M</constant> 4 × 4 pixel image @@ -75,25 +80,45 @@ pixel image start1 + 0: - Cr00 - Cr01 - - - start1 + 2: - Cr10 - Cr11 - - - - start2 + 0: Cb00 Cb01 - start2 + 2: + start1 + 2: Cb10 Cb11 + + start1 + 4: + Cb20 + Cb21 + + + start1 + 6: + Cb30 + Cb31 + + + + start2 + 0: + Cr00 + Cr01 + + + start2 + 2: + Cr10 + Cr11 + + + start2 + 4: + Cr20 + Cr21 + + + start2 + 6: + Cr30 + Cr31 + @@ -113,36 +138,23 @@ pixel image 0 - YY - YY - - - - C - C + YCY + YCY 1 - YY - YY - - - + YCY + YCY 2 - YY - YY - - - - C - C + YCY + YCY 3 - YY - YY + YCY + YCY
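Applications pick one of the new three-plane formats through the ordinary multi-planar format negotiation. A minimal sketch, assuming a videodev2.h that already defines the new V4L2_PIX_FMT_YUV422M fourcc; the driver fills in num_planes and the per-plane bytesperline/sizeimage following the layout rules above:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Negotiate three-plane YUV 4:2:2 on a multi-planar capture queue.
     * For 'YM16' the driver reports three planes; Cb/Cr rows come out
     * half as long as Y rows, as described above. */
    static int set_yuv422m(int fd, unsigned int width, unsigned int height)
    {
            struct v4l2_format fmt;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            fmt.fmt.pix_mp.width = width;
            fmt.fmt.pix_mp.height = height;
            fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUV422M;
            fmt.fmt.pix_mp.field = V4L2_FIELD_NONE;

            if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
                    return -1;
            /* fmt.fmt.pix_mp.num_planes is now 3 for this format */
            return 0;
    }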
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml new file mode 100644 index 000000000000..1b7335940bc7 --- /dev/null +++ b/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml @@ -0,0 +1,177 @@ + + + V4L2_PIX_FMT_YUV444M ('YM24'), V4L2_PIX_FMT_YVU444M ('YM42') + &manvol; + + + V4L2_PIX_FMT_YUV444M + V4L2_PIX_FMT_YVU444M + Planar formats with full horizontal resolution, also + known as YUV and YVU 4:4:4 + + + + Description + + This is a multi-planar format, as opposed to a packed format. +The three components are separated into three sub-images or planes. + + The Y plane is first. The Y plane has one byte per pixel. +For V4L2_PIX_FMT_YUV444M the Cb data +constitutes the second plane which is the same width and height as the Y plane +(and as the image). The Cr data, just like the Cb plane, is in the third plane. + + + V4L2_PIX_FMT_YVU444M is the same except +the Cr data is stored in the second plane and the Cb data in the third plane. + + If the Y plane has pad bytes after each row, then the Cb +and Cr planes have the same number of pad bytes after their rows. + + V4L2_PIX_FMT_YUV444M and +V4L2_PIX_FMT_YVU444M are intended to be +used only in drivers and applications that support the multi-planar API, +described in . + + + <constant>V4L2_PIX_FMT_YUV444M</constant> 4 × 4 +pixel image + + + Byte Order. + Each cell is one byte. + + + + + + start0 + 0: + Y'00 + Y'01 + Y'02 + Y'03 + + + start0 + 4: + Y'10 + Y'11 + Y'12 + Y'13 + + + start0 + 8: + Y'20 + Y'21 + Y'22 + Y'23 + + + start0 + 12: + Y'30 + Y'31 + Y'32 + Y'33 + + + + start1 + 0: + Cb00 + Cb01 + Cb02 + Cb03 + + + start1 + 4: + Cb10 + Cb11 + Cb12 + Cb13 + + + start1 + 8: + Cb20 + Cb21 + Cb22 + Cb23 + + + start1 + 12: + Cb30 + Cb31 + Cb32 + Cb33 + + + + start2 + 0: + Cr00 + Cr01 + Cr02 + Cr03 + + + start2 + 4: + Cr10 + Cr11 + Cr12 + Cr13 + + + start2 + 8: + Cr20 + Cr21 + Cr22 + Cr23 + + + start2 + 12: + Cr30 + Cr31 + Cr32 + Cr33 + + + + + + + + + Color Sample Location. + + + + + + + 01 + 23 + + + 0 + YCYC + YCYC + + + 1 + YCYC + YCYC + + + 2 + YCYC + YCYC + + + 3 + YCYC + YCYC + + + + + + + + +
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml index d871245d2973..2f02f9441443 100644 --- a/Documentation/DocBook/media/v4l/pixfmt.xml +++ b/Documentation/DocBook/media/v4l/pixfmt.xml @@ -1628,7 +1628,8 @@ information. &sub-y41p; &sub-yuv420; &sub-yuv420m; - &sub-yvu420m; + &sub-yuv422m; + &sub-yuv444m; &sub-yuv410; &sub-yuv422p; &sub-yuv411p;
diff --git a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml index e9c70a8f3476..0c93677d16b4 100644 --- a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml +++ b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml @@ -60,9 +60,19 @@ input automatically, similar to sensing the video standard. To do so, applications call VIDIOC_QUERY_DV_TIMINGS with a pointer to a &v4l2-dv-timings;. Once the hardware detects the timings, it will fill in the -timings structure. +timings structure. -If the timings could not be detected because there was no signal, then +Please note that drivers shall not switch timings automatically +if new timings are detected. Instead, drivers should send the +V4L2_EVENT_SOURCE_CHANGE event (if they support this) and expect +that userspace will take action by calling VIDIOC_QUERY_DV_TIMINGS. +The reason is that new timings usually mean different buffer sizes as well, and you +cannot change buffer sizes on the fly. In general, applications that receive the +Source Change event will have to call VIDIOC_QUERY_DV_TIMINGS, +and if the detected timings are valid they will have to stop streaming, set the new +timings, allocate new buffers and start streaming again. + +If the timings could not be detected because there was no signal, then ENOLINK is returned. If a signal was detected, but it was unstable and the receiver could not lock to the signal, then ENOLCK is returned. If the receiver could lock to the signal,
diff --git a/Documentation/DocBook/media/v4l/vidioc-querystd.xml b/Documentation/DocBook/media/v4l/vidioc-querystd.xml index 222348542182..3ceae35fab03 100644 --- a/Documentation/DocBook/media/v4l/vidioc-querystd.xml +++ b/Documentation/DocBook/media/v4l/vidioc-querystd.xml @@ -59,6 +59,16 @@ then the driver will return V4L2_STD_UNKNOWN. When detection is not possible or fails, the set must contain all standards supported by the current video input or output. +Please note that drivers shall not switch the video standard +automatically if a new video standard is detected. Instead, drivers should send the +V4L2_EVENT_SOURCE_CHANGE event (if they support this) and expect +that userspace will take action by calling VIDIOC_QUERYSTD. +The reason is that a new video standard can mean different buffer sizes as well, and you +cannot change buffer sizes on the fly.
In general, applications that receive the +Source Change event will have to call VIDIOC_QUERYSTD, +and if the detected video standard is valid they will have to stop streaming, set the new +standard, allocate new buffers and start streaming again. + diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index e8d25e784214..ff49cf901148 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt @@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and conventions of cgroup v2. It describes all userland-visible aspects of cgroup including core and specific controller behaviors. All future changes must be reflected in this document. Documentation for -v1 is available under Documentation/cgroup-legacy/. +v1 is available under Documentation/cgroup-v1/. CONTENTS diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt index a2bd593881ca..66422d663184 100644 --- a/Documentation/devicetree/bindings/arm/omap/omap.txt +++ b/Documentation/devicetree/bindings/arm/omap/omap.txt @@ -23,6 +23,7 @@ Optional properties: during suspend. - ti,no-reset-on-init: When present, the module should not be reset at init - ti,no-idle-on-init: When present, the module should not be idled at init +- ti,no-idle: When present, the module is never allowed to idle. Example: diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt index ace05992a262..20df350b9ef3 100644 --- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt @@ -30,7 +30,7 @@ that they are defined using standard clock bindings with following clock-output-names: - "xin24m" - crystal input - required, - "ext_i2s" - external I2S clock - optional, - - "ext_gmac" - external GMAC clock - optional + - "rmii_clkin" - external EMAC clock - optional Example: Clock controller node: diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt index 56a961aa5061..9f97df4d5152 100644 --- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt +++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt @@ -35,6 +35,12 @@ Optional properties for HDMI: as an interrupt/status bit in the HDMI controller itself). 
See bindings/pinctrl/brcm,bcm2835-gpio.txt +Required properties for V3D: +- compatible: Should be "brcm,bcm2835-v3d" +- reg: Physical base address and length of the V3D's registers +- interrupts: The interrupt number + See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt + Example: pixelvalve@7e807000 { compatible = "brcm,bcm2835-pixelvalve2"; @@ -60,6 +66,12 @@ hdmi: hdmi@7e902000 { clock-names = "pixel", "hdmi"; }; +v3d: v3d@7ec00000 { + compatible = "brcm,bcm2835-v3d"; + reg = <0x7ec00000 0x1000>; + interrupts = <1 10>; +}; + vc4: gpu { compatible = "brcm,bcm2835-vc4"; }; diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt index 0e6f0c024858..22756b3dede2 100644 --- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt +++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt @@ -6,6 +6,7 @@ Required properties: "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */ "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ + "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */ "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */ - reg: physical base address and length of the registers set for the device - interrupts: should contain DSI interrupt diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt index 27c3ce0db16a..c7c6b9af87ac 100644 --- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt +++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt @@ -12,7 +12,8 @@ Required properties: "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */ - "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */ + "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ + "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ - reg: physical base address and length of the FIMD registers set. diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index e7423bea1424..f5948c48b9a2 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -44,9 +44,34 @@ Optional properties: - pinctrl-names: the pin control state names; should contain "default" - pinctrl-0: the default pinctrl state (active) - pinctrl-n: the "sleep" pinctrl state -- port: DSI controller output port. This contains one endpoint subnode, with its - remote-endpoint set to the phandle of the connected panel's endpoint. - See Documentation/devicetree/bindings/graph.txt for device graph info. +- port: DSI controller output port, containing one endpoint subnode. + + DSI Endpoint properties: + - remote-endpoint: set to phandle of the connected panel's endpoint. + See Documentation/devicetree/bindings/graph.txt for device graph info. + - qcom,data-lane-map: this describes how the logical DSI lanes are mapped + to the physical lanes on the given platform. The value contained in + index n describes what logical data lane is mapped to the physical data + lane n (DATAn, where n lies between 0 and 3). 
+ + For example: + + qcom,data-lane-map = <3 0 1 2>; + + The above mapping describes that the logical data lane DATA3 is mapped to + the physical data lane DATA0, logical DATA0 to physical DATA1, logic DATA1 + to phys DATA2 and logic DATA2 to phys DATA3. + + There are only a limited number of physical to logical mappings possible: + + "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3; + "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3; + "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3; + "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3; + "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3; + "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3; + "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3; + "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3; DSI PHY: Required properties: @@ -131,6 +156,7 @@ Example: port { dsi0_out: endpoint { remote-endpoint = <&panel_in>; + lanes = <0 1 2 3>; }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt index 379ee2ea9a3d..b63f614e0c04 100644 --- a/Documentation/devicetree/bindings/display/msm/hdmi.txt +++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt @@ -11,6 +11,7 @@ Required properties: - reg: Physical base address and length of the controller's registers - reg-names: "core_physical" - interrupts: The interrupt signal from the hdmi block. +- power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: device clocks See ../clocks/clock-bindings.txt for details. - qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin @@ -18,6 +19,8 @@ Required properties: - qcom,hdmi-tx-hpd-gpio: hpd pin - core-vdda-supply: phandle to supply regulator - hdmi-mux-supply: phandle to mux regulator +- phys: the phandle for the HDMI PHY device +- phy-names: the name of the corresponding PHY device Optional properties: - qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin @@ -27,15 +30,38 @@ Optional properties: - pinctrl-0: the default pinctrl state (active) - pinctrl-1: the "sleep" pinctrl state +HDMI PHY: +Required properties: +- compatible: Could be the following + * "qcom,hdmi-phy-8660" + * "qcom,hdmi-phy-8960" + * "qcom,hdmi-phy-8974" + * "qcom,hdmi-phy-8084" + * "qcom,hdmi-phy-8996" +- #phy-cells: Number of cells in a PHY specifier; Should be 0. +- reg: Physical base address and length of the registers of the PHY sub blocks. +- reg-names: The names of register regions. The following regions are required: + * "hdmi_phy" + * "hdmi_pll" + For HDMI PHY on msm8996, these additional register regions are required: + * "hdmi_tx_l0" + * "hdmi_tx_l1" + * "hdmi_tx_l3" + * "hdmi_tx_l4" +- power-domains: Should be <&mmcc MDSS_GDSC>. +- clocks: device clocks + See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details. +- core-vdda-supply: phandle to vdda regulator device node + Example: / { ... 
- hdmi: qcom,hdmi-tx-8960@4a00000 { + hdmi: hdmi@4a00000 { compatible = "qcom,hdmi-tx-8960"; reg-names = "core_physical"; - reg = <0x04a00000 0x1000>; + reg = <0x04a00000 0x2f0>; interrupts = ; power-domains = <&mmcc MDSS_GDSC>; clock-names = @@ -54,5 +80,21 @@ Example: pinctrl-names = "default", "sleep"; pinctrl-0 = <&hpd_active &ddc_active &cec_active>; pinctrl-1 = <&hpd_suspend &ddc_suspend &cec_suspend>; + + phys = <&hdmi_phy>; + phy-names = "hdmi_phy"; + }; + + hdmi_phy: phy@4a00400 { + compatible = "qcom,hdmi-phy-8960"; + reg-names = "hdmi_phy", + "hdmi_pll"; + reg = <0x4a00400 0x60>, + <0x4a00500 0x100>; + #phy-cells = <0>; + power-domains = <&mmcc MDSS_GDSC>; + clock-names = "slave_iface_clk"; + clocks = <&mmcc HDMI_S_AHB_CLK>; + core-vdda-supply = <&pm8921_hdmi_mvs>; }; }; diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index eccd4f4867b2..0d30e42e40be 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -8,6 +8,7 @@ Required Properties: - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU - "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU + - "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU - reg: A list of base address and length of each memory resource, one for each entry in the reg-names property. @@ -24,7 +25,7 @@ Required Properties: - clock-names: Name of the clocks. This property is model-dependent. - R8A7779 uses a single functional clock. The clock doesn't need to be named. - - R8A779[0134] use one functional clock per channel and one clock per LVDS + - R8A779[01345] use one functional clock per channel and one clock per LVDS encoder (if available). The functional clocks must be named "du.x" with "x" being the channel numerical index. The LVDS clocks must be named "lvds.x" with "x" being the LVDS encoder numerical index. @@ -41,13 +42,14 @@ bindings specified in Documentation/devicetree/bindings/graph.txt. The following table lists for each supported model the port number corresponding to each DU output. - Port 0 Port1 Port2 + Port 0 Port1 Port2 Port3 ----------------------------------------------------------------------------- - R8A7779 (H1) DPAD 0 DPAD 1 - - R8A7790 (H2) DPAD LVDS 0 LVDS 1 - R8A7791 (M2-W) DPAD LVDS 0 - - R8A7793 (M2-N) DPAD LVDS 0 - - R8A7794 (E2) DPAD 0 DPAD 1 - + R8A7779 (H1) DPAD 0 DPAD 1 - - + R8A7790 (H2) DPAD LVDS 0 LVDS 1 - + R8A7791 (M2-W) DPAD LVDS 0 - - + R8A7793 (M2-N) DPAD LVDS 0 - - + R8A7794 (E2) DPAD 0 DPAD 1 - - + R8A7795 (H3) DPAD HDMI 0 HDMI 1 LVDS Example: R8A7790 (R-Car H2) DU diff --git a/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt new file mode 100644 index 000000000000..8096a29f9776 --- /dev/null +++ b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt @@ -0,0 +1,50 @@ +Rockchip specific extensions to the Innosilicon HDMI +================================ + +Required properties: +- compatible: + "rockchip,rk3036-inno-hdmi"; +- reg: + Physical base address and length of the controller's registers. +- clocks, clock-names: + Phandle to hdmi controller clock, name should be "pclk" +- interrupts: + HDMI interrupt number +- ports: + Contain one port node with endpoint definitions as defined in + Documentation/devicetree/bindings/graph.txt. 
+- pinctrl-0, pinctrl-name: + Switch the iomux of HPD/CEC pins to HDMI function. + +Example: +hdmi: hdmi@20034000 { + compatible = "rockchip,rk3036-inno-hdmi"; + reg = <0x20034000 0x4000>; + interrupts = ; + clocks = <&cru PCLK_HDMI>; + clock-names = "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_ctl>; + status = "disabled"; + + hdmi_in: port { + #address-cells = <1>; + #size-cells = <0>; + hdmi_in_lcdc: endpoint@0 { + reg = <0>; + remote-endpoint = <&lcdc_out_hdmi>; + }; + }; +}; + +&pinctrl { + hdmi { + hdmi_ctl: hdmi-ctl { + rockchip,pins = <1 8 RK_FUNC_1 &pcfg_pull_none>, + <1 9 RK_FUNC_1 &pcfg_pull_none>, + <1 10 RK_FUNC_1 &pcfg_pull_none>, + <1 11 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + +}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt index 7803e77d85cb..007a5b46256a 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt @@ -24,9 +24,8 @@ Main node required properties: 1 = edge triggered 4 = level triggered - Cells 4 and beyond are reserved for future use. When the 1st cell - has a value of 0 or 1, cells 4 and beyond act as padding, and may be - ignored. It is recommended that padding cells have a value of 0. + Cells 4 and beyond are reserved for future use and must have a value + of 0 if present. - reg : Specifies base physical address(s) and size of the GIC registers, in the following order: diff --git a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt b/Documentation/devicetree/bindings/media/i2c/mt9v032.txt index 202565313e82..100f0ae43269 100644 --- a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt +++ b/Documentation/devicetree/bindings/media/i2c/mt9v032.txt @@ -20,6 +20,8 @@ Optional Properties: - link-frequencies: List of allowed link frequencies in Hz. Each frequency is expressed as a 64-bit big-endian integer. +- reset-gpios: GPIO handle which is connected to the reset pin of the chip. +- standby-gpios: GPIO handle which is connected to the standby pin of the chip. For further reading on port node refer to Documentation/devicetree/bindings/media/video-interfaces.txt. diff --git a/Documentation/devicetree/bindings/media/i2c/tvp5150.txt b/Documentation/devicetree/bindings/media/i2c/tvp5150.txt new file mode 100644 index 000000000000..daa20e43a8e3 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/tvp5150.txt @@ -0,0 +1,88 @@ +* Texas Instruments TVP5150 and TVP5151 video decoders + +The TVP5150 and TVP5151 are video decoders that convert baseband NTSC and PAL +(and also SECAM in the TVP5151 case) video signals to either 8-bit 4:2:2 YUV +with discrete syncs or 8-bit ITU-R BT.656 with embedded syncs output formats. + +Required Properties: +- compatible: value must be "ti,tvp5150" +- reg: I2C slave address + +Optional Properties: +- pdn-gpios: phandle for the GPIO connected to the PDN pin, if any. +- reset-gpios: phandle for the GPIO connected to the RESETB pin, if any. + +Optional nodes: +- connectors: The input connectors of tvp5150 have to be defined under + a subnode name "connectors" using the following format: + + input-connector-name { + input connector properties + }; + +Each input connector must contain the following properties: + + - label: a name for the connector. + - input: the input connector. 
+ +The possible values for the "input" property are: + 0: Composite0 + 1: Composite1 + 2: S-Video + +and on a tvp5150am1 and tvp5151 there is another: + 4: Signal generator + +The list of valid input connectors are defined in dt-bindings/media/tvp5150.h +header file and can be included by device tree source files. + +Each input connector can be defined only once. + +The device node must contain one 'port' child node for its digital output +video port, in accordance with the video interface bindings defined in +Documentation/devicetree/bindings/media/video-interfaces.txt. + +Required Endpoint Properties for parallel synchronization: + +- hsync-active: active state of the HSYNC signal. Must be <1> (HIGH). +- vsync-active: active state of the VSYNC signal. Must be <1> (HIGH). +- field-even-active: field signal level during the even field data + transmission. Must be <0>. + +If none of hsync-active, vsync-active and field-even-active is specified, +the endpoint is assumed to use embedded BT.656 synchronization. + +Example: + +&i2c2 { + ... + tvp5150@5c { + compatible = "ti,tvp5150"; + reg = <0x5c>; + pdn-gpios = <&gpio4 30 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio6 7 GPIO_ACTIVE_LOW>; + + connectors { + composite0 { + label = "Composite0"; + input = ; + }; + + composite1 { + label = "Composite1"; + input = ; + }; + + s-video { + label = "S-Video"; + input = ; + }; + }; + + port { + tvp5150_1: endpoint { + remote-endpoint = <&ccdc_ep>; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt index 9dafe6b06cd2..619193ccf7ff 100644 --- a/Documentation/devicetree/bindings/media/rcar_vin.txt +++ b/Documentation/devicetree/bindings/media/rcar_vin.txt @@ -6,6 +6,7 @@ family of devices. The current blocks are always slaves and suppot one input channel which can be either RGB, YUYV or BT656. - compatible: Must be one of the following + - "renesas,vin-r8a7795" for the R8A7795 device - "renesas,vin-r8a7794" for the R8A7794 device - "renesas,vin-r8a7793" for the R8A7793 device - "renesas,vin-r8a7791" for the R8A7791 device diff --git a/Documentation/devicetree/bindings/media/renesas,jpu.txt b/Documentation/devicetree/bindings/media/renesas,jpu.txt index 0cb94201bf92..d3436e5190f9 100644 --- a/Documentation/devicetree/bindings/media/renesas,jpu.txt +++ b/Documentation/devicetree/bindings/media/renesas,jpu.txt @@ -5,11 +5,12 @@ and decoding function conforming to the JPEG baseline process, so that the JPU can encode image data and decode JPEG data quickly. Required properties: - - compatible: should containg one of the following: - - "renesas,jpu-r8a7790" for R-Car H2 - - "renesas,jpu-r8a7791" for R-Car M2-W - - "renesas,jpu-r8a7792" for R-Car V2H - - "renesas,jpu-r8a7793" for R-Car M2-N +- compatible: "renesas,jpu-", "renesas,rcar-gen2-jpu" as fallback. + Examples with soctypes are: + - "renesas,jpu-r8a7790" for R-Car H2 + - "renesas,jpu-r8a7791" for R-Car M2-W + - "renesas,jpu-r8a7792" for R-Car V2H + - "renesas,jpu-r8a7793" for R-Car M2-N - reg: Base address and length of the registers block for the JPU. - interrupts: JPU interrupt specifier. 
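With the generic fallback in place, a driver can bind to every SoC-specific JPU node with a single match entry, since each node also lists "renesas,rcar-gen2-jpu". An illustrative sketch of such a match table (not necessarily the actual rcar_jpu driver source):

    #include <linux/module.h>
    #include <linux/of.h>

    /* Matching only the fallback string still binds all variants,
     * because e.g. "renesas,jpu-r8a7790" nodes carry the fallback
     * compatible as their second entry. */
    static const struct of_device_id jpu_dt_ids[] = {
            { .compatible = "renesas,rcar-gen2-jpu" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, jpu_dt_ids);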
@@ -17,7 +18,7 @@ Required properties: Example: R8A7790 (R-Car H2) JPU node jpeg-codec@fe980000 { - compatible = "renesas,jpu-r8a7790"; + compatible = "renesas,jpu-r8a7790", "renesas,rcar-gen2-jpu"; reg = <0 0xfe980000 0 0x10300>; interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp1_clks R8A7790_CLK_JPU>; diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.txt b/Documentation/devicetree/bindings/media/renesas,vsp1.txt index 87fe08abf36d..627405abd144 100644 --- a/Documentation/devicetree/bindings/media/renesas,vsp1.txt +++ b/Documentation/devicetree/bindings/media/renesas,vsp1.txt @@ -1,30 +1,18 @@ -* Renesas VSP1 Video Processing Engine +* Renesas VSP Video Processing Engine -The VSP1 is a video processing engine that supports up-/down-scaling, alpha +The VSP is a video processing engine that supports up-/down-scaling, alpha blending, color space conversion and various other image processing features. It can be found in the Renesas R-Car second generation SoCs. Required properties: - - compatible: Must contain "renesas,vsp1" + - compatible: Must contain one of the following values + - "renesas,vsp1" for the R-Car Gen2 VSP1 + - "renesas,vsp2" for the R-Car Gen3 VSP2 - - reg: Base address and length of the registers block for the VSP1. - - interrupts: VSP1 interrupt specifier. - - clocks: A phandle + clock-specifier pair for the VSP1 functional clock. - - - renesas,#rpf: Number of Read Pixel Formatter (RPF) modules in the VSP1. - - renesas,#uds: Number of Up Down Scaler (UDS) modules in the VSP1. - - renesas,#wpf: Number of Write Pixel Formatter (WPF) modules in the VSP1. - - -Optional properties: - - - renesas,has-lif: Boolean, indicates that the LCD Interface (LIF) module is - available. - - renesas,has-lut: Boolean, indicates that the Look Up Table (LUT) module is - available. - - renesas,has-sru: Boolean, indicates that the Super Resolution Unit (SRU) - module is available. + - reg: Base address and length of the registers block for the VSP. + - interrupts: VSP interrupt specifier. + - clocks: A phandle + clock-specifier pair for the VSP functional clock. Example: R8A7790 (R-Car H2) VSP1-S node @@ -34,10 +22,4 @@ Example: R8A7790 (R-Car H2) VSP1-S node reg = <0 0xfe928000 0 0x8000>; interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>; - - renesas,has-lut; - renesas,has-sru; - renesas,#rpf = <5>; - renesas,#uds = <3>; - renesas,#wpf = <4>; }; diff --git a/Documentation/devicetree/bindings/media/ti-cal.txt b/Documentation/devicetree/bindings/media/ti-cal.txt new file mode 100644 index 000000000000..ae9b52f37576 --- /dev/null +++ b/Documentation/devicetree/bindings/media/ti-cal.txt @@ -0,0 +1,72 @@ +Texas Instruments DRA72x CAMERA ADAPTATION LAYER (CAL) +------------------------------------------------------ + +The Camera Adaptation Layer (CAL) is a key component for image capture +applications. The capture module provides the system interface and the +processing capability to connect CSI2 image-sensor modules to the +DRA72x device. + +Required properties: +- compatible: must be "ti,dra72-cal" +- reg: CAL Top level, Receiver Core #0, Receiver Core #1 and Camera RX + control address space +- reg-names: cal_top, cal_rx_core0, cal_rx_core1, and camerrx_control + registers +- interrupts: should contain IRQ line for the CAL; + +CAL supports 2 camera port nodes on MIPI bus. Each CSI2 camera port nodes +should contain a 'port' child node with child 'endpoint' node. 
Please +refer to the bindings defined in +Documentation/devicetree/bindings/media/video-interfaces.txt. + +Example: + cal: cal@4845b000 { + compatible = "ti,dra72-cal"; + ti,hwmods = "cal"; + reg = <0x4845B000 0x400>, + <0x4845B800 0x40>, + <0x4845B900 0x40>, + <0x4A002e94 0x4>; + reg-names = "cal_top", + "cal_rx_core0", + "cal_rx_core1", + "camerrx_control"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + csi2_0: port@0 { + reg = <0>; + endpoint { + slave-mode; + remote-endpoint = <&ar0330_1>; + }; + }; + csi2_1: port@1 { + reg = <1>; + }; + }; + }; + + i2c5: i2c@4807c000 { + ar0330@10 { + compatible = "ti,ar0330"; + reg = <0x10>; + + port { + #address-cells = <1>; + #size-cells = <0>; + + ar0330_1: endpoint { + reg = <0>; + clock-lanes = <1>; + data-lanes = <0 2 3 4>; + remote-endpoint = <&csi2_0>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index 81a9f9e6b45f..c8ac222eac67 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt @@ -82,8 +82,8 @@ Example: "ch16", "ch17", "ch18", "ch19", "ch20", "ch21", "ch22", "ch23", "ch24"; - clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>; - power-domains = <&cpg_clocks>; + clocks = <&cpg CPG_MOD 812>; + power-domains = <&cpg>; phy-mode = "rgmii-id"; phy-handle = <&phy0>; diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt index 4e8b90e43dd8..07a75094c5a8 100644 --- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt +++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt @@ -8,6 +8,7 @@ OHCI and EHCI controllers. Required properties: - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; "renesas,pci-r8a7791" for the R8A7791 SoC; + "renesas,pci-r8a7793" for the R8A7793 SoC; "renesas,pci-r8a7794" for the R8A7794 SoC; "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt index 558fe528ae19..6cf99690eef9 100644 --- a/Documentation/devicetree/bindings/pci/rcar-pci.txt +++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt @@ -4,6 +4,7 @@ Required properties: compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC; "renesas,pcie-r8a7790" for the R8A7790 SoC; "renesas,pcie-r8a7791" for the R8A7791 SoC; + "renesas,pcie-r8a7793" for the R8A7793 SoC; "renesas,pcie-r8a7795" for the R8A7795 SoC; "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device. 
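Several bindings in this series (ti-cal and tvp5150 above, the display nodes earlier) express device connections as port/endpoint graph links. A hedged sketch of how a driver might walk such links at probe time, using the kernel's standard OF-graph helpers; the function itself is illustrative:

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/of_graph.h>

    /* Walk each endpoint under the device node (e.g. CAL's csi2_0)
     * and report the remote port parent (e.g. the ar0330 sensor). */
    static void list_graph_endpoints(struct device *dev)
    {
            struct device_node *ep = NULL;

            while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
                    struct of_endpoint info;
                    struct device_node *remote;

                    of_graph_parse_endpoint(ep, &info);
                    remote = of_graph_get_remote_port_parent(ep);
                    dev_info(dev, "port %u endpoint %u -> %s\n",
                             info.port, info.id,
                             remote ? remote->full_name : "(none)");
                    of_node_put(remote);
            }
    }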
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt index d18109657da6..4f05d208c95c 100644 --- a/Documentation/devicetree/bindings/regulator/tps65217.txt +++ b/Documentation/devicetree/bindings/regulator/tps65217.txt @@ -26,11 +26,7 @@ Example: ti,pmic-shutdown-controller; regulators { - #address-cells = <1>; - #size-cells = <0>; - dcdc1_reg: dcdc1 { - reg = <0>; regulator-min-microvolt = <900000>; regulator-max-microvolt = <1800000>; regulator-boot-on; @@ -38,7 +34,6 @@ Example: }; dcdc2_reg: dcdc2 { - reg = <1>; regulator-min-microvolt = <900000>; regulator-max-microvolt = <3300000>; regulator-boot-on; @@ -46,7 +41,6 @@ Example: }; dcdc3_reg: dcc3 { - reg = <2>; regulator-min-microvolt = <900000>; regulator-max-microvolt = <1500000>; regulator-boot-on; @@ -54,7 +48,6 @@ Example: }; ldo1_reg: ldo1 { - reg = <3>; regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; regulator-boot-on; @@ -62,7 +55,6 @@ Example: }; ldo2_reg: ldo2 { - reg = <4>; regulator-min-microvolt = <900000>; regulator-max-microvolt = <3300000>; regulator-boot-on; @@ -70,7 +62,6 @@ Example: }; ldo3_reg: ldo3 { - reg = <5>; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; @@ -78,7 +69,6 @@ Example: }; ldo4_reg: ldo4 { - reg = <6>; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt index ac2fcd6ff4b8..1068ffce9f91 100644 --- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt @@ -14,6 +14,10 @@ Required properties: interrupt number is the rtc alarm interrupt and second interrupt number is the rtc tick interrupt. The number of cells representing a interrupt depends on the parent interrupt controller. +- clocks: Must contain a list of phandle and clock specifier for the rtc + and source clocks. +- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the + same order as the clocks property. Example: @@ -21,4 +25,6 @@ Example: compatible = "samsung,s3c6410-rtc"; reg = <0x10070000 0x100>; interrupts = <44 0 45 0>; + clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>; + clock-names = "rtc", "rtc_src"; }; diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt index 35ae1fb3537f..ed94c217c98d 100644 --- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt +++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt @@ -9,7 +9,7 @@ Optional properties: - fsl,uart-has-rtscts : Indicate the uart has rts and cts - fsl,irda-mode : Indicate the uart supports irda mode - fsl,dte-mode : Indicate the uart works in DTE mode. The uart works - is DCE mode by default. + in DCE mode by default. Note: Each uart controller should have an alias correctly numbered in "aliases" node. 
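On the driver side, the two clocks the s3c-rtc binding above requires are looked up by exactly the names it specifies. A minimal sketch; the s3c_rtc_clocks struct is hypothetical and error unwinding is trimmed:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    struct s3c_rtc_clocks {             /* hypothetical driver state */
            struct clk *rtc_clk;
            struct clk *rtc_src_clk;
    };

    /* Acquire the gate ("rtc") and source ("rtc_src") clocks named by
     * the binding, then enable them before touching RTC registers. */
    static int s3c_rtc_get_clocks(struct platform_device *pdev,
                                  struct s3c_rtc_clocks *clks)
    {
            clks->rtc_clk = devm_clk_get(&pdev->dev, "rtc");
            if (IS_ERR(clks->rtc_clk))
                    return PTR_ERR(clks->rtc_clk);

            clks->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
            if (IS_ERR(clks->rtc_src_clk))
                    return PTR_ERR(clks->rtc_src_clk);

            clk_prepare_enable(clks->rtc_clk);
            clk_prepare_enable(clks->rtc_src_clk);
            return 0;
    }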
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt index ce55c0a6f757..4da41bf1888e 100644 --- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt +++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt @@ -30,6 +30,8 @@ The compatible list for this generic sound card currently: "fsl,imx-audio-sgtl5000" (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt) + "fsl,imx-audio-wm8960" + Required properties: - compatible : Contains one of entries in the compatible list.
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt index 332e625f6ed0..e5ee3f159893 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt @@ -1,8 +1,9 @@ * Renesas R-Car Thermal Required properties: -- compatible : "renesas,thermal-", "renesas,rcar-thermal" - as fallback. +- compatible : "renesas,thermal-", + "renesas,rcar-gen2-thermal" (with thermal-zone) or + "renesas,rcar-thermal" (without thermal-zone) as fallback. Examples with soctypes are: - "renesas,thermal-r8a73a4" (R-Mobile APE6) - "renesas,thermal-r8a7779" (R-Car H1) @@ -36,3 +37,35 @@ thermal@e61f0000 { 0xe61f0300 0x38>; interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>; }; + +Example (with thermal-zone): + +thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <1000>; + polling-delay = <5000>; + + thermal-sensors = <&thermal>; + + trips { + cpu-crit { + temperature = <115000>; + hysteresis = <0>; + type = "critical"; + }; + }; + cooling-maps { + }; + }; +}; + +thermal: thermal@e61f0000 { + compatible = "renesas,thermal-r8a7790", + "renesas,rcar-gen2-thermal", + "renesas,rcar-thermal"; + reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>; + interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&mstp5_clks R8A7790_CLK_THERMAL>; + power-domains = <&cpg_clocks>; + #thermal-sensor-cells = <0>; +};
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt index 4f4a84b6903a..32ac32e773e1 100644 --- a/Documentation/dma-buf-sharing.txt +++ b/Documentation/dma-buf-sharing.txt @@ -350,7 +350,26 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases: handles, too). So it's beneficial to support this in a similar fashion on dma-buf to have a good transition path for existing Android userspace. - No special interfaces, userspace simply calls mmap on the dma-buf fd. + No special interfaces, userspace simply calls mmap on the dma-buf fd, making + sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always* + used when the access happens. This is discussed in the next paragraphs. + + Some systems might need some sort of cache coherency management e.g. when + CPU and GPU domains are being accessed through dma-buf at the same time. To + circumvent this problem there are begin/end coherency markers that forward + directly to the existing dma-buf device drivers' vfunc hooks. Userspace can make + use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence + would be used as follows (a userspace sketch follows the list): + - mmap dma-buf fd + - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write + to mmap area 3. SYNC_END ioctl. This can be repeated as often as you + want (with the new data being consumed by the GPU or say scanout device) + - munmap once you don't need the buffer any more
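A minimal userspace sketch of that sequence, using the DMA_BUF_IOCTL_SYNC uapi from linux/dma-buf.h; the dma-buf fd and its size are assumed to come from the exporting subsystem:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/dma-buf.h>

    /* Bracket every CPU access to the mapping with SYNC_START/SYNC_END,
     * declaring the direction (here: CPU writes) in the flags. */
    static int cpu_fill(int dmabuf_fd, size_t len)
    {
            struct dma_buf_sync sync = { 0 };
            void *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_SHARED, dmabuf_fd, 0);

            if (map == MAP_FAILED)
                    return -1;

            sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
            ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

            memset(map, 0, len);    /* the CPU access happens here */

            sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
            ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

            munmap(map, len);
            return 0;
    }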
+ + Therefore, for correctness and optimal performance, systems where the GPU and + CPU share a memory cache (both the "coherent" and the "incoherent" cases) are + always required to issue SYNC_START before, and SYNC_END after, accessing the + mapped address. 2. Supporting existing mmap interfaces in importers
diff --git a/Documentation/dvb/README.dvb-usb b/Documentation/dvb/README.dvb-usb index 669dc6ce4330..6f4b12f7b844 100644 --- a/Documentation/dvb/README.dvb-usb +++ b/Documentation/dvb/README.dvb-usb @@ -190,7 +190,7 @@ and watch another one. Patches, comments and suggestions are very very welcome. 3. Acknowledgements - Amaury Demol (ademol@dibcom.fr) and Francois Kanounnikoff from DiBcom for + Amaury Demol (Amaury.Demol@parrot.com) and Francois Kanounnikoff from DiBcom for providing specs, code and help, on which the dvb-dibusb, dib3000mb and dib3000mc are based.
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt index c477af086e65..686a64bba775 100644 --- a/Documentation/filesystems/efivarfs.txt +++ b/Documentation/filesystems/efivarfs.txt @@ -14,3 +14,10 @@ filesystem. efivarfs is typically mounted like this, mount -t efivarfs none /sys/firmware/efi/efivars + +Due to the presence of numerous firmware bugs where removing non-standard +UEFI variables causes the system firmware to fail to POST, efivarfs +files that are not well-known standardized variables are created +as immutable files. This doesn't prevent removal - "chattr -i" will work - +but it does prevent this kind of failure from being accomplished +accidentally.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 551ecf09c8dd..9a53c929f017 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -4235,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted. The default value of this parameter is determined by the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT. + workqueue.debug_force_rr_cpu + The workqueue subsystem used to implicitly guarantee + that work items queued without an explicit CPU are put + on the local CPU. This guarantee is no longer true and, + while the local CPU is still preferred, work items + may be put on foreign CPUs. This debug option + forces round-robin CPU selection to flush out + usages which depend on the now broken guarantee. + When enabled, memory and cache locality will be + impacted. + x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of default x2apic cluster mode on platforms supporting x2apic.
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt index 767392ffd31e..a484d2c109d7 100644 --- a/Documentation/timers/hpet.txt +++ b/Documentation/timers/hpet.txt @@ -1,9 +1,7 @@ High Precision Event Timer Driver for Linux The High Precision Event Timer (HPET) hardware follows a specification -by Intel and Microsoft which can be found at - - http://www.intel.com/hardwaredesign/hpetspec_1.pdf +by Intel and Microsoft, revision 1. Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision") and up to 32 comparators.
Normally three or more comparators are provided, diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt index 5517db602f37..5e759cab4538 100644 --- a/Documentation/video4linux/v4l2-controls.txt +++ b/Documentation/video4linux/v4l2-controls.txt @@ -647,7 +647,6 @@ Or you can add specific controls to a handler: volume = v4l2_ctrl_new_std(&video_ctrl_handler, &ops, V4L2_CID_AUDIO_VOLUME, ...); v4l2_ctrl_new_std(&video_ctrl_handler, &ops, V4L2_CID_BRIGHTNESS, ...); v4l2_ctrl_new_std(&video_ctrl_handler, &ops, V4L2_CID_CONTRAST, ...); - v4l2_ctrl_add_ctrl(&radio_ctrl_handler, volume); What you should not do is make two identical controls for two handlers. For example: diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index daf9c0f742d2..c81731096a43 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt @@ -358,7 +358,8 @@ In the first case there are two additional complications: - if CR4.SMEP is enabled: since we've turned the page into a kernel page, the kernel may now execute it. We handle this by also setting spte.nx. If we get a user fetch or read fault, we'll change spte.u=1 and - spte.nx=gpte.nx back. + spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when + shadow paging is in use. - if CR4.SMAP is disabled: since the page has been changed to a kernel page, it can not be reused when CR4.SMAP is enabled. We set CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note, diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index 9f9ec9f76039..4e4b6f10d841 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt @@ -400,3 +400,7 @@ wm8350_wdt: nowayout: Watchdog cannot be stopped once started (default=kernel config parameter) ------------------------------------------------- +sun4v_wdt: +timeout_ms: Watchdog timeout in milliseconds 1..180000, default=60000) +nowayout: Watchdog cannot be stopped once started +------------------------------------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 7f1fa4ff300a..10562167b85e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -827,6 +827,12 @@ S: Maintained F: drivers/net/arcnet/ F: include/uapi/linux/if_arcnet.h +ARM HDLCD DRM DRIVER +M: Liviu Dudau +S: Supported +F: drivers/gpu/drm/arm/ +F: Documentation/devicetree/bindings/display/arm,hdlcd.txt + ARM MFM AND FLOPPY DRIVERS M: Ian Molton S: Maintained @@ -920,17 +926,24 @@ M: Emilio López S: Maintained F: drivers/clk/sunxi/ -ARM/Amlogic MesonX SoC support +ARM/Amlogic Meson SoC support M: Carlo Caione L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-meson@googlegroups.com +W: http://linux-meson.com/ S: Maintained -F: drivers/media/rc/meson-ir.c -N: meson[x68] +F: arch/arm/mach-meson/ +F: arch/arm/boot/dts/meson* +N: meson ARM/Annapurna Labs ALPINE ARCHITECTURE M: Tsahee Zidenberg +M: Antoine Tenart S: Maintained F: arch/arm/mach-alpine/ +F: arch/arm/boot/dts/alpine* +F: arch/arm64/boot/dts/al/ +F: drivers/*/*alpine* ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT M: Nicolas Ferre @@ -1442,8 +1455,8 @@ S: Maintained ARM/RENESAS ARM64 ARCHITECTURE M: Simon Horman M: Magnus Damm -L: linux-sh@vger.kernel.org -Q: http://patchwork.kernel.org/project/linux-sh/list/ +L: linux-renesas-soc@vger.kernel.org +Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next S: Supported F: arch/arm64/boot/dts/renesas/ @@ -2362,14 +2375,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git S: Maintained N: bcm2835 -BROADCOM BCM33XX MIPS ARCHITECTURE -M: Kevin Cernekee -L: linux-mips@linux-mips.org -S: Maintained -F: arch/mips/bcm3384/* -F: arch/mips/include/asm/mach-bcm3384/* -F: arch/mips/kernel/*bmips* - BROADCOM BCM47XX MIPS ARCHITECTURE M: Hauke Mehrtens M: Rafał Miłecki @@ -3452,7 +3457,6 @@ F: drivers/usb/dwc2/ DESIGNWARE USB3 DRD IP DRIVER M: Felipe Balbi L: linux-usb@vger.kernel.org -L: linux-omap@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/dwc3/ @@ -3758,7 +3762,7 @@ F: include/drm/exynos* F: include/uapi/drm/exynos* DRM DRIVERS FOR FREESCALE DCU -M: Jianwei Wang +M: Stefan Agner M: Alison Wang L: dri-devel@lists.freedesktop.org S: Supported @@ -4520,6 +4524,12 @@ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/dma/fsldma.* +FREESCALE GPMI NAND DRIVER +M: Han Xu +L: linux-mtd@lists.infradead.org +S: Maintained +F: drivers/mtd/nand/gpmi-nand/* + FREESCALE I2C CPM DRIVER M: Jochen Friedrich L: linuxppc-dev@lists.ozlabs.org @@ -4536,7 +4546,7 @@ F: include/linux/platform_data/video-imxfb.h F: drivers/video/fbdev/imxfb.c FREESCALE QUAD SPI DRIVER -M: Han Xu +M: Han Xu L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/spi-nor/fsl-quadspi.c @@ -4550,6 +4560,15 @@ S: Maintained F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h +FREESCALE IMX / MXC FEC DRIVER +M: Fugang Duan +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/freescale/fec_main.c +F: drivers/net/ethernet/freescale/fec_ptp.c +F: drivers/net/ethernet/freescale/fec.h +F: Documentation/devicetree/bindings/net/fsl-fec.txt + FREESCALE QUICC ENGINE LIBRARY L: linuxppc-dev@lists.ozlabs.org S: Orphan @@ -6136,7 +6155,7 @@ F: include/uapi/linux/sunrpc/ KERNEL SELFTEST FRAMEWORK M: Shuah Khan -L: linux-api@vger.kernel.org +L: linux-kselftest@vger.kernel.org T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest S: Maintained F: tools/testing/selftests @@ -6766,6 +6785,7 @@ S: Maintained F: Documentation/networking/mac80211-injection.txt F: include/net/mac80211.h F: net/mac80211/ +F: drivers/net/wireless/mac80211_hwsim.[ch] MACVLAN DRIVER M: Patrick McHardy @@ -7362,7 +7382,7 @@ F: drivers/tty/isicom.c F: include/linux/isicom.h MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER -M: Felipe Balbi +M: Bin Liu L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained @@ -7694,13 +7714,13 @@ S: Maintained F: arch/nios2/ NOKIA N900 POWER SUPPLY DRIVERS -M: Pali Rohár -S: Maintained +R: Pali Rohár F: include/linux/power/bq2415x_charger.h F: include/linux/power/bq27xxx_battery.h F: include/linux/power/isp1704_charger.h F: drivers/power/bq2415x_charger.c F: drivers/power/bq27xxx_battery.c +F: drivers/power/bq27xxx_battery_i2c.c F: drivers/power/isp1704_charger.c F: drivers/power/rx51_battery.c @@ -7931,11 +7951,9 @@ F: drivers/media/platform/omap3isp/ F: drivers/staging/media/omap4iss/ OMAP USB SUPPORT -M: Felipe Balbi L: linux-usb@vger.kernel.org L: linux-omap@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git -S: Maintained +S: Orphan F: drivers/usb/*/*omap* F: arch/arm/*omap*/usb* @@ -9566,6 +9584,12 @@ M: Andreas Noever S: Maintained F: drivers/thunderbolt/ +TI BQ27XXX POWER SUPPLY DRIVER +R: Andrew F. 
Davis +F: include/linux/power/bq27xxx_battery.h +F: drivers/power/bq27xxx_battery.c +F: drivers/power/bq27xxx_battery_i2c.c + TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER M: John Stultz M: Thomas Gleixner @@ -9787,10 +9811,11 @@ S: Supported F: drivers/scsi/be2iscsi/ Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER -M: Sathya Perla -M: Ajit Khaparde -M: Padmanabh Ratnakar -M: Sriharsha Basavapatna +M: Sathya Perla +M: Ajit Khaparde +M: Padmanabh Ratnakar +M: Sriharsha Basavapatna +M: Somnath Kotur L: netdev@vger.kernel.org W: http://www.emulex.com S: Supported @@ -10849,6 +10874,14 @@ L: linux-omap@vger.kernel.org S: Maintained F: drivers/thermal/ti-soc-thermal/ +TI VPE/CAL DRIVERS +M: Benoit Parrot +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +S: Maintained +F: drivers/media/platform/ti-vpe/ + TI CDCE706 CLOCK DRIVER M: Max Filippov S: Maintained @@ -12020,7 +12053,6 @@ F: arch/arm64/xen/ F: arch/arm64/include/asm/xen/ XEN NETWORK BACKEND DRIVER -M: Ian Campbell M: Wei Liu L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: netdev@vger.kernel.org diff --git a/Makefile b/Makefile index 682840850f58..2d519d2fb3a9 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc7 NAME = Blurry Fish Butt # *DOCUMENTATION* diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 76dde9db7934..8a188bc1786a 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -12,8 +12,6 @@ config ARC select BUILDTIME_EXTABLE_SORT select COMMON_CLK select CLONE_BACKWARDS - # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev - select DEVTMPFS if !INITRAMFS_SOURCE="" select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT @@ -275,14 +273,6 @@ config ARC_DCCM_BASE default "0xA0000000" depends on ARC_HAS_DCCM -config ARC_HAS_HW_MPY - bool "Use Hardware Multiplier (Normal or Faster XMAC)" - default y - help - Influences how gcc generates code for MPY operations. - If enabled, MPYxx insns are generated, provided by Standard/XMAC - Multipler. Otherwise software multipy lib is used - choice prompt "MMU Version" default ARC_MMU_V3 if ARC_CPU_770 @@ -338,6 +328,19 @@ config ARC_PAGE_SIZE_4K endchoice +choice + prompt "MMU Super Page Size" + depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE + default ARC_HUGEPAGE_2M + +config ARC_HUGEPAGE_2M + bool "2MB" + +config ARC_HUGEPAGE_16M + bool "16MB" + +endchoice + if ISA_ARCOMPACT config ARC_COMPACT_IRQ_LEVELS @@ -410,7 +413,7 @@ config ARC_HAS_RTC default n depends on !SMP -config ARC_HAS_GRTC +config ARC_HAS_GFRC bool "SMP synchronized 64-bit cycle counter" default y depends on SMP @@ -529,14 +532,6 @@ config ARC_DBG_TLB_MISS_COUNT Counts number of I and D TLB Misses and exports them via Debugfs The counters can be cleared via Debugfs as well -if SMP - -config ARC_IPI_DBG - bool "Debug Inter Core interrupts" - default n - -endif - endif config ARC_UBOOT_SUPPORT @@ -566,6 +561,12 @@ endmenu endmenu # "ARC Architecture Configuration" source "mm/Kconfig" + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "12" if ARC_HUGEPAGE_16M + default "11" + source "net/Kconfig" source "drivers/Kconfig" source "fs/Kconfig" diff --git a/arch/arc/Makefile b/arch/arc/Makefile index aeb19021099e..c8230f3395f2 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -74,10 +74,6 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB # --build-id w/o "-marclinux". 
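
The FORCE_MAX_ZONEORDER defaults added above follow from a buddy-allocator invariant: the largest block the page allocator can hand out is 2^(MAX_ORDER-1) pages, and that block must be able to cover one transparent huge page. A quick standalone check of the arithmetic (an illustrative sketch, not part of the patch; it assumes the ARC default of 8K pages, PAGE_SHIFT = 13):

#include <stdio.h>

/* Largest buddy-allocator block for a given MAX_ORDER, with 8K pages
 * (PAGE_SHIFT = 13, the ARC default). Hypothetical helper, for the
 * arithmetic only. */
static unsigned long max_block_bytes(unsigned int max_order)
{
	return (1UL << (max_order - 1)) << 13;
}

int main(void)
{
	printf("MAX_ORDER 11 -> %lu MB\n", max_block_bytes(11) >> 20); /*  8 MB */
	printf("MAX_ORDER 12 -> %lu MB\n", max_block_bytes(12) >> 20); /* 16 MB */
	return 0;
}

So the default of 11 (8 MB blocks) already covers the ARC_HUGEPAGE_2M case, while ARC_HUGEPAGE_16M needs the bump to 12 to reach a 16 MB block.
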
Default arc-elf32-ld is OK ldflags-$(upto_gcc44) += -marclinux -ifndef CONFIG_ARC_HAS_HW_MPY - cflags-y += -mno-mpy -endif - LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) # Modules with short calls might break for calls into builtin-kernel diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index f1ac9818b751..5d4e2a07ad3e 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -39,6 +39,7 @@ CONFIG_IP_PNP_RARP=y # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set @@ -73,7 +74,6 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_DESIGNWARE_PLATFORM=y # CONFIG_HWMON is not set CONFIG_FB=y -# CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y @@ -91,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_DW=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT3_FS=y -CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y -CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 323486d6ee83..87ee46b237ef 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -39,14 +39,10 @@ CONFIG_IP_PNP_RARP=y # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_AXS=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y @@ -78,14 +74,12 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_DESIGNWARE_PLATFORM=y # CONFIG_HWMON is not set CONFIG_FB=y -# CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set # CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y @@ -97,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_DW=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT3_FS=y -CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y -CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 66191cd0447e..d80daf4f7e73 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -40,14 +40,10 @@ CONFIG_IP_PNP_RARP=y # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_AXS=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y @@ -79,14 +75,12 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_DESIGNWARE_PLATFORM=y # CONFIG_HWMON is not set CONFIG_FB=y -# CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 
is not set # CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y @@ -98,12 +92,10 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_DW=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT3_FS=y -CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y -CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 138f9d887957..f41095340b6a 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -4,6 +4,7 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -26,7 +27,6 @@ CONFIG_ARC_PLAT_SIM=y CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700" CONFIG_PREEMPT=y # CONFIG_COMPACTION is not set -# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -34,6 +34,7 @@ CONFIG_UNIX_DIAG=y CONFIG_NET_KEY=y CONFIG_INET=y # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set @@ -51,7 +52,6 @@ CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_VGA_CONSOLE is not set # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set @@ -63,4 +63,3 @@ CONFIG_NFS_FS=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_DEBUG_PREEMPT is not set -CONFIG_XZ_DEC=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index f68838e8068a..cfaa33cb5921 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -35,6 +35,7 @@ CONFIG_UNIX_DIAG=y CONFIG_NET_KEY=y CONFIG_INET=y # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set @@ -49,7 +50,6 @@ CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_VGA_CONSOLE is not set # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set @@ -61,4 +61,3 @@ CONFIG_NFS_FS=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_DEBUG_PREEMPT is not set -CONFIG_XZ_DEC=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index 96bd1c20fb0b..bb2a8dc778b5 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -21,13 +22,11 @@ CONFIG_MODULES=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_PLAT_SIM=y -CONFIG_ARC_BOARD_ML509=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu" CONFIG_PREEMPT=y # CONFIG_COMPACTION is not set -# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -35,6 +34,7 @@ CONFIG_UNIX_DIAG=y CONFIG_NET_KEY=y CONFIG_INET=y # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # 
CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set @@ -49,7 +49,6 @@ CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_VGA_CONSOLE is not set # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set @@ -60,4 +59,3 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_XZ_DEC=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 31e1d95764ff..646182e93753 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -33,6 +33,7 @@ CONFIG_UNIX_DIAG=y CONFIG_NET_KEY=y CONFIG_INET=y # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set @@ -58,7 +59,6 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_FB=y -# CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_HID is not set diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index fcae66683ca0..ceca2541950d 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -34,12 +34,12 @@ CONFIG_UNIX_DIAG=y CONFIG_NET_KEY=y CONFIG_INET=y # CONFIG_IPV6 is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_BLK_DEV is not set CONFIG_NETDEVICES=y -CONFIG_NET_OSCI_LAN=y CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2_ALPS is not set # CONFIG_MOUSE_PS2_LOGIPS2PP is not set @@ -58,7 +58,6 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_FB=y -# CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_HID is not set diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index b01b659168ea..4b6da90f6f26 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-" CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y +# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -18,15 +19,11 @@ CONFIG_MODULES=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_PLAT_SIM=y -CONFIG_ARC_BOARD_ML509=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y -CONFIG_ARC_HAS_LL64=y -# CONFIG_ARC_HAS_RTSC is not set CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu" CONFIG_PREEMPT=y # CONFIG_COMPACTION is not set -# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=y @@ -40,6 +37,7 @@ CONFIG_INET=y # CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FIRMWARE_IN_KERNEL is not set @@ -56,14 +54,11 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_NET_OSCI_LAN=y # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y CONFIG_MOUSE_PS2_TOUCHKIT=y # CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_ARC_PS2=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y @@ -75,9 +70,6 @@ 
CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_FB=y -CONFIG_ARCPGU_RGB888=y -CONFIG_ARCPGU_DISPTYPE=0 -# CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_HID is not set diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 3b4dc9cebcf1..9b342eaf95ae 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -3,6 +3,7 @@ CONFIG_CROSS_COMPILE="arc-linux-" CONFIG_DEFAULT_HOSTNAME="tb10x" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -26,12 +27,10 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLOCK is not set CONFIG_ARC_PLAT_TB10X=y CONFIG_ARC_CACHE_LINE_SHIFT=5 -CONFIG_ARC_STACK_NONEXEC=y CONFIG_HZ=250 CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_COMPACTION is not set -# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -44,8 +43,8 @@ CONFIG_IP_MULTICAST=y # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_PROC_DEVICETREE=y CONFIG_NETDEVICES=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -55,9 +54,6 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_STMMAC_ETH=y -CONFIG_STMMAC_DEBUG_FS=y -CONFIG_STMMAC_DA=y -CONFIG_STMMAC_CHAINED=y # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_WLAN is not set # CONFIG_INPUT is not set @@ -91,7 +87,6 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_LEDS_TRIGGER_TRANSIENT=y CONFIG_DMADEVICES=y CONFIG_DW_DMAC=y -CONFIG_NET_DMA=y CONFIG_ASYNC_TX_DMA=y # CONFIG_IOMMU_SUPPORT is not set # CONFIG_DNOTIFY is not set @@ -100,17 +95,16 @@ CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_WARN_DEPRECATED is not set -CONFIG_MAGIC_SYSRQ=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_HEADERS_CHECK=y CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DETECT_HUNG_TASK=y CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_STACKOVERFLOW=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index f36c047b33ca..735985974a31 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y -# CONFIG_ARC_HAS_GRTC is not set +# CONFIG_ARC_HAS_GFRC is not set CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 7fac7d85ed6a..f9f4c6f59fdb 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -10,7 +10,8 @@ #define _ASM_ARC_ARCREGS_H /* Build Configuration Registers */ -#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */ +#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */ +#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */ #define ARC_REG_CRC_BCR 0x62 #define ARC_REG_VECBASE_BCR 0x68 #define ARC_REG_PERIBASE_BCR 0x69 @@ -18,10 +19,10 @@ #define ARC_REG_DPFP_BCR 0x6C /* 
ARCompact: Dbl Precision FPU */ #define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */ #define ARC_REG_SLC_BCR 0xce -#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ +#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */ #define ARC_REG_TIMERS_BCR 0x75 #define ARC_REG_AP_BCR 0x76 -#define ARC_REG_ICCM_BCR 0x78 +#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */ #define ARC_REG_XY_MEM_BCR 0x79 #define ARC_REG_MAC_BCR 0x7a #define ARC_REG_MUL_BCR 0x7b @@ -36,6 +37,7 @@ #define ARC_REG_IRQ_BCR 0xF3 #define ARC_REG_SMART_BCR 0xFF #define ARC_REG_CLUSTER_BCR 0xcf +#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */ /* status32 Bits Positions */ #define STATUS_AE_BIT 5 /* Exception active */ @@ -246,7 +248,7 @@ struct bcr_perip { #endif }; -struct bcr_iccm { +struct bcr_iccm_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int base:16, pad:5, sz:3, ver:8; #else @@ -254,17 +256,15 @@ struct bcr_iccm { #endif }; -/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */ -struct bcr_dccm_base { +struct bcr_iccm_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int addr:24, ver:8; + unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8; #else - unsigned int ver:8, addr:24; + unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8; #endif }; -/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */ -struct bcr_dccm { +struct bcr_dccm_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int res:21, sz:3, ver:8; #else @@ -272,6 +272,14 @@ struct bcr_dccm { #endif }; +struct bcr_dccm_arcv2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8; +#else + unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12; +#endif +}; + /* ARCompact: Both SP and DP FPU BCRs have same format */ struct bcr_fp_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN @@ -315,9 +323,9 @@ struct bcr_bpu_arcv2 { struct bcr_generic { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int pad:24, ver:8; + unsigned int info:24, ver:8; #else - unsigned int ver:8, pad:24; + unsigned int ver:8, info:24; #endif }; @@ -349,14 +357,13 @@ struct cpuinfo_arc { struct cpuinfo_arc_bpu bpu; struct bcr_identity core; struct bcr_isa isa; - struct bcr_timer timers; unsigned int vec_base; struct cpuinfo_arc_ccm iccm, dccm; struct { unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3, fpu_sp:1, fpu_dp:1, pad2:6, debug:1, ap:1, smart:1, rtt:1, pad3:4, - pad4:8; + timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; } extn; struct bcr_mpy extn_mpy; struct bcr_extn_xymem extn_xymem; diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h index 4fd7d62a6e30..49014f0ef36d 100644 --- a/arch/arc/include/asm/irq.h +++ b/arch/arc/include/asm/irq.h @@ -16,11 +16,9 @@ #ifdef CONFIG_ISA_ARCOMPACT #define TIMER0_IRQ 3 #define TIMER1_IRQ 4 -#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */ #else #define TIMER0_IRQ 16 #define TIMER1_IRQ 17 -#define IPI_IRQ 19 #endif #include diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h index 258b0e5ad332..37c2f751eebf 100644 --- a/arch/arc/include/asm/irqflags-arcv2.h +++ b/arch/arc/include/asm/irqflags-arcv2.h @@ -22,6 +22,7 @@ #define AUX_IRQ_CTRL 0x00E #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */ +#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ #define AUX_IRQ_PRIORITY 0x206 #define ICAUSE 0x40a #define AUX_IRQ_SELECT 0x40b @@ -30,8 +31,11 @@ /* Was Intr taken in User Mode */ #define AUX_IRQ_ACT_BIT_U 31 -/* 0 
is highest level, but taken by FIRQs, if present in design */ -#define ARCV2_IRQ_DEF_PRIO 0 +/* + * User space should be interruptible even by lowest prio interrupt + * Safe even if the actual number of interrupt priorities is fewer, or even one + */ +#define ARCV2_IRQ_DEF_PRIO 15 /* seed value for status register */ #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ @@ -112,6 +116,16 @@ static inline int arch_irqs_disabled(void) return arch_irqs_disabled_flags(arch_local_save_flags()); } +static inline void arc_softirq_trigger(int irq) +{ + write_aux_reg(AUX_IRQ_HINT, irq); +} + +static inline void arc_softirq_clear(int irq) +{ + write_aux_reg(AUX_IRQ_HINT, 0); +} + #else .macro IRQ_DISABLE scratch diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h index 46f4e5351b2a..847e3bbe387f 100644 --- a/arch/arc/include/asm/mcip.h +++ b/arch/arc/include/asm/mcip.h @@ -39,8 +39,8 @@ struct mcip_cmd { #define CMD_DEBUG_SET_MASK 0x34 #define CMD_DEBUG_SET_SELECT 0x36 -#define CMD_GRTC_READ_LO 0x42 -#define CMD_GRTC_READ_HI 0x43 +#define CMD_GFRC_READ_LO 0x42 +#define CMD_GFRC_READ_HI 0x43 #define CMD_IDU_ENABLE 0x71 #define CMD_IDU_DISABLE 0x72 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 57af2f05ae84..d426d4215513 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -179,37 +179,44 @@ #define __S111 PAGE_U_X_W_R /**************************************************************** - * Page Table Lookup split + * 2 tier (PGD:PTE) software page walker * - * We implement 2 tier paging and since this is all software, we are free - * to customize the span of a PGD / PTE entry to suit us - * - * 32 bit virtual address + * [31] 32 bit virtual address [0] * ------------------------------------------------------- - * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE | + * | | <------------ PGDIR_SHIFT ----------> | + * | | | + * | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> | * ------------------------------------------------------- * | | | * | | --> off in page frame - * | | * | ---> index into Page Table - * | * ----> index into Page Directory + * + * In a single page size configuration, only PAGE_SHIFT is fixed + * So both PGD and PTE sizing can be tweaked + * e.g. 8K page (PAGE_SHIFT 13) can have + * - PGDIR_SHIFT 21 -> 11:8:13 address split + * - PGDIR_SHIFT 24 -> 8:11:13 address split + * + * If Super Page is configured, PGDIR_SHIFT becomes fixed too, + * so the sizing flexibility is gone. 
*/ -#define BITS_IN_PAGE PAGE_SHIFT - -/* Optimal Sizing of Pg Tbl - based on MMU page size */ -#if defined(CONFIG_ARC_PAGE_SIZE_8K) -#define BITS_FOR_PTE 8 /* 11:8:13 */ -#elif defined(CONFIG_ARC_PAGE_SIZE_16K) -#define BITS_FOR_PTE 8 /* 10:8:14 */ -#elif defined(CONFIG_ARC_PAGE_SIZE_4K) -#define BITS_FOR_PTE 9 /* 11:9:12 */ +#if defined(CONFIG_ARC_HUGEPAGE_16M) +#define PGDIR_SHIFT 24 +#elif defined(CONFIG_ARC_HUGEPAGE_2M) +#define PGDIR_SHIFT 21 +#else +/* + * Only Normal page support so "hackable" (see comment above) + * Default value provides 11:8:13 (8K), 11:9:12 (4K) + */ +#define PGDIR_SHIFT 21 #endif -#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE) +#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT) +#define BITS_FOR_PGD (32 - PGDIR_SHIFT) -#define PGDIR_SHIFT (32 - BITS_FOR_PGD) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */ #define PGDIR_MASK (~(PGDIR_SIZE-1)) diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cbfec79137bf..c1264607bbff 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots VECTOR handle_interrupt ; (16) Timer0 VECTOR handle_interrupt ; unused (Timer1) VECTOR handle_interrupt ; unused (WDT) -VECTOR handle_interrupt ; (19) ICI (inter core interrupt) -VECTOR handle_interrupt -VECTOR handle_interrupt -VECTOR handle_interrupt -VECTOR handle_interrupt ; (23) End of fixed IRQs +VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI) +VECTOR handle_interrupt ; (20) perf Interrupt +VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI) +VECTOR handle_interrupt ; unused +VECTOR handle_interrupt ; (23) unused +# End of fixed IRQs .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8 VECTOR handle_interrupt @@ -211,7 +212,11 @@ debug_marker_syscall: ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig ; entry was via Exception in DS which got preempted in kernel). 
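
To make the new pgtable.h split concrete: under CONFIG_ARC_HUGEPAGE_2M, PGDIR_SHIFT is 21, so with the default 8K page (PAGE_SHIFT 13) a 32-bit virtual address decomposes into 11 PGD bits, 8 PTE bits and 13 offset bits. A minimal sketch of the resulting two-level index extraction (illustrative only, hard-coding those assumed config values):

#include <stdio.h>

#define PAGE_SHIFT	13	/* 8K pages (assumed default)      */
#define PGDIR_SHIFT	21	/* CONFIG_ARC_HUGEPAGE_2M          */
#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)	/* 8  */
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)		/* 11 */

int main(void)
{
	unsigned long vaddr = 0x7f3fa123;	/* arbitrary example address */
	unsigned int pgd_idx = vaddr >> PGDIR_SHIFT;
	unsigned int pte_idx = (vaddr >> PAGE_SHIFT) & ((1 << BITS_FOR_PTE) - 1);
	unsigned int offset  = vaddr & ((1 << PAGE_SHIFT) - 1);

	/* 11:8:13 split; one PGD entry spans PGDIR_SIZE = 1 << 21 = 2 MB */
	printf("pgd index %u, pte index %u, page offset 0x%x\n",
	       pgd_idx, pte_idx, offset);
	return 0;
}

Under CONFIG_ARC_HUGEPAGE_16M the same formulas give an 8:11:13 split and PGDIR_SIZE grows to 16 MB, which is why PGDIR_SHIFT can no longer be tweaked once super pages are configured: each PGD entry must span exactly one super page.
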
; -; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling +; IRQ RTIE won't reliably restore DE bit and/or BTA, needs a workaround +; +; Solution is to return from Intr w/o any delay slot quirks into a kernel trampoline +; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly + .Lintr_ret_to_delay_slot: debug_marker_ds: @@ -222,18 +227,23 @@ debug_marker_ds: ld r2, [sp, PT_ret] ld r3, [sp, PT_status32] + ; STAT32 for Int return created from scratch + ; (No delay slot, disable further intr in trampoline) + bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK st r0, [sp, PT_status32] mov r1, .Lintr_ret_to_delay_slot_2 st r1, [sp, PT_ret] + ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots st r2, [sp, 0] st r3, [sp, 4] b .Lisr_ret_fast_path .Lintr_ret_to_delay_slot_2: + ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP sub sp, sp, SZ_PT_REGS st r9, [sp, -4] @@ -243,11 +253,19 @@ debug_marker_ds: ld r9, [sp, 4] sr r9, [erstatus] + ; restore AUX_USER_SP if returning to U mode + bbit0 r9, STATUS_U_BIT, 1f + ld r9, [sp, PT_sp] + sr r9, [AUX_USER_SP] + +1: ld r9, [sp, 8] sr r9, [erbta] ld r9, [sp, -4] add sp, sp, SZ_PT_REGS + + ; return from pure kernel mode to delay slot rtie END(ret_from_exception) diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 0394f9f61b46..942526322ae7 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -14,6 +14,8 @@ #include #include +static int irq_prio; + /* * Early Hardware specific Interrupt setup * -Called very early (start_kernel -> setup_arch -> setup_processor) @@ -24,6 +26,14 @@ void arc_init_IRQ(void) { unsigned int tmp; + struct irq_build { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8; +#else + unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3; +#endif + } irq_bcr; + struct aux_irq_ctrl { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int res3:18, save_idx_regs:1, res2:1, @@ -46,28 +56,25 @@ void arc_init_IRQ(void) WRITE_AUX(AUX_IRQ_CTRL, ictrl); - /* setup status32, don't enable intr yet as kernel doesn't want */ - tmp = read_aux_reg(0xa); - tmp |= ISA_INIT_STATUS_BITS; - tmp &= ~STATUS_IE_MASK; - asm volatile("flag %0 \n"::"r"(tmp)); - /* * ARCv2 core intc provides multiple interrupt priorities (upto 16). * Typical builds though have only two levels (0-high, 1-low) * Linux by default uses lower prio 1 for most irqs, reserving 0 for * NMI style interrupts in future (say perf) - * - * Read the intc BCR to confirm that Linux default priority is avail - * in h/w - * - * Note: - * IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level - * is 0 based. */ - tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF; - if (ARCV2_IRQ_DEF_PRIO > tmp) - panic("Linux default irq prio incorrect\n"); + + READ_BCR(ARC_REG_IRQ_BCR, irq_bcr); + + irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */ + pr_info("archs-intc\t: %d priority levels (default %d)%s\n", + irq_prio + 1, irq_prio, + irq_bcr.firq ? 
" FIRQ (not used)":""); + + /* setup status32, don't enable intr yet as kernel doesn't want */ + tmp = read_aux_reg(0xa); + tmp |= STATUS_AD_MASK | (irq_prio << 1); + tmp &= ~STATUS_IE_MASK; + asm volatile("flag %0 \n"::"r"(tmp)); } static void arcv2_irq_mask(struct irq_data *data) @@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data) { /* set default priority */ write_aux_reg(AUX_IRQ_SELECT, data->irq); - write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); + write_aux_reg(AUX_IRQ_PRIORITY, irq_prio); /* * hw auto enables (linux unmask) all by default diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index 06bcedf19b62..224d1c3aa9c4 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -81,9 +81,6 @@ static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, { switch (irq) { case TIMER0_IRQ: -#ifdef CONFIG_SMP - case IPI_IRQ: -#endif irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); break; default: diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index bd237acdf4f2..c41c364b926c 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -11,9 +11,13 @@ #include #include #include +#include #include #include +#define IPI_IRQ 19 +#define SOFTIRQ_IRQ 21 + static char smp_cpuinfo_buf[128]; static int idu_detected; @@ -22,6 +26,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock); static void mcip_setup_per_cpu(int cpu) { smp_ipi_irq_setup(cpu, IPI_IRQ); + smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); } static void mcip_ipi_send(int cpu) @@ -29,46 +34,44 @@ static void mcip_ipi_send(int cpu) unsigned long flags; int ipi_was_pending; + /* ARConnect can only send IPI to others */ + if (unlikely(cpu == raw_smp_processor_id())) { + arc_softirq_trigger(SOFTIRQ_IRQ); + return; + } + + raw_spin_lock_irqsave(&mcip_lock, flags); + /* - * NOTE: We must spin here if the other cpu hasn't yet - * serviced a previous message. This can burn lots - * of time, but we MUST follows this protocol or - * ipi messages can be lost!!! - * Also, we must release the lock in this loop because - * the other side may get to this same loop and not - * be able to ack -- thus causing deadlock. + * If receiver already has a pending interrupt, elide sending this one. + * Linux cross core calling works well with concurrent IPIs + * coalesced into one + * see arch/arc/kernel/smp.c: ipi_send_msg_one() */ + __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu); + ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK); + if (!ipi_was_pending) + __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu); - do { - raw_spin_lock_irqsave(&mcip_lock, flags); - __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu); - ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK); - if (ipi_was_pending == 0) - break; /* break out but keep lock */ - raw_spin_unlock_irqrestore(&mcip_lock, flags); - } while (1); - - __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu); raw_spin_unlock_irqrestore(&mcip_lock, flags); - -#ifdef CONFIG_ARC_IPI_DBG - if (ipi_was_pending) - pr_info("IPI ACK delayed from cpu %d\n", cpu); -#endif } static void mcip_ipi_clear(int irq) { unsigned int cpu, c; unsigned long flags; - unsigned int __maybe_unused copy; + + if (unlikely(irq == SOFTIRQ_IRQ)) { + arc_softirq_clear(irq); + return; + } raw_spin_lock_irqsave(&mcip_lock, flags); /* Who sent the IPI */ __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0); - copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */ + cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... 
*/ /* * In rare case, multiple concurrent IPIs sent to same target can @@ -82,12 +85,6 @@ static void mcip_ipi_clear(int irq) } while (cpu); raw_spin_unlock_irqrestore(&mcip_lock, flags); - -#ifdef CONFIG_ARC_IPI_DBG - if (c != __ffs(copy)) - pr_info("IPIs from %x coalesced to %x\n", - copy, raw_smp_processor_id()); -#endif } static void mcip_probe_n_setup(void) @@ -96,13 +93,13 @@ static void mcip_probe_n_setup(void) #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad3:8, idu:1, llm:1, num_cores:6, - iocoh:1, grtc:1, dbg:1, pad2:1, + iocoh:1, gfrc:1, dbg:1, pad2:1, msg:1, sem:1, ipi:1, pad:1, ver:8; #else unsigned int ver:8, pad:1, ipi:1, sem:1, msg:1, - pad2:1, dbg:1, grtc:1, iocoh:1, + pad2:1, dbg:1, gfrc:1, iocoh:1, num_cores:6, llm:1, idu:1, pad3:8; #endif @@ -111,12 +108,13 @@ static void mcip_probe_n_setup(void) READ_BCR(ARC_REG_MCIP_BCR, mp); sprintf(smp_cpuinfo_buf, - "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", + "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n", mp.ver, mp.num_cores, IS_AVAIL1(mp.ipi, "IPI "), IS_AVAIL1(mp.idu, "IDU "), + IS_AVAIL1(mp.llm, "LLM "), IS_AVAIL1(mp.dbg, "DEBUG "), - IS_AVAIL1(mp.grtc, "GRTC")); + IS_AVAIL1(mp.gfrc, "GFRC")); idu_detected = mp.idu; @@ -125,8 +123,8 @@ static void mcip_probe_n_setup(void) __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); } - if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc) - panic("kernel trying to use non-existent GRTC\n"); + if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc) + panic("kernel trying to use non-existent GFRC\n"); } struct plat_smp_ops plat_smp_ops = { diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index e1b87444ea9a..cdc821df1809 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -42,9 +42,57 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; +static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu) +{ + if (is_isa_arcompact()) { + struct bcr_iccm_arcompact iccm; + struct bcr_dccm_arcompact dccm; + + READ_BCR(ARC_REG_ICCM_BUILD, iccm); + if (iccm.ver) { + cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */ + cpu->iccm.base_addr = iccm.base << 16; + } + + READ_BCR(ARC_REG_DCCM_BUILD, dccm); + if (dccm.ver) { + unsigned long base; + cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */ + + base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD); + cpu->dccm.base_addr = base & ~0xF; + } + } else { + struct bcr_iccm_arcv2 iccm; + struct bcr_dccm_arcv2 dccm; + unsigned long region; + + READ_BCR(ARC_REG_ICCM_BUILD, iccm); + if (iccm.ver) { + cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */ + if (iccm.sz00 == 0xF && iccm.sz01 > 0) + cpu->iccm.sz <<= iccm.sz01; + + region = read_aux_reg(ARC_REG_AUX_ICCM); + cpu->iccm.base_addr = region & 0xF0000000; + } + + READ_BCR(ARC_REG_DCCM_BUILD, dccm); + if (dccm.ver) { + cpu->dccm.sz = 256 << dccm.sz0; + if (dccm.sz0 == 0xF && dccm.sz1 > 0) + cpu->dccm.sz <<= dccm.sz1; + + region = read_aux_reg(ARC_REG_AUX_DCCM); + cpu->dccm.base_addr = region & 0xF0000000; + } + } +} + static void read_arc_build_cfg_regs(void) { struct bcr_perip uncached_space; + struct bcr_timer timer; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; unsigned long perip_space; @@ -53,7 +101,11 @@ static void read_arc_build_cfg_regs(void) READ_BCR(AUX_IDENTITY, cpu->core); READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); - READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers); + READ_BCR(ARC_REG_TIMERS_BCR, timer); + cpu->extn.timer0 = timer.t0; + cpu->extn.timer1 = timer.t1; + 
cpu->extn.rtc = timer.rtc; + cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); @@ -71,36 +123,11 @@ static void read_arc_build_cfg_regs(void) cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ - - /* Note that we read the CCM BCRs independent of kernel config - * This is to catch the cases where user doesn't know that - * CCMs are present in hardware build - */ - { - struct bcr_iccm iccm; - struct bcr_dccm dccm; - struct bcr_dccm_base dccm_base; - unsigned int bcr_32bit_val; - - bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR); - if (bcr_32bit_val) { - iccm = *((struct bcr_iccm *)&bcr_32bit_val); - cpu->iccm.base_addr = iccm.base << 16; - cpu->iccm.sz = 0x2000 << (iccm.sz - 1); - } - - bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR); - if (bcr_32bit_val) { - dccm = *((struct bcr_dccm *)&bcr_32bit_val); - cpu->dccm.sz = 0x800 << (dccm.sz); - - READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base); - cpu->dccm.base_addr = dccm_base.addr << 8; - } - } - READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); + /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */ + read_decode_ccm_bcr(cpu); + read_decode_mmu_bcr(); read_decode_cache_bcr(); @@ -208,9 +235,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) (unsigned int)(arc_get_core_freq() / 10000) % 100); n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", - IS_AVAIL1(cpu->timers.t0, "Timer0 "), - IS_AVAIL1(cpu->timers.t1, "Timer1 "), - IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ", + IS_AVAIL1(cpu->extn.timer0, "Timer0 "), + IS_AVAIL1(cpu->extn.timer1, "Timer1 "), + IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ", CONFIG_ARC_HAS_RTC)); n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", @@ -232,8 +259,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); } - n += scnprintf(buf + n, len - n, "%s", - IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY)); } n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", @@ -293,13 +318,13 @@ static void arc_chk_core_config(void) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; int fpu_enabled; - if (!cpu->timers.t0) + if (!cpu->extn.timer0) panic("Timer0 is not present!\n"); - if (!cpu->timers.t1) + if (!cpu->extn.timer1) panic("Timer1 is not present!\n"); - if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc) + if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc) panic("RTC is not present\n"); #ifdef CONFIG_ARC_HAS_DCCM @@ -334,6 +359,7 @@ static void arc_chk_core_config(void) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + IS_ENABLED(CONFIG_ARC_HAS_LLSC) && !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) panic("llock/scond livelock workaround missing\n"); } diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index ef6e9e15b82a..424e937da5c8 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -336,11 +336,8 @@ irqreturn_t do_IPI(int irq, void *dev_id) int rc; rc = __do_IPI(msg); -#ifdef CONFIG_ARC_IPI_DBG - /* IPI received but no valid @msg */ if (rc) pr_info("IPI with bogus msg %ld in %ld\n", msg, copy); -#endif pending &= ~(1U << msg); } while (pending); diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index dfad287f1db1..156d9833ff84 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -62,7 +62,7 
@@ /********** Clock Source Device *********/ -#ifdef CONFIG_ARC_HAS_GRTC +#ifdef CONFIG_ARC_HAS_GFRC static int arc_counter_setup(void) { @@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs) local_irq_save(flags); - __mcip_cmd(CMD_GRTC_READ_LO, 0); + __mcip_cmd(CMD_GFRC_READ_LO, 0); stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK); - __mcip_cmd(CMD_GRTC_READ_HI, 0); + __mcip_cmd(CMD_GFRC_READ_HI, 0); stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK); local_irq_restore(flags); @@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs) } static struct clocksource arc_counter = { - .name = "ARConnect GRTC", + .name = "ARConnect GFRC", .rating = 400, .read = arc_counter_read, .mask = CLOCKSOURCE_MASK(64), diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 7a6a58ef8aaf..43788b1a1ac5 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -195,5 +195,7 @@ CFLAGS_font.o := -Dstatic= $(obj)/font.c: $(FONTC) $(call cmd,shipped) +AFLAGS_hyp-stub.o := -Wa,-march=armv7-a + $(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S $(call cmd,shipped) diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index f3db13d2d90e..0cc150b87b86 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -285,8 +285,10 @@ }; }; + +/include/ "tps65217.dtsi" + &tps { - compatible = "ti,tps65217"; /* * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only * mode") at poweroff. Most BeagleBone versions do not support RTC-only @@ -307,17 +309,12 @@ ti,pmic-shutdown-controller; regulators { - #address-cells = <1>; - #size-cells = <0>; - dcdc1_reg: regulator@0 { - reg = <0>; regulator-name = "vdds_dpr"; regulator-always-on; }; dcdc2_reg: regulator@1 { - reg = <1>; /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <925000>; @@ -327,7 +324,6 @@ }; dcdc3_reg: regulator@2 { - reg = <2>; /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ regulator-name = "vdd_core"; regulator-min-microvolt = <925000>; @@ -337,25 +333,21 @@ }; ldo1_reg: regulator@3 { - reg = <3>; regulator-name = "vio,vrtc,vdds"; regulator-always-on; }; ldo2_reg: regulator@4 { - reg = <4>; regulator-name = "vdd_3v3aux"; regulator-always-on; }; ldo3_reg: regulator@5 { - reg = <5>; regulator-name = "vdd_1v8"; regulator-always-on; }; ldo4_reg: regulator@6 { - reg = <6>; regulator-name = "vdd_3v3a"; regulator-always-on; }; diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi index fda457b07e15..857d9894103a 100644 --- a/arch/arm/boot/dts/am335x-chilisom.dtsi +++ b/arch/arm/boot/dts/am335x-chilisom.dtsi @@ -128,21 +128,16 @@ }; +/include/ "tps65217.dtsi" + &tps { - compatible = "ti,tps65217"; - regulators { - #address-cells = <1>; - #size-cells = <0>; - dcdc1_reg: regulator@0 { - reg = <0>; regulator-name = "vdds_dpr"; regulator-always-on; }; dcdc2_reg: regulator@1 { - reg = <1>; /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <925000>; @@ -152,7 +147,6 @@ }; dcdc3_reg: regulator@2 { - reg = <2>; /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ regulator-name = "vdd_core"; regulator-min-microvolt = <925000>; @@ -162,28 +156,24 @@ }; ldo1_reg: regulator@3 { - reg = <3>; regulator-name = "vio,vrtc,vdds"; regulator-boot-on; regulator-always-on; }; 
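
The clocksource above assembles a 64-bit cycle count from two 32-bit MCIP readbacks, which appears to be why arc_counter_read() keeps interrupts off between CMD_GFRC_READ_LO and CMD_GFRC_READ_HI: an interrupt between the two halves could let them come from different snapshots and yield a torn value. The union trick in sketch form (gfrc_read_lo/hi are fake stand-ins for the readback sequence):

#include <stdio.h>
#include <stdint.h>

/* Fake stand-ins for the CMD_GFRC_READ_LO/HI readback pair. */
static uint32_t gfrc_read_lo(void) { return 0x89abcdefu; }
static uint32_t gfrc_read_hi(void) { return 0x01234567u; }

int main(void)
{
	union {
		struct { uint32_t l, h; } s;	/* little-endian halves */
		uint64_t full;
	} stamp;

	/* the kernel does this pair with IRQs disabled so both halves
	 * belong to one coherent snapshot of the 64-bit counter */
	stamp.s.l = gfrc_read_lo();
	stamp.s.h = gfrc_read_hi();

	printf("cycles: 0x%016llx\n", (unsigned long long)stamp.full);
	return 0;
}
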
ldo2_reg: regulator@4 { - reg = <4>; regulator-name = "vdd_3v3aux"; regulator-boot-on; regulator-always-on; }; ldo3_reg: regulator@5 { - reg = <5>; regulator-name = "vdd_1v8"; regulator-boot-on; regulator-always-on; }; ldo4_reg: regulator@6 { - reg = <6>; regulator-name = "vdd_3v3d"; regulator-boot-on; regulator-always-on; diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts index 77559a1ded60..f313999c503e 100644 --- a/arch/arm/boot/dts/am335x-nano.dts +++ b/arch/arm/boot/dts/am335x-nano.dts @@ -375,15 +375,11 @@ wp-gpios = <&gpio3 18 0>; }; +#include "tps65217.dtsi" + &tps { - compatible = "ti,tps65217"; - regulators { - #address-cells = <1>; - #size-cells = <0>; - dcdc1_reg: regulator@0 { - reg = <0>; /* +1.5V voltage with ±4% tolerance */ regulator-min-microvolt = <1450000>; regulator-max-microvolt = <1550000>; @@ -392,7 +388,6 @@ }; dcdc2_reg: regulator@1 { - reg = <1>; /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <915000>; @@ -402,7 +397,6 @@ }; dcdc3_reg: regulator@2 { - reg = <2>; /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */ regulator-name = "vdd_core"; regulator-min-microvolt = <915000>; @@ -412,7 +406,6 @@ }; ldo1_reg: regulator@3 { - reg = <3>; /* +1.8V voltage with ±4% tolerance */ regulator-min-microvolt = <1750000>; regulator-max-microvolt = <1870000>; @@ -421,7 +414,6 @@ }; ldo2_reg: regulator@4 { - reg = <4>; /* +3.3V voltage with ±4% tolerance */ regulator-min-microvolt = <3175000>; regulator-max-microvolt = <3430000>; @@ -430,7 +422,6 @@ }; ldo3_reg: regulator@5 { - reg = <5>; /* +1.8V voltage with ±4% tolerance */ regulator-min-microvolt = <1750000>; regulator-max-microvolt = <1870000>; @@ -439,7 +430,6 @@ }; ldo4_reg: regulator@6 { - reg = <6>; /* +3.3V voltage with ±4% tolerance */ regulator-min-microvolt = <3175000>; regulator-max-microvolt = <3430000>; diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts index 471a3a70ea1f..8867aaaec54d 100644 --- a/arch/arm/boot/dts/am335x-pepper.dts +++ b/arch/arm/boot/dts/am335x-pepper.dts @@ -420,9 +420,9 @@ vin-supply = <&vbat>; }; -&tps { - compatible = "ti,tps65217"; +/include/ "tps65217.dtsi" +&tps { backlight { isel = <1>; /* ISET1 */ fdim = <200>; /* TPS65217_BL_FDIM_200HZ */ @@ -430,17 +430,12 @@ }; regulators { - #address-cells = <1>; - #size-cells = <0>; - dcdc1_reg: regulator@0 { - reg = <0>; /* VDD_1V8 system supply */ regulator-always-on; }; dcdc2_reg: regulator@1 { - reg = <1>; /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */ regulator-name = "vdd_core"; regulator-min-microvolt = <925000>; @@ -450,7 +445,6 @@ }; dcdc3_reg: regulator@2 { - reg = <2>; /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <925000>; @@ -460,21 +454,18 @@ }; ldo1_reg: regulator@3 { - reg = <3>; /* VRTC 1.8V always-on supply */ regulator-name = "vrtc,vdds"; regulator-always-on; }; ldo2_reg: regulator@4 { - reg = <4>; /* 3.3V rail */ regulator-name = "vdd_3v3aux"; regulator-always-on; }; ldo3_reg: regulator@5 { - reg = <5>; /* VDD_3V3A 3.3V rail */ regulator-name = "vdd_3v3a"; regulator-min-microvolt = <3300000>; @@ -482,7 +473,6 @@ }; ldo4_reg: regulator@6 { - reg = <6>; /* VDD_3V3B 3.3V rail */ regulator-name = "vdd_3v3b"; regulator-always-on; diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts index 1b5b044fcd91..865de8500f1c 100644 --- a/arch/arm/boot/dts/am335x-shc.dts +++ 
b/arch/arm/boot/dts/am335x-shc.dts @@ -46,7 +46,7 @@ gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; linux,code = ; debounce-interval = <1000>; - gpio-key,wakeup; + wakeup-source; }; front_button { @@ -54,7 +54,7 @@ gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>; linux,code = ; debounce-interval = <1000>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts index d38edfa53bb9..3303c281697b 100644 --- a/arch/arm/boot/dts/am335x-sl50.dts +++ b/arch/arm/boot/dts/am335x-sl50.dts @@ -375,19 +375,16 @@ pinctrl-0 = <&uart4_pins>; }; +#include "tps65217.dtsi" + &tps { - compatible = "ti,tps65217"; ti,pmic-shutdown-controller; interrupt-parent = <&intc>; interrupts = <7>; /* NNMI */ regulators { - #address-cells = <1>; - #size-cells = <0>; - dcdc1_reg: regulator@0 { - reg = <0>; /* VDDS_DDR */ regulator-min-microvolt = <1500000>; regulator-max-microvolt = <1500000>; @@ -395,7 +392,6 @@ }; dcdc2_reg: regulator@1 { - reg = <1>; /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <925000>; @@ -405,7 +401,6 @@ }; dcdc3_reg: regulator@2 { - reg = <2>; /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ regulator-name = "vdd_core"; regulator-min-microvolt = <925000>; @@ -415,7 +410,6 @@ }; ldo1_reg: regulator@3 { - reg = <3>; /* VRTC / VIO / VDDS*/ regulator-always-on; regulator-min-microvolt = <1800000>; @@ -423,7 +417,6 @@ }; ldo2_reg: regulator@4 { - reg = <4>; /* VDD_3V3AUX */ regulator-always-on; regulator-min-microvolt = <3300000>; @@ -431,7 +424,6 @@ }; ldo3_reg: regulator@5 { - reg = <5>; /* VDD_1V8 */ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -439,7 +431,6 @@ }; ldo4_reg: regulator@6 { - reg = <6>; /* VDD_3V3A */ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 36c0fa6c362a..a0986c65be0c 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -173,6 +173,8 @@ sound0_master: simple-audio-card,codec { sound-dai = <&tlv320aic3104>; + assigned-clocks = <&clkoutmux2_clk_mux>; + assigned-clock-parents = <&sys_clk2_dclk_div>; clocks = <&clkout2_clk>; }; }; @@ -796,6 +798,8 @@ pinctrl-names = "default", "sleep"; pinctrl-0 = <&mcasp3_pins_default>; pinctrl-1 = <&mcasp3_pins_sleep>; + assigned-clocks = <&mcasp3_ahclkx_mux>; + assigned-clock-parents = <&sys_clkin2>; status = "okay"; op-mode = <0>; /* MCASP_IIS_MODE */ diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts index 8d93882dc8d5..1c06cb76da07 100644 --- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts +++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts @@ -545,7 +545,7 @@ ti,debounce-tol = /bits/ 16 <10>; ti,debounce-rep = /bits/ 16 <1>; - linux,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/armada-xp-axpwifiap.dts b/arch/arm/boot/dts/armada-xp-axpwifiap.dts index 23fc670c0427..5c21b236721f 100644 --- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts +++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts @@ -70,8 +70,8 @@ soc { ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; pcie-controller { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index f774101416a5..ebe1d267406d 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -76,8 
+76,8 @@ ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; devbus-bootcs { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 4878d7353069..5730b875c4f5 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -95,8 +95,8 @@ ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; devbus-bootcs { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index fb9e1bbf2338..8af463f26ea1 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -65,8 +65,8 @@ soc { ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; pcie-controller { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts index 6e9820e141f8..b89e6cf1271a 100644 --- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts @@ -70,8 +70,8 @@ soc { ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; pcie-controller { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts index 6ab33837a2b6..6522b04f4a8e 100644 --- a/arch/arm/boot/dts/armada-xp-matrix.dts +++ b/arch/arm/boot/dts/armada-xp-matrix.dts @@ -68,8 +68,8 @@ soc { ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; internal-regs { serial@12000 { diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts index 62175a8848bc..d19f44c70925 100644 --- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts +++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts @@ -64,8 +64,8 @@ soc { ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; pcie-controller { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index a5db17782e08..853bd392a4fe 100644 --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -65,9 +65,9 @@ soc { ranges = ; + MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000 + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; devbus-bootcs { status = "okay"; diff --git a/arch/arm/boot/dts/armada-xp-synology-ds414.dts b/arch/arm/boot/dts/armada-xp-synology-ds414.dts index 2391b11dc546..d17dab0a6f51 100644 --- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts +++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts @@ -78,8 +78,8 @@ soc { ranges = ; + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 + MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; pcie-controller { status = "okay"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index c4d9175b90dc..f82aa44c3cee 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1500,6 +1500,16 @@ 0x48485200 0x2E00>; #address-cells = <1>; #size-cells = <1>; + + /* + * Do not allow gating of cpsw clock as workaround + * for errata i877. Keeping internal clock disabled + * causes the device switching characteristics + * to degrade over time and eventually fail to meet + * the data manual delay time/skew specs. 
+ */ + ti,no-idle; + /* * rx_thresh_pend * rx_pend diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 4f6ae921656f..f74d3db4846d 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -896,7 +896,6 @@ #size-cells = <1>; reg = <0x2100000 0x10000>; ranges = <0 0x2100000 0x10000>; - interrupt-parent = <&intc>; clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, <&clks IMX6QDL_CLK_CAAM_ACLK>, <&clks IMX6QDL_CLK_CAAM_IPG>, diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts index bf4143c6cb8f..b84af3da8c84 100644 --- a/arch/arm/boot/dts/kirkwood-ds112.dts +++ b/arch/arm/boot/dts/kirkwood-ds112.dts @@ -14,7 +14,7 @@ #include "kirkwood-synology.dtsi" / { - model = "Synology DS111"; + model = "Synology DS112"; compatible = "synology,ds111", "marvell,kirkwood"; memory { diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts index 420788229e6f..aae8a7aceab7 100644 --- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts +++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts @@ -228,6 +228,37 @@ }; }; +&devbus_bootcs { + status = "okay"; + devbus,keep-config; + + flash@0 { + compatible = "jedec-flash"; + reg = <0 0x40000>; + bank-width = <1>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + header@0 { + reg = <0 0x30000>; + read-only; + }; + + uboot@30000 { + reg = <0x30000 0xF000>; + read-only; + }; + + uboot_env@3F000 { + reg = <0x3F000 0x1000>; + }; + }; + }; +}; + &mdio { status = "okay"; diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts index 6713b1ea732b..01d239c3eaaa 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts @@ -283,7 +283,6 @@ pinctrl-names = "default"; status = "okay"; - renesas,enable-gpio = <&gpio5 31 GPIO_ACTIVE_HIGH>; }; &usbphy { diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h index 1afe24629d1f..b0c912feaa2f 100644 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h @@ -90,7 +90,7 @@ #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2) #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1) #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2) -#define PIN_PA15 14 +#define PIN_PA15 15 #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0) #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1) #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1) diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi new file mode 100644 index 000000000000..a63272422d76 --- /dev/null +++ b/arch/arm/boot/dts/tps65217.dtsi @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* + * Integrated Power Management Chip + * http://www.ti.com/lit/ds/symlink/tps65217.pdf + */ + +&tps { + compatible = "ti,tps65217"; + + regulators { + #address-cells = <1>; + #size-cells = <0>; + + dcdc1_reg: regulator@0 { + reg = <0>; + regulator-compatible = "dcdc1"; + }; + + dcdc2_reg: regulator@1 { + reg = <1>; + regulator-compatible = "dcdc2"; + }; + + dcdc3_reg: regulator@2 { + reg = <2>; + regulator-compatible = "dcdc3"; + }; + + ldo1_reg: regulator@3 { + reg = <3>; + regulator-compatible = "ldo1"; + }; + + ldo2_reg: regulator@4 { + reg = <4>; + regulator-compatible = "ldo2"; + }; + + ldo3_reg: regulator@5 { + reg = <5>; + regulator-compatible = "ldo3"; + }; + + ldo4_reg: regulator@6 { + reg = <6>; + regulator-compatible = "ldo4"; + }; + }; +}; diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c index 2dc6da70ae59..d7ed252708c5 100644 --- a/arch/arm/common/icst.c +++ b/arch/arm/common/icst.c @@ -16,7 +16,7 @@ */ #include #include - +#include #include /* @@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div); unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco) { - return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]); + u64 dividend = p->ref * 2 * (u64)(vco.v + 8); + u32 divisor = (vco.r + 2) * p->s2div[vco.s]; + + do_div(dividend, divisor); + return (unsigned long)dividend; } EXPORT_SYMBOL(icst_hz); @@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq) if (f > p->vco_min && f <= p->vco_max) break; + i++; } while (i < 8); if (i >= 8) diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a7151744b85c..d18d6b42fcf5 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -292,24 +292,23 @@ CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y -CONFIG_OMAP2_DSS=m -CONFIG_OMAP5_DSS_HDMI=y -CONFIG_OMAP2_DSS_SDI=y -CONFIG_OMAP2_DSS_DSI=y +CONFIG_FB_OMAP5_DSS_HDMI=y +CONFIG_FB_OMAP2_DSS_SDI=y +CONFIG_FB_OMAP2_DSS_DSI=y CONFIG_FB_OMAP2=m -CONFIG_DISPLAY_ENCODER_TFP410=m -CONFIG_DISPLAY_ENCODER_TPD12S015=m -CONFIG_DISPLAY_CONNECTOR_DVI=m -CONFIG_DISPLAY_CONNECTOR_HDMI=m -CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m -CONFIG_DISPLAY_PANEL_DPI=m -CONFIG_DISPLAY_PANEL_DSI_CM=m -CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m -CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m -CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m -CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m -CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m -CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m +CONFIG_FB_OMAP2_ENCODER_TFP410=m +CONFIG_FB_OMAP2_ENCODER_TPD12S015=m +CONFIG_FB_OMAP2_CONNECTOR_DVI=m +CONFIG_FB_OMAP2_CONNECTOR_HDMI=m +CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m +CONFIG_FB_OMAP2_PANEL_DPI=m +CONFIG_FB_OMAP2_PANEL_DSI_CM=m +CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m +CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m +CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m +CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m +CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m +CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=y diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index b445a5d56f43..89a3a3e592d6 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -364,7 +364,7 @@ static struct crypto_alg aes_algs[] = { { .cra_blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .ivsize = 0, .setkey = ce_aes_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, @@ -441,7 +441,7 
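
The icst_hz() change above is an overflow fix: p->ref * 2 * (vco.v + 8) was evaluated in 32 bits, and for common reference clocks the product exceeds 2^32, so it is widened to u64 and do_div() performs the 64-by-32 division that a plain '/' cannot do on 32-bit ARM. The effect, re-created in userspace with made-up but plausible values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up inputs for illustration: 24 MHz ref, v = 110, r = 2, s2div = 1 */
	uint32_t ref = 24000000, v = 110, r = 2, s2div = 1;

	uint32_t narrow = ref * 2 * (v + 8);		/* wraps past 2^32 */
	uint64_t wide = (uint64_t)ref * 2 * (v + 8);	/* what do_div() sees */

	printf("32-bit product: %u (wrapped)\n", narrow);
	printf("64-bit product: %llu\n", (unsigned long long)wide);
	printf("hz: %llu\n",
	       (unsigned long long)(wide / ((r + 2) * s2div)));
	return 0;
}

The companion hunk in icst_hz_to_vco() adds the missing i++, so the do/while loop actually steps through all eight s2div entries instead of spinning forever on the first one.
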
@@ static struct crypto_alg aes_algs[] = { { .cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .ivsize = 0, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index 7da5503c0591..e08d15184056 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void) u32 irqstat; asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); + dsb(sy); return irqstat; } diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index 0375c8caa061..9408a994cc91 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page); + unsigned long page_pfn = page_to_xen_pfn(page); + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); + unsigned long compound_pages = + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; + bool local = (page_pfn <= dev_pfn) && + (dev_pfn - page_pfn < compound_pages); if (local) __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 2c5f160be65e..ad325a8c7e1e 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL) += debug.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o +AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a ifeq ($(CONFIG_ARM_PSCI),y) obj-$(CONFIG_SMP) += psci_smp.o endif diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 5fa69d7bae58..99361f11354a 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) u64 val; val = kvm_arm_timer_get_reg(vcpu, reg->id); - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; } static unsigned long num_core_regs(void) diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 7f33b2056ae6..0f6600f05137 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -206,7 +206,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, run->mmio.is_write = is_write; run->mmio.phys_addr = fault_ipa; run->mmio.len = len; - memcpy(run->mmio.data, data_buf, len); + if (is_write) + memcpy(run->mmio.data, data_buf, len); if (!ret) { /* We handled the access successfully in the kernel.
*/ diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 809827265fb3..bab814d2f37d 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -18,6 +18,7 @@ #include <linux/of_platform.h> #include <linux/irqdomain.h> +#include <asm/setup.h> #include <asm/mach/arch.h> #include "common.h" @@ -77,12 +78,31 @@ static const char *const n900_boards_compat[] __initconst = { NULL, }; +/* Set system_rev from atags */ +static void __init rx51_set_system_rev(const struct tag *tags) +{ + const struct tag *tag; + + if (tags->hdr.tag != ATAG_CORE) + return; + + for_each_tag(tag, tags) { + if (tag->hdr.tag == ATAG_REVISION) { + system_rev = tag->u.revision.rev; + break; + } + } +} + /* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags, * save them while the data is still not overwritten */ static void __init rx51_reserve(void) { - save_atags((const struct tag *)(PAGE_OFFSET + 0x100)); + const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100); + + save_atags(tags); + rx51_set_system_rev(tags); omap_reserve(); } diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c index 7b76ce01c21d..8633c703546a 100644 --- a/arch/arm/mach-omap2/gpmc-onenand.c +++ b/arch/arm/mach-omap2/gpmc-onenand.c @@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base) static void set_onenand_cfg(void __iomem *onenand_base) { - u32 reg; + u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT; - reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); - reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9)); reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) | ONENAND_SYS_CFG1_BL_16; if (onenand_flags & ONENAND_FLAG_SYNCREAD) @@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base) reg |= ONENAND_SYS_CFG1_VHF; else reg &= ~ONENAND_SYS_CFG1_VHF; + writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); } @@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base) } } + onenand_async.sync_write = true; omap2_onenand_calc_async_timings(&t); ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async); diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 0437537751bc..f7ff3b9dad87 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -191,12 +191,22 @@ static int _omap_device_notifier_call(struct notifier_block *nb, { struct platform_device *pdev = to_platform_device(dev); struct omap_device *od; + int err; switch (event) { case BUS_NOTIFY_DEL_DEVICE: if (pdev->archdata.od) omap_device_delete(pdev->archdata.od); break; + case BUS_NOTIFY_UNBOUND_DRIVER: + od = to_omap_device(pdev); + if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) { + dev_info(dev, "enabled after unload, idling\n"); + err = omap_device_idle(pdev); + if (err) + dev_err(dev, "failed to idle\n"); + } + break; case BUS_NOTIFY_ADD_DEVICE: if (pdev->dev.of_node) omap_device_build_from_dt(pdev); @@ -602,8 +612,10 @@ static int _od_runtime_resume(struct device *dev) int ret; ret = omap_device_enable(pdev); - if (ret) + if (ret) { + dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n"); return ret; + } return pm_generic_runtime_resume(dev); } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index e9f65fec55c0..b6d62e4cdfdd 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2200,6 +2200,11 @@ static int _enable(struct omap_hwmod *oh) */ static int _idle(struct omap_hwmod *oh) { + if (oh->flags &
HWMOD_NO_IDLE) { + oh->_int_flags |= _HWMOD_SKIP_ENABLE; + return 0; + } + pr_debug("omap_hwmod: %s: idling\n", oh->name); if (oh->_state != _HWMOD_STATE_ENABLED) { @@ -2504,6 +2509,8 @@ static int __init _init(struct omap_hwmod *oh, void *data) oh->flags |= HWMOD_INIT_NO_RESET; if (of_find_property(np, "ti,no-idle-on-init", NULL)) oh->flags |= HWMOD_INIT_NO_IDLE; + if (of_find_property(np, "ti,no-idle", NULL)) + oh->flags |= HWMOD_NO_IDLE; } oh->_state = _HWMOD_STATE_INITIALIZED; @@ -2630,7 +2637,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh) * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data - * it should be set by the core code as a runtime flag during startup */ - if ((oh->flags & HWMOD_INIT_NO_IDLE) && + if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) && (postsetup_state == _HWMOD_STATE_IDLE)) { oh->_int_flags |= _HWMOD_SKIP_ENABLE; postsetup_state = _HWMOD_STATE_ENABLED; diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 76bce11c85a4..7c7a31169475 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -525,6 +525,8 @@ struct omap_hwmod_omap4_prcm { * or idled. * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to * operate and they need to be handled at the same time as the main_clk. + * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain + * IPs like CPSW on DRA7, where clocks to this module cannot be disabled. */ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -541,6 +543,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_SWSUP_SIDLE_ACT (1 << 12) #define HWMOD_RECONFIG_IO_CHAIN (1 << 13) #define HWMOD_OPT_CLKS_NEEDED (1 << 14) +#define HWMOD_NO_IDLE (1 << 15) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h index 9cb11215ceba..b3a4ed5289ec 100644 --- a/arch/arm/mach-shmobile/common.h +++ b/arch/arm/mach-shmobile/common.h @@ -4,7 +4,6 @@ extern void shmobile_init_delay(void); extern void shmobile_boot_vector(void); extern unsigned long shmobile_boot_fn; -extern unsigned long shmobile_boot_arg; extern unsigned long shmobile_boot_size; extern void shmobile_smp_boot(void); extern void shmobile_smp_sleep(void); diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S index fa5248c52399..5e503d91ad70 100644 --- a/arch/arm/mach-shmobile/headsmp-scu.S +++ b/arch/arm/mach-shmobile/headsmp-scu.S @@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu) b secondary_startup ENDPROC(shmobile_boot_scu) - - .text - .align 2 - .globl shmobile_scu_base -shmobile_scu_base: - .space 4 diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index 330c1fc63197..32e0bf6e3ccb 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -24,7 +24,6 @@ .arm .align 12 ENTRY(shmobile_boot_vector) - ldr r0, 2f ldr r1, 1f bx r1 @@ -34,9 +33,6 @@ ENDPROC(shmobile_boot_vector) .globl shmobile_boot_fn shmobile_boot_fn: 1: .space 4 - .globl shmobile_boot_arg -shmobile_boot_arg: -2: .space 4 .globl shmobile_boot_size shmobile_boot_size: .long . 
- shmobile_boot_vector @@ -46,13 +42,15 @@ shmobile_boot_size: */ ENTRY(shmobile_smp_boot) - @ r0 = MPIDR_HWID_BITMASK mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR - and r0, r1, r0 @ r0 = cpu_logical_map() value + and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK + @ r0 = cpu_logical_map() value mov r1, #0 @ r1 = CPU index - adr r5, 1f @ array of per-cpu mpidr values - adr r6, 2f @ array of per-cpu functions - adr r7, 3f @ array of per-cpu arguments + adr r2, 1f + ldmia r2, {r5, r6, r7} + add r5, r5, r2 @ array of per-cpu mpidr values + add r6, r6, r2 @ array of per-cpu functions + add r7, r7, r2 @ array of per-cpu arguments shmobile_smp_boot_find_mpidr: ldr r8, [r5, r1, lsl #2] @@ -80,12 +78,18 @@ ENTRY(shmobile_smp_sleep) b shmobile_smp_boot ENDPROC(shmobile_smp_sleep) + .align 2 +1: .long shmobile_smp_mpidr - . + .long shmobile_smp_fn - 1b + .long shmobile_smp_arg - 1b + + .bss .globl shmobile_smp_mpidr shmobile_smp_mpidr: -1: .space NR_CPUS * 4 + .space NR_CPUS * 4 .globl shmobile_smp_fn shmobile_smp_fn: -2: .space NR_CPUS * 4 + .space NR_CPUS * 4 .globl shmobile_smp_arg shmobile_smp_arg: -3: .space NR_CPUS * 4 + .space NR_CPUS * 4 diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c index 911884f7e28b..aba75c89f9c1 100644 --- a/arch/arm/mach-shmobile/platsmp-apmu.c +++ b/arch/arm/mach-shmobile/platsmp-apmu.c @@ -123,7 +123,6 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus, { /* install boot code shared by all CPUs */ shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); - shmobile_boot_arg = MPIDR_HWID_BITMASK; /* perform per-cpu setup */ apmu_parse_cfg(apmu_init_cpu, apmu_config, num); diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c index 64663110ab6c..081a097c9219 100644 --- a/arch/arm/mach-shmobile/platsmp-scu.c +++ b/arch/arm/mach-shmobile/platsmp-scu.c @@ -17,6 +17,9 @@ #include <asm/smp_scu.h> #include "common.h" + +void __iomem *shmobile_scu_base; + static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -41,7 +44,6 @@ void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus) { /* install boot code shared by all CPUs */ shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); - shmobile_boot_arg = MPIDR_HWID_BITMASK; /* enable SCU and cache coherency on booting CPU */ scu_enable(shmobile_scu_base); diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c index b854fe2095ad..0b024a9dbd43 100644 --- a/arch/arm/mach-shmobile/smp-r8a7779.c +++ b/arch/arm/mach-shmobile/smp-r8a7779.c @@ -92,8 +92,6 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus) { /* Map the reset vector (in headsmp-scu.S, headsmp.S) */ __raw_writel(__pa(shmobile_boot_vector), AVECR); - shmobile_boot_fn = virt_to_phys(shmobile_boot_scu); - shmobile_boot_arg = (unsigned long)shmobile_scu_base; /* setup r8a7779 specific SCU bits */ shmobile_scu_base = IOMEM(R8A7779_SCU_BASE); diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 4b4058db0781..66353caa35b9 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -173,7 +173,7 @@ unsigned long arch_mmap_rnd(void) { unsigned long rnd; - rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); + rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index cf30daff8932..d19b1ad29b07 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -49,6 +49,9 @@ static int
change_memory_common(unsigned long addr, int numpages, WARN_ON_ONCE(1); } + if (!numpages) + return 0; + if (start < MODULES_VADDR || start >= MODULES_END) return -EINVAL; diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 307237cfe728..b5e3f6d42b88 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -88,7 +88,7 @@ Image: vmlinux Image.%: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -zinstall install: vmlinux +zinstall install: $(Q)$(MAKE) $(build)=$(boot) $@ %.dtb: scripts diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index abcbba2f01ba..305c552b5ec1 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -34,10 +34,10 @@ $(obj)/Image.lzma: $(obj)/Image FORCE $(obj)/Image.lzo: $(obj)/Image FORCE $(call if_changed,lzo) -install: $(obj)/Image +install: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" -zinstall: $(obj)/Image.gz +zinstall: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image.gz System.map "$(INSTALL_PATH)" diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh index 12ed78aa6f0c..d91e1f022573 100644 --- a/arch/arm64/boot/install.sh +++ b/arch/arm64/boot/install.sh @@ -20,6 +20,20 @@ # $4 - default install path (blank if root directory) # +verify () { + if [ ! -f "$1" ]; then + echo "" 1>&2 + echo " *** Missing file: $1" 1>&2 + echo ' *** You need to run "make" before "make install".' 1>&2 + echo "" 1>&2 + exit 1 + fi +} + +# Make sure the files actually exist +verify "$2" +verify "$3" + # User may have a custom install script if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 05d9e16c0dfd..7a3d22a46faf 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -294,7 +294,7 @@ static struct crypto_alg aes_algs[] = { { .cra_blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .ivsize = 0, .setkey = aes_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, @@ -371,7 +371,7 @@ static struct crypto_alg aes_algs[] = { { .cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .ivsize = 0, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 2731d3b25ed2..8ec88e5b290f 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void) u64 irqstat; asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); + dsb(sy); return irqstat; } diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 738a95f93e49..d201d4b396d1 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -107,8 +107,6 @@ #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) -#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B) - /* VTCR_EL2 Registers bits */ #define VTCR_EL2_RES1 (1 << 31) #define VTCR_EL2_PS_MASK (7 << 16) @@ -182,6 +180,7 @@ #define CPTR_EL2_TCPAC (1 << 31) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) +#define CPTR_EL2_DEFAULT 0x000033ff /* Hyp Debug Configuration Register bits */ 
#define MDCR_EL2_TDRA (1 << 11) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 3066328cd86b..779a5872a2c5 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu) static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) { - u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; + u32 mode; - if (vcpu_mode_is_32bit(vcpu)) + if (vcpu_mode_is_32bit(vcpu)) { + mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; return mode > COMPAT_PSR_MODE_USR; + } + + mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; return mode != PSR_MODE_EL0t; } diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index bf464de33f52..819aff5d593f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -34,7 +34,7 @@ /* * VMALLOC and SPARSEMEM_VMEMMAP ranges. * - * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array + * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array * (rounded up to PUD_SIZE). * VMALLOC_START: beginning of the kernel VA space * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space, @@ -51,7 +51,9 @@ #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) -#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) +#define VMEMMAP_START (VMALLOC_END + SZ_64K) +#define vmemmap ((struct page *)VMEMMAP_START - \ + SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT)) #define FIRST_USER_ADDRESS 0UL diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 8aee3aeec3e6..c536c9e307b9 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -226,11 +226,28 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr) return retval; } +static void send_user_sigtrap(int si_code) +{ + struct pt_regs *regs = current_pt_regs(); + siginfo_t info = { + .si_signo = SIGTRAP, + .si_errno = 0, + .si_code = si_code, + .si_addr = (void __user *)instruction_pointer(regs), + }; + + if (WARN_ON(!user_mode(regs))) + return; + + if (interrupts_enabled(regs)) + local_irq_enable(); + + force_sig_info(SIGTRAP, &info, current); +} + static int single_step_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - siginfo_t info; - /* * If we are stepping a pending breakpoint, call the hw_breakpoint * handler first. 
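The send_user_sigtrap() helper added above centralises the three open-coded siginfo_t initialisations in this file, warns if it is ever reached from kernel mode, and re-enables interrupts before delivering the signal. As a hedged illustration (not part of the patch; the handler name, file name and brk immediate are invented for the example), a brk instruction executed by a user process should arrive as SIGTRAP with si_code TRAP_BRKPT and si_addr pointing at the trapping instruction:

/*
 * Illustrative user-space probe only; assumes an AArch64 Linux target
 * and a default glibc build. Build with: gcc -o brktest brktest.c
 */
#include <signal.h>
#include <unistd.h>

static void on_sigtrap(int sig, siginfo_t *info, void *uctx)
{
	/* Stick to async-signal-safe calls; _exit() also avoids
	 * re-executing the brk instruction after the handler returns. */
	if (info->si_code == TRAP_BRKPT)
		write(STDOUT_FILENO, "got SIGTRAP\n", 12);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = on_sigtrap,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGTRAP, &sa, NULL);
#ifdef __aarch64__
	asm volatile("brk #0");	/* takes the brk_handler() path below */
#endif
	return 1;	/* reached only if the brk was compiled out */
}

The same helper also backs the single-step (TRAP_HWBKPT) and AArch32 breakpoint paths in the hunks that follow, so all three report through one code path.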
@@ -239,11 +256,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr, return 0; if (user_mode(regs)) { - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_HWBKPT; - info.si_addr = (void __user *)instruction_pointer(regs); - force_sig_info(SIGTRAP, &info, current); + send_user_sigtrap(TRAP_HWBKPT); /* * ptrace will disable single step unless explicitly @@ -307,17 +320,8 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) static int brk_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - siginfo_t info; - if (user_mode(regs)) { - info = (siginfo_t) { - .si_signo = SIGTRAP, - .si_errno = 0, - .si_code = TRAP_BRKPT, - .si_addr = (void __user *)instruction_pointer(regs), - }; - - force_sig_info(SIGTRAP, &info, current); + send_user_sigtrap(TRAP_BRKPT); } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { pr_warning("Unexpected kernel BRK exception at EL1\n"); return -EFAULT; @@ -328,7 +332,6 @@ static int brk_handler(unsigned long addr, unsigned int esr, int aarch32_break_handler(struct pt_regs *regs) { - siginfo_t info; u32 arm_instr; u16 thumb_instr; bool bp = false; @@ -359,14 +362,7 @@ int aarch32_break_handler(struct pt_regs *regs) if (!bp) return -EFAULT; - info = (siginfo_t) { - .si_signo = SIGTRAP, - .si_errno = 0, - .si_code = TRAP_BRKPT, - .si_addr = pc, - }; - - force_sig_info(SIGTRAP, &info, current); + send_user_sigtrap(TRAP_BRKPT); return 0; } diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 999633bd7294..352f7abd91c9 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -89,6 +89,7 @@ __efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy); __efistub_memmove = KALLSYMS_HIDE(__pi_memmove); __efistub_memset = KALLSYMS_HIDE(__pi_memset); __efistub_strlen = KALLSYMS_HIDE(__pi_strlen); +__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen); __efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); __efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); __efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index e33fe33876ab..fd10eb663868 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -145,6 +145,10 @@ ENTRY(cpu_resume_mmu) ENDPROC(cpu_resume_mmu) .popsection cpu_resume_after_mmu: +#ifdef CONFIG_KASAN + mov x0, sp + bl kasan_unpoison_remaining_stack +#endif mov x0, #0 // return zero on success ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 4fad9787ab46..d9751a4769e7 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -44,14 +44,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) unsigned long irq_stack_ptr; /* - * Use raw_smp_processor_id() to avoid false-positives from - * CONFIG_DEBUG_PREEMPT. get_wchan() calls unwind_frame() on sleeping - * task stacks, we can be pre-empted in this case, so - * {raw_,}smp_processor_id() may give us the wrong value. Sleeping - * tasks can't ever be on an interrupt stack, so regardless of cpu, - * the checks will always fail. + * Switching between stacks is valid when tracing current and in + * non-preemptible context. 
*/ - irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); + if (tsk == current && !preemptible()) + irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); + else + irq_stack_ptr = 0; low = frame->sp; /* irq stacks are not THREAD_SIZE aligned */ @@ -64,8 +63,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) return -EINVAL; frame->sp = fp + 0x10; - frame->fp = *(unsigned long *)(fp); - frame->pc = *(unsigned long *)(fp + 8); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk && tsk->ret_stack && diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index cbedd724f48e..c5392081b49b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -146,9 +146,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; - unsigned long irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); + unsigned long irq_stack_ptr; int skip; + /* + * Switching between stacks is valid when tracing current and in + * non-preemptible context. + */ + if (tsk == current && !preemptible()) + irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); + else + irq_stack_ptr = 0; + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); if (!tsk) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index fcb778899a38..9e54ad7c240a 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) u64 val; val = kvm_arm_timer_get_reg(vcpu, reg->id); - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; } /** diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 3e568dcd907b..d073b5a216f7 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -64,7 +64,7 @@ __do_hyp_init: mrs x4, tcr_el1 ldr x5, =TCR_EL2_MASK and x4, x4, x5 - ldr x5, =TCR_EL2_FLAGS + mov x5, #TCR_EL2_RES1 orr x4, x4, x5 #ifndef CONFIG_ARM64_VA_BITS_48 @@ -85,14 +85,16 @@ __do_hyp_init: ldr_l x5, idmap_t0sz bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH #endif + /* + * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in + * TCR_EL2 and VTCR_EL2. + */ + mrs x5, ID_AA64MMFR0_EL1 + bfi x4, x5, #16, #3 + msr tcr_el2, x4 ldr x4, =VTCR_EL2_FLAGS - /* - * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in - * VTCR_EL2. 
- */ - mrs x5, ID_AA64MMFR0_EL1 bfi x4, x5, #16, #3 /* * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ca8f5a5e2f96..f0e7bdfae134 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, hcr_el2); /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); - write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2); + + val = CPTR_EL2_DEFAULT; + val |= CPTR_EL2_TTA | CPTR_EL2_TFP; + write_sysreg(val, cptr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); } @@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(HCR_RW, hcr_el2); write_sysreg(0, hstr_el2); write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); - write_sysreg(0, cptr_el2); + write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 9142e082f5f3..5dd2a26444ec 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -147,16 +147,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) max_lr_idx = vtr_to_max_lr_idx(val); nr_pri_bits = vtr_to_nr_pri_bits(val); - switch (nr_pri_bits) { - case 7: - write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); - write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); - case 6: - write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); - default: - write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); - } - switch (nr_pri_bits) { case 7: write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); @@ -167,6 +157,16 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); } + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); + write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); + } + switch (max_lr_idx) { case 15: write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 648112e90ed5..4d1ac81870d2 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -27,7 +27,11 @@ #define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ PSR_I_BIT | PSR_D_BIT) -#define EL1_EXCEPT_SYNC_OFFSET 0x200 + +#define CURRENT_EL_SP_EL0_VECTOR 0x0 +#define CURRENT_EL_SP_ELx_VECTOR 0x200 +#define LOWER_EL_AArch64_VECTOR 0x400 +#define LOWER_EL_AArch32_VECTOR 0x600 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) { @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, *fsr = 0x14; } +enum exception_type { + except_type_sync = 0, + except_type_irq = 0x80, + except_type_fiq = 0x100, + except_type_serror = 0x180, +}; + +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type) +{ + u64 exc_offset; + + switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) { + case PSR_MODE_EL1t: + exc_offset = CURRENT_EL_SP_EL0_VECTOR; + break; + case PSR_MODE_EL1h: + exc_offset = CURRENT_EL_SP_ELx_VECTOR; + break; + case PSR_MODE_EL0t: + exc_offset = LOWER_EL_AArch64_VECTOR; + break; + default: + exc_offset = LOWER_EL_AArch32_VECTOR; + } + + return vcpu_sys_reg(vcpu, VBAR_EL1) + 
exc_offset + type; +} + static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) { unsigned long cpsr = *vcpu_cpsr(vcpu); @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr *vcpu_spsr(vcpu) = cpsr; *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; - *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; vcpu_sys_reg(vcpu, FAR_EL1) = addr; @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu) *vcpu_spsr(vcpu) = cpsr; *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; - *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; /* * Build an unknown exception, depending on the instruction diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index eec3598b4184..2e90371cfb37 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu, if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + /* Handled */ + return 0; } - - /* Handled */ - return 0; } /* Not handled */ @@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, } /** - * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access + * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ @@ -1095,7 +1094,7 @@ out: } /** - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access + * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S index 2ca665711bf2..eae38da6e0bb 100644 --- a/arch/arm64/lib/strnlen.S +++ b/arch/arm64/lib/strnlen.S @@ -168,4 +168,4 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */ .Lhit_limit: mov len, limit ret -ENDPROC(strnlen) +ENDPIPROC(strnlen) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 331c4ca6205c..a6e757cbab77 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void) ret = register_iommu_dma_ops_notifier(&platform_bus_type); if (!ret) ret = register_iommu_dma_ops_notifier(&amba_bustype); + + /* handle devices queued before this arch_initcall */ + if (!ret) + __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL); return ret; } arch_initcall(__iommu_dma_init); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 92ddac1e8ca2..abe2a9542b3a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -371,6 +371,13 @@ static int __kprobes do_translation_fault(unsigned long addr, return 0; } +static int do_alignment_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + do_bad_area(addr, esr, regs); + return 0; +} + /* * This abort handler always returns "fault". 
*/ @@ -418,7 +425,7 @@ static struct fault_info { { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, { do_bad, SIGBUS, 0, "unknown 32" }, - { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, + { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" }, { do_bad, SIGBUS, 0, "unknown 34" }, { do_bad, SIGBUS, 0, "unknown 35" }, { do_bad, SIGBUS, 0, "unknown 36" }, diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 82d607c3614e..da30529bb1f6 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -306,10 +306,6 @@ static __init int setup_hugepagesz(char *opt) hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); } else if (ps == PUD_SIZE) { hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); - } else if (ps == (PAGE_SIZE * CONT_PTES)) { - hugetlb_add_hstate(CONT_PTE_SHIFT); - } else if (ps == (PMD_SIZE * CONT_PMDS)) { - hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); } else { pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); return 0; @@ -317,13 +313,3 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); - -#ifdef CONFIG_ARM64_64K_PAGES -static __init int add_default_hugepagesz(void) -{ - if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) - hugetlb_add_hstate(CONT_PMD_SHIFT); - return 0; -} -arch_initcall(add_default_hugepagesz); -#endif diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f3b061e67bfe..7802f216a67a 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -319,8 +319,8 @@ void __init mem_init(void) #endif MLG(VMALLOC_START, VMALLOC_END), #ifdef CONFIG_SPARSEMEM_VMEMMAP - MLG((unsigned long)vmemmap, - (unsigned long)vmemmap + VMEMMAP_SIZE), + MLG(VMEMMAP_START, + VMEMMAP_START + VMEMMAP_SIZE), MLM((unsigned long)virt_to_page(PAGE_OFFSET), (unsigned long)virt_to_page(high_memory)), #endif diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 4c893b5189dd..232f787a088a 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -53,10 +53,10 @@ unsigned long arch_mmap_rnd(void) #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) - rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); + rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); else #endif - rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); + rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index fc96e814188e..d1fc4796025e 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -108,6 +108,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -266,6 +268,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -366,6 +374,7 @@ CONFIG_ARIADNE=y # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set CONFIG_HYDRA=y CONFIG_APNE=y 
CONFIG_ZORRO8390=y diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 05c904f08d9d..9bfe8be3658c 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -264,6 +266,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -344,6 +352,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index d572b731c510..ebdcfae55580 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -264,6 +266,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -353,6 +361,7 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 11a30c65ad44..8acc65e54995 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -262,6 +264,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -343,6 +351,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 6630a5154b9d..0c6a3d52b26e 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -106,6 
+106,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -264,6 +266,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -345,6 +353,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 1d90b71d0903..12a8a6cb32f4 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -105,6 +105,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -266,6 +268,12 @@ CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -362,6 +370,7 @@ CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y +# CONFIG_NET_VENDOR_NETRONOME is not set CONFIG_MAC8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 1fd21c1ca87f..64ff2dcb34c8 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -115,6 +115,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -276,6 +278,12 @@ CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -404,6 +412,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y +# CONFIG_NET_VENDOR_NETRONOME is not set CONFIG_HYDRA=y CONFIG_MAC8390=y CONFIG_NE2000=y diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 74e10f79d7b1..07fc6abcfe0c 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -103,6 +103,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -261,6 +263,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m 
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -343,6 +351,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 7034e716f166..69903ded88f7 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -262,6 +264,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -343,6 +351,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index f7deb5f702a6..bd8401686dde 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -262,6 +264,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -352,6 +360,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 0ce79eb0d805..5f9fb3ab9636 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -259,6 +261,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m 
CONFIG_BATMAN_ADV_DAT=y @@ -340,6 +348,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 4cb787e4991f..5d1c674530e2 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m CONFIG_NFT_QUEUE=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -259,6 +261,12 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_ATALK=m CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y @@ -341,6 +349,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index f9d96bf86910..bafaff6dcd7b 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 376 +#define NR_syscalls 377 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 36cf129de663..0ca729665f29 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -381,5 +381,6 @@ #define __NR_userfaultfd 373 #define __NR_membarrier 374 #define __NR_mlock2 375 +#define __NR_copy_file_range 376 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 282cd903f4c4..8bb94261ff97 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -396,3 +396,4 @@ ENTRY(sys_call_table) .long sys_userfaultfd .long sys_membarrier .long sys_mlock2 /* 375 */ + .long sys_copy_file_range diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 57a945e832f4..74a3db92da1b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2085,7 +2085,7 @@ config PAGE_SIZE_32KB config PAGE_SIZE_64KB bool "64kB" - depends on !CPU_R3000 && !CPU_TX39XX + depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000 help Using 64kB page size will result in higher performance kernel at the price of higher memory consumption.
This option is available on diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi index 459b9b252c3b..d61b1616b604 100644 --- a/arch/mips/boot/dts/brcm/bcm6328.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi @@ -74,6 +74,7 @@ timer: timer@10000040 { compatible = "syscon"; reg = <0x10000040 0x2c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi index 4fc7ecee273c..1a7efa883c5e 100644 --- a/arch/mips/boot/dts/brcm/bcm7125.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi @@ -98,6 +98,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7125-sun-top-ctrl", "syscon"; reg = <0x404000 0x60c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi index a3039bb53477..d4bf52cfcf17 100644 --- a/arch/mips/boot/dts/brcm/bcm7346.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi @@ -118,6 +118,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7346-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi index 4274ff41ec21..8e2501694d03 100644 --- a/arch/mips/boot/dts/brcm/bcm7358.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi @@ -112,6 +112,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7358-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi index 0dcc9163c27b..7e5f76040fb8 100644 --- a/arch/mips/boot/dts/brcm/bcm7360.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi @@ -112,6 +112,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7360-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi index 2f3f9fc2c478..c739ea77acb0 100644 --- a/arch/mips/boot/dts/brcm/bcm7362.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi @@ -118,6 +118,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7362-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi index bee221b3b568..5f55d0a50a28 100644 --- a/arch/mips/boot/dts/brcm/bcm7420.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi @@ -99,6 +99,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7420-sun-top-ctrl", "syscon"; reg = <0x404000 0x60c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi index 571f30f52e3f..e24d41ab4e30 100644 --- a/arch/mips/boot/dts/brcm/bcm7425.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi @@ -100,6 +100,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi index 614ee211f71a..8b9432cc062b 100644 --- a/arch/mips/boot/dts/brcm/bcm7435.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi @@ -114,6 +114,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index cefb7a596878..e090fc388e02 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -227,7 +227,7 @@ struct mips_elf_abiflags_v0 { int 
__res = 1; \ struct elfhdr *__h = (hdr); \ \ - if (__h->e_machine != EM_MIPS) \ + if (!mips_elf_check_machine(__h)) \ __res = 0; \ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ __res = 0; \ @@ -258,7 +258,7 @@ struct mips_elf_abiflags_v0 { int __res = 1; \ struct elfhdr *__h = (hdr); \ \ - if (__h->e_machine != EM_MIPS) \ + if (!mips_elf_check_machine(__h)) \ __res = 0; \ if (__h->e_ident[EI_CLASS] != ELFCLASS64) \ __res = 0; \ @@ -285,6 +285,11 @@ struct mips_elf_abiflags_v0 { #endif /* !defined(ELF_ARCH) */ +#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS) + +#define vmcore_elf32_check_arch mips_elf_check_machine +#define vmcore_elf64_check_arch mips_elf_check_machine + struct mips_abi; extern struct mips_abi mips_abi; diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 9cbf383b8834..f06f97bd62df 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -179,6 +179,10 @@ static inline void lose_fpu_inatomic(int save, struct task_struct *tsk) if (save) _save_fp(tsk); __disable_fpu(); + } else { + /* FPU should not have been left enabled with no owner */ + WARN(read_c0_status() & ST0_CU1, + "Orphaned FPU left enabled"); } KSTK_STATUS(tsk) &= ~ST0_CU1; clear_tsk_thread_flag(tsk, TIF_USEDFPU); diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h index 8ebd3f579b84..3ed10a8d7865 100644 --- a/arch/mips/include/asm/octeon/octeon-feature.h +++ b/arch/mips/include/asm/octeon/octeon-feature.h @@ -128,7 +128,8 @@ static inline int octeon_has_feature(enum octeon_feature feature) case OCTEON_FEATURE_PCIE: return OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) - || OCTEON_IS_MODEL(OCTEON_CN6XXX); + || OCTEON_IS_MODEL(OCTEON_CN6XXX) + || OCTEON_IS_MODEL(OCTEON_CN7XXX); case OCTEON_FEATURE_SRIO: return OCTEON_IS_MODEL(OCTEON_CN63XX) diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 3f832c3dd8f5..041153f5cf93 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count; * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. 
*/ -#define TASK_SIZE 0x7fff8000UL +#define TASK_SIZE 0x80000000UL #endif #define STACK_TOP_MAX TASK_SIZE diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index a71da576883c..eebf39549606 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -289,7 +289,7 @@ .set reorder .set noat mfc0 a0, CP0_STATUS - li v1, 0xff00 + li v1, ST0_CU1 | ST0_IM ori a0, STATMASK xori a0, STATMASK mtc0 a0, CP0_STATUS @@ -330,7 +330,7 @@ ori a0, STATMASK xori a0, STATMASK mtc0 a0, CP0_STATUS - li v1, 0xff00 + li v1, ST0_CU1 | ST0_FR | ST0_IM and a0, v1 LONG_L v0, PT_STATUS(sp) nor v1, $0, v1 diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 6499d93ae68d..47bc45a67e9b 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task, /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ if ((config_enabled(CONFIG_32BIT) || test_tsk_thread_flag(task, TIF_32BIT_REGS)) && - (regs->regs[2] == __NR_syscall)) { + (regs->regs[2] == __NR_syscall)) i++; - n++; - } while (n--) ret |= mips_get_syscall_arg(args++, task, regs, i++); diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 90f03a7da665..3129795de940 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -380,16 +380,17 @@ #define __NR_userfaultfd (__NR_Linux + 357) #define __NR_membarrier (__NR_Linux + 358) #define __NR_mlock2 (__NR_Linux + 359) +#define __NR_copy_file_range (__NR_Linux + 360) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 359 +#define __NR_Linux_syscalls 360 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 359 +#define __NR_O32_Linux_syscalls 360 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -717,16 +718,17 @@ #define __NR_userfaultfd (__NR_Linux + 317) #define __NR_membarrier (__NR_Linux + 318) #define __NR_mlock2 (__NR_Linux + 319) +#define __NR_copy_file_range (__NR_Linux + 320) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 319 +#define __NR_Linux_syscalls 320 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 319 +#define __NR_64_Linux_syscalls 320 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1058,15 +1060,16 @@ #define __NR_userfaultfd (__NR_Linux + 321) #define __NR_membarrier (__NR_Linux + 322) #define __NR_mlock2 (__NR_Linux + 323) +#define __NR_copy_file_range (__NR_Linux + 324) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 323 +#define __NR_Linux_syscalls 324 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 323 +#define __NR_N32_Linux_syscalls 324 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 8c6d76c9b2d6..d9907e57e9b9 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -270,7 +270,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask) } EXPORT_SYMBOL(jz_gpio_port_get_value); -#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f) +#define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f) static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq) { diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 1188e00bb120..1b992c6e3d8e 100644 --- 
a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -35,7 +35,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; int __res = 1; \ struct elfhdr *__h = (hdr); \ \ - if (__h->e_machine != EM_MIPS) \ + if (!mips_elf_check_machine(__h)) \ __res = 0; \ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ __res = 0; \ diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 928767858b86..abd3affe5fb3 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -47,7 +47,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; int __res = 1; \ struct elfhdr *__h = (hdr); \ \ - if (__h->e_machine != EM_MIPS) \ + if (!mips_elf_check_machine(__h)) \ __res = 0; \ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ __res = 0; \ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index f2975d4d1e44..eddd5fd6fdfa 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -65,12 +65,10 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); status |= KU_USER; regs->cp0_status = status; - clear_used_math(); - clear_fpu_owner(); - init_dsp(); - clear_thread_flag(TIF_USEDMSA); + lose_fpu(0); clear_thread_flag(TIF_MSA_CTX_LIVE); - disable_msa(); + clear_used_math(); + init_dsp(); regs->cp0_epc = pc; regs->regs[29] = sp; } diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 5ce3b746cedc..b4ac6374a38f 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -125,7 +125,7 @@ LEAF(_restore_fp_context) END(_restore_fp_context) .set reorder - .type fault@function + .type fault, @function .ent fault fault: li v0, -EFAULT jr ra diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index f09546ee2cdc..17732f876eff 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -358,7 +358,7 @@ LEAF(_restore_msa_all_upper) .set reorder - .type fault@function + .type fault, @function .ent fault fault: li v0, -EFAULT # failure jr ra diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 2d23c834ba96..a56317444bda 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -595,3 +595,4 @@ EXPORT(sys_call_table) PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 + PTR sys_copy_file_range /* 4360 */ diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index deac63315d0e..2b2dc14610d0 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -433,4 +433,5 @@ EXPORT(sys_call_table) PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 + PTR sys_copy_file_range /* 5320 */ .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 5a69eb48d0a8..2bf5c8593d91 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -423,4 +423,5 @@ EXPORT(sysn32_call_table) PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 + PTR sys_copy_file_range .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index e4b6d7c97822..c5b759e584c7 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -578,4 +578,5 @@ EXPORT(sys32_call_table) PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 + PTR sys_copy_file_range /* 4360 */ .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c 
b/arch/mips/kernel/setup.c index 569a7d5242dd..5fdaf8bdcd2e 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -782,6 +782,7 @@ static inline void prefill_possible_map(void) {} void __init setup_arch(char **cmdline_p) { cpu_probe(); + mips_cm_probe(); prom_init(); setup_early_fdc_console(); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index bafcb7ad5c85..bf14da9f3e33 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -663,7 +663,7 @@ static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) return -1; } -static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) +static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode) { if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { int rd = (opcode & MM_RS) >> 16; @@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) asmlinkage void do_ov(struct pt_regs *regs) { enum ctx_state prev_state; - siginfo_t info; + siginfo_t info = { + .si_signo = SIGFPE, + .si_code = FPE_INTOVF, + .si_addr = (void __user *)regs->cp0_epc, + }; prev_state = exception_enter(); die_if_kernel("Integer overflow", regs); - info.si_code = FPE_INTOVF; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); exception_exit(prev_state); } @@ -874,7 +874,7 @@ out: void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) { - siginfo_t info; + siginfo_t info = { 0 }; char b[40]; #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP @@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, else info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; - info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; @@ -1119,11 +1118,12 @@ no_r2_instr: if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; - if (unlikely(get_user(mmop[0], epc) < 0)) + if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0)) status = SIGSEGV; - if (unlikely(get_user(mmop[1], epc) < 0)) + if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0)) status = SIGSEGV; - opcode = (mmop[0] << 16) | mmop[1]; + opcode = mmop[0]; + opcode = (opcode << 16) | mmop[1]; if (status < 0) status = simulate_rdhwr_mm(regs, opcode); @@ -1369,26 +1369,12 @@ asmlinkage void do_cpu(struct pt_regs *regs) if (unlikely(compute_return_epc(regs) < 0)) break; - if (get_isa16_mode(regs->cp0_epc)) { - unsigned short mmop[2] = { 0 }; - - if (unlikely(get_user(mmop[0], epc) < 0)) - status = SIGSEGV; - if (unlikely(get_user(mmop[1], epc) < 0)) - status = SIGSEGV; - opcode = (mmop[0] << 16) | mmop[1]; - - if (status < 0) - status = simulate_rdhwr_mm(regs, opcode); - } else { + if (!get_isa16_mode(regs->cp0_epc)) { if (unlikely(get_user(opcode, epc) < 0)) status = SIGSEGV; if (!cpu_has_llsc && status < 0) status = simulate_llsc(regs, opcode); - - if (status < 0) - status = simulate_rdhwr_normal(regs, opcode); } if (status < 0) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 8bc3977576e6..3110447ab1e9 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { void __user *uaddr = (void __user *)(long)reg->addr; - return copy_to_user(uaddr, vs, 16); + return copy_to_user(uaddr, vs, 16) ? 
-EFAULT : 0; } else { return -EINVAL; } @@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { void __user *uaddr = (void __user *)(long)reg->addr; - return copy_from_user(vs, uaddr, 16); + return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0; } else { return -EINVAL; } diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 5c81fdd032c3..353037699512 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -146,7 +146,7 @@ unsigned long arch_mmap_rnd(void) { unsigned long rnd; - rnd = (unsigned long)get_random_int(); + rnd = get_random_long(); rnd <<= PAGE_SHIFT; if (TASK_IS_32BIT_ADDR) rnd &= 0xfffffful; @@ -174,7 +174,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) static inline unsigned long brk_rnd(void) { - unsigned long rnd = get_random_int(); + unsigned long rnd = get_random_long(); rnd = rnd << PAGE_SHIFT; /* 8MB for 32bit, 256MB for 64bit */ diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 3bd0597d9c3d..91dec32c77b7 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void) sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; - c->scache.sets = 64 << sets; + if (sets) + c->scache.sets = 64 << sets; line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; - c->scache.linesz = 2 << line_sz; + if (line_sz) + c->scache.linesz = 2 << line_sz; assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; @@ -176,13 +178,12 @@ static int __init mips_sc_probe_cm3(void) c->scache.waysize = c->scache.sets * c->scache.linesz; c->scache.waybit = __ffs(c->scache.waysize); - c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; + if (c->scache.linesz) { + c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; + return 1; + } - return 1; -} - -void __weak platform_early_l2_init(void) -{ + return 0; } static inline int __init mips_sc_probe(void) @@ -194,12 +195,6 @@ static inline int __init mips_sc_probe(void) /* Mark as not present until probe completed */ c->scache.flags |= MIPS_CACHE_NOT_PRESENT; - /* - * Do we need some platform specific probing before - * we configure L2? 
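The kvm_mips_get_reg()/kvm_mips_set_reg() fixes above address a common ioctl pitfall: copy_to_user() and copy_from_user() return the number of bytes left uncopied, not a negative errno, so their raw result must not be handed back to userspace. A kernel-context sketch of the corrected pattern (the helper name is invented):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy a 128-bit register image out to userspace; 0 on success. */
static int copy_u128_to_user(void __user *uaddr, const void *vs)
{
        /* A nonzero return means "bytes not copied" -- map it to -EFAULT. */
        return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
}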
- */ - platform_early_l2_init(); - if (mips_cm_revision() >= CM_REV_CM3) return mips_sc_probe_cm3(); diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 571148c5fd0b..dc2c5214809d 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -293,7 +293,6 @@ mips_pci_controller: console_config(); #endif /* Early detection of CMP support */ - mips_cm_probe(); mips_cpc_probe(); if (!register_cps_smp_ops()) @@ -304,10 +303,3 @@ mips_pci_controller: return; register_up_smp_ops(); } - -void platform_early_l2_init(void) -{ - /* L2 configuration lives in the CM3 */ - if (mips_cm_revision() >= CM_REV_CM3) - mips_cm_probe(); -} diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index a009ee458934..1ae932c2d78b 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -297,12 +297,12 @@ static int mt7620_pci_probe(struct platform_device *pdev) return PTR_ERR(rstpcie0); bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res); - if (!bridge_base) - return -ENOMEM; + if (IS_ERR(bridge_base)) + return PTR_ERR(bridge_base); pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res); - if (!pcie_base) - return -ENOMEM; + if (IS_ERR(pcie_base)) + return PTR_ERR(pcie_base); iomem_resource.start = 0; iomem_resource.end = ~0; diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h index f84ff12574b7..6d8276cd25ca 100644 --- a/arch/parisc/include/asm/floppy.h +++ b/arch/parisc/include/asm/floppy.h @@ -33,7 +33,7 @@ * floppy accesses go through the track buffer. */ #define _CROSS_64KB(a,s,vdma) \ -(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) +(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 35bdccbb2036..b75039f92116 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -361,8 +361,9 @@ #define __NR_membarrier (__NR_Linux + 343) #define __NR_userfaultfd (__NR_Linux + 344) #define __NR_mlock2 (__NR_Linux + 345) +#define __NR_copy_file_range (__NR_Linux + 346) -#define __NR_Linux_syscalls (__NR_mlock2 + 1) +#define __NR_Linux_syscalls (__NR_copy_file_range + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 9585c81f755f..ce0b2b4075c7 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - long ret = 0; - /* Do the secure computing check first. */ secure_computing_strict(regs->gr[20]); if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) - ret = -1L; + tracehook_report_syscall_entry(regs)) { + /* + * Tracing decided this syscall should not happen or the + * debugger stored an invalid system call number. Skip + * the system call and the system call restart handling. + */ + regs->gr[20] = -1UL; + goto out; + } #ifdef CONFIG_64BIT if (!is_compat_task()) @@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); - return ret ? 
: regs->gr[20]; +out: + return regs->gr[20]; } void do_syscall_trace_exit(struct pt_regs *regs) diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 3fbd7252a4b2..fbafa0d0e2bf 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -343,7 +343,7 @@ tracesys_next: #endif comiclr,>>= __NR_Linux_syscalls, %r20, %r0 - b,n .Lsyscall_nosys + b,n .Ltracesys_nosys LDREGX %r20(%r19), %r19 @@ -359,6 +359,9 @@ tracesys_next: be 0(%sr7,%r19) ldo R%tracesys_exit(%r2),%r2 +.Ltracesys_nosys: + ldo -ENOSYS(%r0),%r28 /* set errno */ + /* Do *not* call this function on the gateway page, because it makes a direct call to syscall_trace. */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index d4ffcfbc9885..585d50fc75c0 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -441,6 +441,7 @@ ENTRY_SAME(membarrier) ENTRY_SAME(userfaultfd) ENTRY_SAME(mlock2) /* 345 */ + ENTRY_SAME(copy_file_range) .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index e4824fd04bb7..9faa18c4f3f7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -557,7 +557,7 @@ choice config PPC_4K_PAGES bool "4k page size" - select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S + select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64 config PPC_16K_PAGES bool "16k page size" @@ -566,7 +566,7 @@ config PPC_16K_PAGES config PPC_64K_PAGES bool "64k page size" depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64) - select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S + select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64 config PPC_256K_PAGES bool "256k page size" diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 8d1c41d28318..ac07a30a7934 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -281,6 +281,10 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE +extern void pmdp_huge_split_prepare(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index c5eb86f3d452..867c39b45df6 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -81,6 +81,7 @@ struct pci_dn; #define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ #define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ #define EEH_PE_REMOVED (1 << 10) /* Removed permanently */ +#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */ struct eeh_pe { int type; /* PE type: PHB/Bus/Device */ diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index 8e86b48d0369..32e36b16773f 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -57,12 +57,14 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit, extern void hcall_tracepoint_regfunc(void); extern void hcall_tracepoint_unregfunc(void); -TRACE_EVENT_FN(hcall_entry, +TRACE_EVENT_FN_COND(hcall_entry, TP_PROTO(unsigned long opcode, unsigned long *args), TP_ARGS(opcode, args), + 
TP_CONDITION(cpu_online(raw_smp_processor_id())), + TP_STRUCT__entry( __field(unsigned long, opcode) ), @@ -76,13 +78,15 @@ TRACE_EVENT_FN(hcall_entry, hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc ); -TRACE_EVENT_FN(hcall_exit, +TRACE_EVENT_FN_COND(hcall_exit, TP_PROTO(unsigned long opcode, unsigned long retval, unsigned long *retbuf), TP_ARGS(opcode, retval, retbuf), + TP_CONDITION(cpu_online(raw_smp_processor_id())), + TP_STRUCT__entry( __field(unsigned long, opcode) __field(unsigned long, retval) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 938742135ee0..650cfb31ea3d 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata) eeh_pcid_put(dev); if (driver->err_handler && driver->err_handler->error_detected && - driver->err_handler->slot_reset && - driver->err_handler->resume) + driver->err_handler->slot_reset) return NULL; } @@ -564,6 +563,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) */ eeh_pe_state_mark(pe, EEH_PE_KEEP); if (bus) { + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); pci_lock_rescan_remove(); pcibios_remove_pci_devices(bus); pci_unlock_rescan_remove(); @@ -803,6 +803,7 @@ perm_error: * the their PCI config any more. */ if (frozen_bus) { + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); pci_lock_rescan_remove(); @@ -886,6 +887,7 @@ static void eeh_handle_special_event(void) continue; /* Notify all devices to be down */ + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); bus = eeh_pe_bus_get(phb_pe); eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index ca9e5371930e..98f81800e00c 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -928,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) bus = pe->phb->bus; } else if (pe->type & EEH_PE_BUS || pe->type & EEH_PE_DEVICE) { - if (pe->bus) { + if (pe->state & EEH_PE_PRI_BUS) { bus = pe->bus; goto out; } diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 05e804cdecaa..aec9a1b1d25b 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp) * If the breakpoint is unregistered between a hw_breakpoint_handler() * and the single_step_dabr_instruction(), then cleanup the breakpoint * restoration variables to prevent dangling pointers. + * FIXME, this should not be using bp->ctx at all! Sayeth peterz. 
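The EEH_PE_PRI_BUS hunks above replace a bare NULL test on the cached pe->bus pointer with an explicit validity flag that is cleared before the bus can go stale. A generic, userspace-sized sketch of the flag-guarded cache pattern (all names invented):

#include <stdio.h>

#define CACHE_VALID 0x1

struct pe {
        unsigned int flags;
        const char *bus;                /* cached lookup result */
};

static const char *pe_bus_get(struct pe *pe)
{
        if (pe->flags & CACHE_VALID)    /* trust the cache only when flagged */
                return pe->bus;
        pe->bus = "bus-from-slow-lookup";
        pe->flags |= CACHE_VALID;
        return pe->bus;
}

static void pe_bus_invalidate(struct pe *pe)
{
        pe->flags &= ~CACHE_VALID;      /* the stale pointer is never trusted */
}

int main(void)
{
        struct pe pe = { 0 };

        printf("%s\n", pe_bus_get(&pe));
        pe_bus_invalidate(&pe);
        printf("%s\n", pe_bus_get(&pe));
        return 0;
}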
*/ - if (bp->ctx && bp->ctx->task) + if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) bp->ctx->task->thread.last_hit_ubp = NULL; } diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index ac64ffdb52c8..08b7a40de5f8 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -340,7 +340,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) if (name[0] == '.') { if (strcmp(name+1, "TOC.") == 0) syms[i].st_shndx = SHN_ABS; - memmove(name, name+1, strlen(name)); + syms[i].st_name++; } } } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index dccc87e8fee5..3c5736e52a14 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1768,9 +1768,9 @@ static inline unsigned long brk_rnd(void) /* 8MB for 32bit, 1GB for 64bit */ if (is_32bit_task()) - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); + rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT))); else - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); + rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT))); return rnd << PAGE_SHIFT; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 6ee26de9a1de..25ae2c9913c3 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) std r6, VCPU_ACOP(r9) stw r7, VCPU_GUEST_PID(r9) std r8, VCPU_WORT(r9) + /* + * Restore various registers to 0, where non-zero values + * set by the guest could disrupt the host. + */ + li r0, 0 + mtspr SPRN_IAMR, r0 + mtspr SPRN_CIABR, r0 + mtspr SPRN_DAWRX, r0 + mtspr SPRN_TCSCR, r0 + mtspr SPRN_WORT, r0 + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ + li r0, 1 + sldi r0, r0, 31 + mtspr SPRN_MMCRS, r0 8: /* Save and reset AMR and UAMOR before turning on the MMU */ diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c index 0762c1e08c88..edb09912f0c9 100644 --- a/arch/powerpc/mm/hash64_64k.c +++ b/arch/powerpc/mm/hash64_64k.c @@ -111,7 +111,13 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, */ if (!(old_pte & _PAGE_COMBO)) { flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags); - old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND; + /* + * Clear the old slot details from the old and new pte. + * On hash insert failure we use the old pte value and we don't + * want slot information there if we have an insert failure. + */ + old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND); + new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND); goto htab_insert_hpte; } /* diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 49b152b0f926..eb2accdd76fd 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c @@ -78,9 +78,19 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, * base page size. This is because demote_segment won't flush * hash page table entries. */ - if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) + if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) { flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K, ssize, flags); + /* + * With THP, we also clear the slot information with + * respect to all the 64K hash ptes mapping the 16MB + * page. They are all invalid now. This makes sure we + * don't find the slot valid when we fault with 4k + * base page size.
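The __hash_page_4K hunk above also repairs an operator-precedence bug: in the old line, ~ bound only to _PAGE_HASHPTE, so the mask never cleared the GIX and SECOND slot bits. A runnable demonstration with made-up flag values:

#include <stdio.h>
#include <stdint.h>

#define F_HASHPTE 0x0800ULL     /* hypothetical flag bits */
#define F_GIX     0x7000ULL
#define F_SECOND  0x8000ULL

int main(void)
{
        uint64_t pte = 0xffffULL;
        /* Buggy: ~ applies to F_HASHPTE alone, and OR-ing the other flags
         * back in leaves a mask that clears only F_HASHPTE. */
        uint64_t buggy = pte & (~F_HASHPTE | F_GIX | F_SECOND);
        /* Fixed: invert the union of all three fields. */
        uint64_t fixed = pte & ~(F_HASHPTE | F_GIX | F_SECOND);

        printf("buggy: %#llx\n", (unsigned long long)buggy);   /* 0xf7ff */
        printf("fixed: %#llx\n", (unsigned long long)fixed);   /* 0x7ff */
        return 0;
}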
+ * + */ + memset(hpte_slot_array, 0, PTE_FRAG_SIZE); + } } valid = hpte_valid(hpte_slot_array, index); diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 7e6d0880813f..83a8be791e06 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -8,6 +8,8 @@ #include #include +#include + #ifdef CONFIG_PPC_FSL_BOOK3E #ifdef CONFIG_PPC64 static inline int tlb1_next(void) @@ -60,6 +62,14 @@ static inline void book3e_tlb_lock(void) unsigned long tmp; int token = smp_processor_id() + 1; + /* + * Besides being unnecessary in the absence of SMT, this + * check prevents trying to do lbarx/stbcx. on e5500 which + * doesn't implement either feature. + */ + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + asm volatile("1: lbarx %0, 0, %1;" "cmpwi %0, 0;" "bne 2f;" @@ -80,6 +90,9 @@ static inline void book3e_tlb_unlock(void) { struct paca_struct *paca = get_paca(); + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + isync(); paca->tcd_ptr->lock = 0; } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 0f0502e12f6c..4087705ba90f 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -59,9 +59,9 @@ unsigned long arch_mmap_rnd(void) /* 8MB for 32bit, 1GB for 64bit */ if (is_32bit_task()) - rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT)); + rnd = get_random_long() % (1<<(23-PAGE_SHIFT)); else - rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT)); + rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT)); return rnd << PAGE_SHIFT; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 3124a20d0fab..cdf2123d46db 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -646,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) return pgtable; } +void pmdp_huge_split_prepare(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + VM_BUG_ON(REGION_ID(address) != USER_REGION_ID); + + /* + * We can't mark the pmd none here, because that will cause a race + * against exit_mmap. We need to continue to mark the pmd TRANS HUGE + * while we split, but at the same time we want the rest of the ppc64 + * code not to insert a hash pte on this, because we will be modifying + * the deposited pgtable in the caller of this function. Hence + * clear the _PAGE_USER so that we move the fault handling to a + * higher level function and that will serialize against the ptl. + * We need to flush the existing hash pte entries here even though + * the translation is still valid, because we will withdraw the + * pgtable_t after this. + */ + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0); +} + + /* * set a new huge pmd. We should not be called for updating * an existing pmd entry. That should go via pmd_hugepage_update. @@ -663,10 +685,20 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); } +/* + * We use this to invalidate a pmdp entry before switching from a + * hugepte to a regular pmd entry. + */ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); + + /* + * This ensures that generic code that relies on IRQ disabling + * to prevent a parallel THP split works as expected.
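The comment added to pmdp_invalidate() leans on a subtle guarantee: a CPU cannot take an IPI while it has interrupts disabled, so once every CPU has acknowledged a do-nothing IPI, any IRQ-disabled page-table walk that began before the invalidation must have finished. A hedged kernel-context sketch of that step (the wrapper name is invented; the patch calls kick_all_cpus_sync() directly):

#include <linux/smp.h>

/*
 * Returns only after every CPU has handled an empty IPI -- something a
 * CPU cannot do with interrupts off -- so all walkers relying on
 * local_irq_disable() have left their critical sections.
 */
static void serialize_against_irq_disabled_walkers(void)
{
        kick_all_cpus_sync();
}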
+ */ + kick_all_cpus_sync(); } /* diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 5f152b95ca0c..87f47e55aab6 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) * PCI devices of the PE are expected to be removed prior * to PE reset. */ - if (!edev->pe->bus) + if (!(edev->pe->state & EEH_PE_PRI_BUS)) { edev->pe->bus = pci_find_bus(hose->global_number, pdn->busno); + if (edev->pe->bus) + edev->pe->state |= EEH_PE_PRI_BUS; + } /* * Enable EEH explicitly so that we will do EEH check diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 573ae1994097..f90dc04395bf 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3180,6 +3180,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose) static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .dma_dev_setup = pnv_pci_dma_dev_setup, + .dma_bus_setup = pnv_pci_dma_bus_setup, #ifdef CONFIG_PCI_MSI .setup_msi_irqs = pnv_setup_msi_irqs, .teardown_msi_irqs = pnv_teardown_msi_irqs, diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 2f55c86df703..b1ef84a6c9d1 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -599,6 +599,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages, u64 rpn = __pa(uaddr) >> tbl->it_page_shift; long i; + if (proto_tce & TCE_PCI_WRITE) + proto_tce |= TCE_PCI_READ; + for (i = 0; i < npages; i++) { unsigned long newtce = proto_tce | ((rpn + i) << tbl->it_page_shift); @@ -620,6 +623,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index, BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl)); + if (newtce & TCE_PCI_WRITE) + newtce |= TCE_PCI_READ; + oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)); *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE); *direction = iommu_tce_direction(oldtce); @@ -760,6 +766,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev) phb->dma_dev_setup(phb, pdev); } +void pnv_pci_dma_bus_setup(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + struct pnv_phb *phb = hose->private_data; + struct pnv_ioda_pe *pe; + + list_for_each_entry(pe, &phb->ioda.pe_list, list) { + if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) + continue; + + if (!pe->pbus) + continue; + + if (bus->number == ((pe->rid >> 8) & 0xFF)) { + pe->pbus = bus; + break; + } + } +} + void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 7f56313e8d72..00691a9b99af 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -242,6 +242,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev); +extern void pnv_pci_dma_bus_setup(struct pci_bus *bus); extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h index ea91ddfe54eb..629c90865a07 100644 --- a/arch/s390/include/asm/fpu/internal.h +++ b/arch/s390/include/asm/fpu/internal.h @@ -40,6 +40,7 @@ static inline 
void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs) static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) { fpregs->pad = 0; + fpregs->fpc = fpu->fpc; if (MACHINE_HAS_VX) convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); else @@ -49,6 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) { + fpu->fpc = fpregs->fpc; if (MACHINE_HAS_VX) convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); else diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h index 7aa799134a11..a52b6cca873d 100644 --- a/arch/s390/include/asm/livepatch.h +++ b/arch/s390/include/asm/livepatch.h @@ -37,7 +37,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->psw.addr = ip; } #else -#error Live patching support is disabled; check CONFIG_LIVEPATCH +#error Include linux/livepatch.h, not asm/livepatch.h #endif #endif diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index fb1b93ea3e3f..e485817f7b1a 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -15,17 +15,25 @@ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + spin_lock_init(&mm->context.list_lock); + INIT_LIST_HEAD(&mm->context.pgtable_list); + INIT_LIST_HEAD(&mm->context.gmap_list); cpumask_clear(&mm->context.cpu_attach_mask); atomic_set(&mm->context.attach_count, 0); mm->context.flush_mm = 0; - mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; - mm->context.asce_bits |= _ASCE_TYPE_REGION3; #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste; mm->context.has_pgste = 0; mm->context.use_skey = 0; #endif - mm->context.asce_limit = STACK_TOP_MAX; + if (mm->context.asce_limit == 0) { + /* context created by exec, set asce limit to 4TB */ + mm->context.asce_bits = _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | _ASCE_TYPE_REGION3; + mm->context.asce_limit = STACK_TOP_MAX; + } else if (mm->context.asce_limit == (1UL << 31)) { + mm_inc_nr_pmds(mm); + } crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; } @@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev, static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { - if (oldmm->context.asce_limit < mm->context.asce_limit) - crst_table_downgrade(mm, oldmm->context.asce_limit); } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 7b7858f158b4..d7cc79fb6191 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - spin_lock_init(&mm->context.list_lock); - INIT_LIST_HEAD(&mm->context.pgtable_list); - INIT_LIST_HEAD(&mm->context.gmap_list); - return (pgd_t *) crst_table_alloc(mm); + unsigned long *table = crst_table_alloc(mm); + + if (!table) + return NULL; + if (mm->context.asce_limit == (1UL << 31)) { + /* Forking a compat process with 2 page table levels */ + if (!pgtable_pmd_page_ctor(virt_to_page(table))) { + crst_table_free(mm, table); + return NULL; + } + } + return (pgd_t *) table; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + if (mm->context.asce_limit == (1UL << 31)) + pgtable_pmd_page_dtor(virt_to_page(pgd)); + crst_table_free(mm, (unsigned 
long *) pgd); } -#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 66c94417c0ba..4af60374eba0 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs, /* Restore high gprs from signal stack */ if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high, - sizeof(&sregs_ext->gprs_high))) + sizeof(sregs_ext->gprs_high))) return -EFAULT; for (i = 0; i < NUM_GPRS; i++) *(__u32 *)®s->gprs[i] = gprs_high[i]; diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index c5febe84eba6..03c2b469c472 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -16,7 +16,7 @@ __HEAD ENTRY(startup_continue) - tm __LC_STFLE_FAC_LIST+6,0x80 # LPP available ? + tm __LC_STFLE_FAC_LIST+5,0x80 # LPP available ? jz 0f xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid mvi __LC_LPP,0x80 # and set LPP_MAGIC diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index cfcba2dd9bb5..0943b11a2f6e 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -260,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry, void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { - unsigned long head; + unsigned long head, frame_size; struct stack_frame *head_sf; if (user_mode(regs)) return; + frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); head = regs->gprs[15]; head_sf = (struct stack_frame *) head; @@ -273,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, return; head = head_sf->back_chain; - head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack); + head = __store_trace(entry, head, + S390_lowcore.async_stack + frame_size - ASYNC_SIZE, + S390_lowcore.async_stack + frame_size); __store_trace(entry, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 5acba3cb7220..8f64ebd63767 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -59,26 +59,32 @@ static unsigned long save_context_stack(struct stack_trace *trace, } } -void save_stack_trace(struct stack_trace *trace) +static void __save_stack_trace(struct stack_trace *trace, unsigned long sp) { - register unsigned long sp asm ("15"); - unsigned long orig_sp, new_sp; + unsigned long new_sp, frame_size; - orig_sp = sp; - new_sp = save_context_stack(trace, orig_sp, - S390_lowcore.panic_stack - PAGE_SIZE, - S390_lowcore.panic_stack, 1); - if (new_sp != orig_sp) - return; + frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + new_sp = save_context_stack(trace, sp, + S390_lowcore.panic_stack + frame_size - PAGE_SIZE, + S390_lowcore.panic_stack + frame_size, 1); new_sp = save_context_stack(trace, new_sp, - S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack, 1); - if (new_sp != orig_sp) - return; + S390_lowcore.async_stack + frame_size - ASYNC_SIZE, + S390_lowcore.async_stack + frame_size, 1); save_context_stack(trace, new_sp, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE, 1); } + +void save_stack_trace(struct stack_trace *trace) +{ + register unsigned long r15 asm ("15"); + unsigned long sp; + + sp = r15; + 
__save_stack_trace(trace, sp); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} EXPORT_SYMBOL_GPL(save_stack_trace); void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) @@ -86,6 +92,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) unsigned long sp, low, high; sp = tsk->thread.ksp; + if (tsk == current) { + /* Get current stack pointer. */ + asm volatile("la %0,0(15)" : "=a" (sp)); + } low = (unsigned long) task_stack_page(tsk); high = (unsigned long) task_pt_regs(tsk); save_context_stack(trace, sp, low, high, 0); @@ -93,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + unsigned long sp; + + sp = kernel_stack_pointer(regs); + __save_stack_trace(trace, sp); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} +EXPORT_SYMBOL_GPL(save_stack_trace_regs); diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c index 21a5df99552b..dde7654f5c68 100644 --- a/arch/s390/kernel/trace.c +++ b/arch/s390/kernel/trace.c @@ -18,6 +18,9 @@ void trace_s390_diagnose_norecursion(int diag_nr) unsigned long flags; unsigned int *depth; + /* Avoid lockdep recursion. */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + return; local_irq_save(flags); depth = this_cpu_ptr(&diagnose_trace_depth); if (*depth == 0) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4af21c771f9b..03dfe9c667f4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2381,7 +2381,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) /* manually convert vector registers if necessary */ if (MACHINE_HAS_VX) { - convert_vx_to_fp(fprs, current->thread.fpu.vxrs); + convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, fprs, 128); } else { diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index fec59c067d0d..792f9c63fbca 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -93,15 +93,19 @@ static int __memcpy_real(void *dest, void *src, size_t count) */ int memcpy_real(void *dest, void *src, size_t count) { + int irqs_disabled, rc; unsigned long flags; - int rc; if (!count) return 0; - local_irq_save(flags); - __arch_local_irq_stnsm(0xfbUL); + flags = __arch_local_irq_stnsm(0xf8UL); + irqs_disabled = arch_irqs_disabled_flags(flags); + if (!irqs_disabled) + trace_hardirqs_off(); rc = __memcpy_real(dest, src, count); - local_irq_restore(flags); + if (!irqs_disabled) + trace_hardirqs_on(); + __arch_local_irq_ssm(flags); return rc; } diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c index fe0bfe370c45..1884e1759529 100644 --- a/arch/s390/oprofile/backtrace.c +++ b/arch/s390/oprofile/backtrace.c @@ -54,12 +54,13 @@ __show_trace(unsigned int *depth, unsigned long sp, void s390_backtrace(struct pt_regs * const regs, unsigned int depth) { - unsigned long head; + unsigned long head, frame_size; struct stack_frame* head_sf; if (user_mode(regs)) return; + frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); head = regs->gprs[15]; head_sf = (struct stack_frame*)head; @@ -68,8 +69,9 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth) head = head_sf->back_chain; - head = 
__show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack); + head = __show_trace(&depth, head, + S390_lowcore.async_stack + frame_size - ASYNC_SIZE, + S390_lowcore.async_stack + frame_size); __show_trace(&depth, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index eaee14637d93..8496a074bd0e 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -24,7 +24,13 @@ LDFLAGS := -m elf32_sparc export BITS := 32 UTS_MACHINE := sparc +# We are adding -Wa,-Av8 to KBUILD_CFLAGS to deal with a specs bug in some +# versions of gcc. Some gcc versions won't pass -Av8 to binutils when you +# give -mcpu=v8. This silently worked with older binutils versions but +# no longer does. KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 +KBUILD_CFLAGS += -Wa,-Av8 + KBUILD_AFLAGS += -m32 -Wa,-Av8 else diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index 1c26d440d288..b6de8b10a55b 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -422,8 +422,9 @@ #define __NR_listen 354 #define __NR_setsockopt 355 #define __NR_mlock2 356 +#define __NR_copy_file_range 357 -#define NR_syscalls 357 +#define NR_syscalls 358 /* Bitmask values returned from kern_features system call. */ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 33c02b15f478..a83707c83be8 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -948,7 +948,24 @@ linux_syscall_trace: cmp %o0, 0 bne 3f mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ld [%sp + STACKFRAME_SZ + PT_G1], %g1 + sethi %hi(sys_call_table), %l7 + ld [%sp + STACKFRAME_SZ + PT_I0], %i0 + or %l7, %lo(sys_call_table), %l7 + ld [%sp + STACKFRAME_SZ + PT_I1], %i1 + ld [%sp + STACKFRAME_SZ + PT_I2], %i2 + ld [%sp + STACKFRAME_SZ + PT_I3], %i3 + ld [%sp + STACKFRAME_SZ + PT_I4], %i4 + ld [%sp + STACKFRAME_SZ + PT_I5], %i5 + cmp %g1, NR_syscalls + bgeu 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 mov %i0, %o0 + ld [%l7 + %l4], %l7 mov %i1, %o1 mov %i2, %o2 mov %i3, %o3 diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index afbaba52d2f1..d127130bf424 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog) mov %o1, %o4 mov HV_FAST_MACH_SET_WATCHDOG, %o5 ta HV_FAST_TRAP + brnz,a,pn %o4, 0f stx %o1, [%o4] - retl +0: retl nop ENDPROC(sun4v_mach_set_watchdog) diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index d88beff47bab..39aaec173f66 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) unsigned char fenab; int err; - flush_user_windows(); + synchronize_user_stack(); if (get_thread_wsaved() || (((unsigned long)ucp) & (sizeof(unsigned long)-1)) || (!__access_ok(ucp, sizeof(*ucp)))) diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c index a92d5d2c46a3..9e034f29dcc5 100644 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ b/arch/sparc/kernel/sparc_ksyms_64.c @@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf); EXPORT_SYMBOL(sun4v_niagara_setperf); EXPORT_SYMBOL(sun4v_niagara2_getperf); EXPORT_SYMBOL(sun4v_niagara2_setperf); +EXPORT_SYMBOL(sun4v_mach_set_watchdog); /* from hweight.S */ EXPORT_SYMBOL(__arch_hweight8);
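The entry.S change above reloads %g1 and the argument registers from the saved frame after tracing and re-validates the syscall number against NR_syscalls, since a tracer may have rewritten any of them. The same control flow in C, roughly (table contents invented):

#include <errno.h>
#include <stdio.h>

#define NR_SYSCALLS 2

static long sys_a(void) { return 1; }
static long sys_b(void) { return 2; }

static long (*const sys_call_table[NR_SYSCALLS])(void) = { sys_a, sys_b };

static long dispatch(unsigned long nr)
{
        if (nr >= NR_SYSCALLS)          /* re-check: a tracer may have changed nr */
                return -ENOSYS;
        return sys_call_table[nr]();    /* only now index the table */
}

int main(void)
{
        printf("%ld %ld\n", dispatch(1), dispatch(99));
        return 0;
}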
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index c690c8e16a96..b489e9759518 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -264,7 +264,7 @@ static unsigned long mmap_rnd(void) unsigned long rnd = 0UL; if (current->flags & PF_RANDOMIZE) { - unsigned long val = get_random_int(); + unsigned long val = get_random_long(); if (test_thread_flag(TIF_32BIT)) rnd = (val % (1UL << (23UL-PAGE_SHIFT))); else diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index bb0008927598..c4a1b5c40e4e 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -158,7 +158,25 @@ linux_syscall_trace32: add %sp, PTREGS_OFF, %o0 brnz,pn %o0, 3f mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + sethi %hi(sys_call_table32), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + or %l7, %lo(sys_call_table32), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + + cmp %g1, NR_syscalls + bgeu,pn %xcc, 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 srl %i0, 0, %o0 + lduw [%l7 + %l4], %l7 srl %i4, 0, %o4 srl %i1, 0, %o1 srl %i2, 0, %o2 @@ -170,7 +188,25 @@ linux_syscall_trace: add %sp, PTREGS_OFF, %o0 brnz,pn %o0, 3f mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + sethi %hi(sys_call_table64), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + or %l7, %lo(sys_call_table64), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + + cmp %g1, NR_syscalls + bgeu,pn %xcc, 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 mov %i0, %o0 + lduw [%l7 + %l4], %l7 mov %i1, %o1 mov %i2, %o2 mov %i3, %o3 diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index e663b6c78de2..6c3dd6c52f8b 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -88,4 +88,4 @@ sys_call_table: /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen -/*355*/ .long sys_setsockopt, sys_mlock2 +/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 1557121f4cdc..12b524cfcfa0 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -89,7 +89,7 @@ sys_call_table32: /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen - .word compat_sys_setsockopt, sys_mlock2 + .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range #endif /* CONFIG_COMPAT */ @@ -170,4 +170,4 @@ sys_call_table: /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen - .word sys_setsockopt, sys_mlock2 + 
.word sys_setsockopt, sys_mlock2, sys_copy_file_range diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c index 9bdf67a092a5..b60a9f8cda75 100644 --- a/arch/um/kernel/reboot.c +++ b/arch/um/kernel/reboot.c @@ -12,6 +12,7 @@ #include void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); static void kill_off_processes(void) { diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index fc8be0e3a4ff..57acbd67d85d 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs) struct ksignal ksig; int handled_sig = 0; - if (get_signal(&ksig)) { + while (get_signal(&ksig)) { handled_sig = 1; /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9af2e6338400..c46662f64c39 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -475,6 +475,7 @@ config X86_UV depends on X86_64 depends on X86_EXTENDED_PLATFORM depends on NUMA + depends on EFI depends on X86_X2APIC depends on PCI ---help--- @@ -777,8 +778,8 @@ config HPET_TIMER HPET is the next generation timer replacing legacy 8254s. The HPET provides a stable time base on SMP systems, unlike the TSC, but it is more expensive to access, - as it is off-chip. You can find the HPET spec at - . + as it is off-chip. The interface used is documented + in the HPET spec, revision 1. You can safely choose Y here. However, HPET will only be activated if the platform and the BIOS support this feature. diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 77d8c5112900..bb3e376d0f33 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -294,6 +294,7 @@ sysenter_past_esp: pushl $__USER_DS /* pt_regs->ss */ pushl %ebp /* pt_regs->sp (stashed in bp) */ pushfl /* pt_regs->flags (except IF = 0) */ + ASM_CLAC /* Clear AC after saving FLAGS */ orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ pushl $__USER_CS /* pt_regs->cs */ pushl $0 /* pt_regs->ip = 0 (placeholder) */ diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index ff1c6d61f332..3c990eeee40b 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat) * Interrupts are off on entry. 
*/ PARAVIRT_ADJUST_EXCEPTION_FRAME + ASM_CLAC /* Do this early to minimize exposure */ SWAPGS /* diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index 19c099afa861..e795f5274217 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -41,7 +41,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->ip = ip; } #else -#error Live patching support is disabled; check CONFIG_LIVEPATCH +#error Include linux/livepatch.h, not asm/livepatch.h #endif #endif /* _ASM_X86_LIVEPATCH_H */ diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 46873fbd44e1..d08eacd298c2 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); +extern bool mp_should_keep_irq(struct device *dev); + struct pci_raw_ops { int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 2d5a50cb61a2..20c11d1aa4cc 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -766,7 +766,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. */ -#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) +#define thread_saved_pc(t) READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8)) #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) extern unsigned long KSTK_ESP(struct task_struct *task); diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index f5dcb5204dcd..3fe0eac59462 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) switch (n) { case 1: + __uaccess_begin(); __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); + __uaccess_end(); return ret; case 2: + __uaccess_begin(); __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); + __uaccess_end(); return ret; case 4: + __uaccess_begin(); __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); + __uaccess_end(); return ret; case 8: + __uaccess_begin(); __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8); + __uaccess_end(); return ret; } } @@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) switch (n) { case 1: + __uaccess_begin(); __get_user_size(*(u8 *)to, from, 1, ret, 1); + __uaccess_end(); return ret; case 2: + __uaccess_begin(); __get_user_size(*(u16 *)to, from, 2, ret, 2); + __uaccess_end(); return ret; case 4: + __uaccess_begin(); __get_user_size(*(u32 *)to, from, 4, ret, 4); + __uaccess_end(); return ret; } } @@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) switch (n) { case 1: + __uaccess_begin(); __get_user_size(*(u8 *)to, from, 1, ret, 1); + __uaccess_end(); return ret; case 2: + __uaccess_begin(); __get_user_size(*(u16 *)to, from, 2, ret, 2); + __uaccess_end(); return ret; case 4: + __uaccess_begin(); __get_user_size(*(u32 *)to, from, 4, ret, 4); + __uaccess_end(); return ret; } } @@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, switch (n) { case 
1: + __uaccess_begin(); __get_user_size(*(u8 *)to, from, 1, ret, 1); + __uaccess_end(); return ret; case 2: + __uaccess_begin(); __get_user_size(*(u16 *)to, from, 2, ret, 2); + __uaccess_end(); return ret; case 4: + __uaccess_begin(); __get_user_size(*(u32 *)to, from, 4, ret, 4); + __uaccess_end(); return ret; } } diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 968d57dd54c9..f320ee32d5a1 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h @@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, { if (xen_pci_frontend && xen_pci_frontend->enable_msi) return xen_pci_frontend->enable_msi(dev, vectors); - return -ENODEV; + return -ENOSYS; } static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) { @@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, { if (xen_pci_frontend && xen_pci_frontend->enable_msix) return xen_pci_frontend->enable_msix(dev, vectors, nvec); - return -ENODEV; + return -ENOSYS; } static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev) { diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index d1daead5fcdd..adb3eaf8fe2a 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -16,6 +16,7 @@ #include #include +#include #include "../../realmode/rm/wakeup.h" #include "sleep.h" @@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void) saved_magic = 0x123456789abcdef0L; #endif /* CONFIG_64BIT */ + /* + * Pause/unpause graph tracing around do_suspend_lowlevel as it has + * inconsistent call/return info after it jumps to the wakeup vector. + */ + pause_graph_tracing(); do_suspend_lowlevel(); + unpause_graph_tracing(); return 0; } diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c index 49742746a6c9..8836fc9fa84b 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c @@ -323,6 +323,8 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu) return 0; fail: + if (amd_uncore_nb) + *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; kfree(uncore_nb); return -ENOMEM; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 1505587d06e9..b9b09fec173b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, u16 sel; la = seg_base(ctxt, addr.seg) + addr.ea; - *linear = la; *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: + *linear = la; if (is_noncanonical_address(la)) goto bad; @@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, goto bad; break; default: + *linear = la = (u32)la; usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) @@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, if (size > *max_size) goto bad; } - la &= (u32)-1; break; } if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 95a955de5964..1e7a49bfc94f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3721,13 +3721,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { + bool uses_nx = context->nx || context->base_role.smep_andnot_wp; + /* * Passing "true" to the last argument is okay; it adds a check * on bit 8 of the SPTEs which KVM doesn't use 
anyway. */ __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, boot_cpu_data.x86_phys_bits, - context->shadow_root_level, context->nx, + context->shadow_root_level, uses_nx, guest_cpuid_has_gbpages(vcpu), is_pse(vcpu), true); } diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 6c9fed957cce..2ce4f05e81d3 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, return ret; kvm_vcpu_mark_page_dirty(vcpu, table_gfn); - walker->ptes[level] = pte; + walker->ptes[level - 1] = pte; } return 0; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e2951b6edbbc..9bd8f44baded 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -596,6 +596,8 @@ struct vcpu_vmx { /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; + + u64 current_tsc_ratio; }; enum segment_cache_field { @@ -1811,6 +1813,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, return; } break; + case MSR_IA32_PEBS_ENABLE: + /* PEBS needs a quiescent period after being disabled (to write + * a record). Disabling PEBS through VMX MSR swapping doesn't + * provide that period, so a CPU could write host's record into + * guest's memory. + */ + wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } for (i = 0; i < m->nr; ++i) @@ -1848,26 +1857,31 @@ static void reload_tss(void) static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { - u64 guest_efer; - u64 ignore_bits; + u64 guest_efer = vmx->vcpu.arch.efer; + u64 ignore_bits = 0; - guest_efer = vmx->vcpu.arch.efer; + if (!enable_ept) { + /* + * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing + * host CPUID is more efficient than testing guest CPUID + * or CR4. Host SMEP is anyway a requirement for guest SMEP. + */ + if (boot_cpu_has(X86_FEATURE_SMEP)) + guest_efer |= EFER_NX; + else if (!(guest_efer & EFER_NX)) + ignore_bits |= EFER_NX; + } /* - * NX is emulated; LMA and LME handled by hardware; SCE meaningless - * outside long mode + * LMA and LME handled by hardware; SCE meaningless outside long mode. 
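The reworked update_transition_efer() above assembles ignore_bits per-feature and, in the fallback path that continues below, splices host values into the guest view for exactly those bits. The merge is plain bit algebra; a runnable sketch using the architectural EFER bit positions:

#include <stdio.h>
#include <stdint.h>

#define EFER_SCE (1ULL << 0)    /* syscall enable */
#define EFER_NX  (1ULL << 11)   /* no-execute enable */

/* Bits listed in 'ignore' track the host; the guest keeps the rest. */
static uint64_t merge_efer(uint64_t guest, uint64_t host, uint64_t ignore)
{
        return (guest & ~ignore) | (host & ignore);
}

int main(void)
{
        uint64_t guest = 0, host = EFER_NX | EFER_SCE;

        /* the guest view inherits the host's NX and SCE values: 0x801 */
        printf("%#llx\n",
               (unsigned long long)merge_efer(guest, host, EFER_NX | EFER_SCE));
        return 0;
}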
*/ - ignore_bits = EFER_NX | EFER_SCE; + ignore_bits |= EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif - guest_efer &= ~ignore_bits; - guest_efer |= host_efer & ignore_bits; - vmx->guest_msrs[efer_offset].data = guest_efer; - vmx->guest_msrs[efer_offset].mask = ~ignore_bits; clear_atomic_switch_msr(vmx, MSR_EFER); @@ -1878,16 +1892,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { - guest_efer = vmx->vcpu.arch.efer; if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; - } + } else { + guest_efer &= ~ignore_bits; + guest_efer |= host_efer & ignore_bits; - return true; + vmx->guest_msrs[efer_offset].data = guest_efer; + vmx->guest_msrs[efer_offset].mask = ~ignore_bits; + + return true; + } } static unsigned long segment_base(u16 selector) @@ -2127,14 +2146,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ - /* Setup TSC multiplier */ - if (cpu_has_vmx_tsc_scaling()) - vmcs_write64(TSC_MULTIPLIER, - vcpu->arch.tsc_scaling_ratio); - vmx->loaded_vmcs->cpu = cpu; } + /* Setup TSC multiplier */ + if (kvm_has_tsc_control && + vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { + vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; + vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); + } + vmx_vcpu_pi_load(vcpu, cpu); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4244c2baf57d..eaf6ee8c28b8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6618,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * KVM_DEBUGREG_WONT_EXIT again. */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { - int i; - WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); kvm_x86_ops->sync_dirty_debug_regs(vcpu); - for (i = 0; i < KVM_NR_DB_REGS; i++) - vcpu->arch.eff_db[i] = vcpu->arch.db[i]; + kvm_update_dr0123(vcpu); + kvm_update_dr6(vcpu); + kvm_update_dr7(vcpu); + vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; } /* diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 982ce34f4a9b..27f89c79a44b 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string) /* * copy_user_nocache - Uncached memory copy with exception handling - * This will force destination/source out of cache for more performance. + * This will force destination out of cache for more performance. + * + * Note: Cached memory copy is used when destination or size is not + * naturally aligned. That is: + * - Require 8-byte alignment when size is 8 bytes or larger. + * - Require 4-byte alignment when size is 4 bytes. 
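+ *
+ * Worked example (illustrative, assuming an 8-byte aligned
+ * destination): a 100-byte copy performs one 64-byte 4x8-byte
+ * nocache pass, four 8-byte movnti stores and one 4-byte movnti
+ * store, leaving no byte remainder; only a destination that is
+ * not 4-byte aligned drops to the cached byte-copy loop.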
*/ ENTRY(__copy_user_nocache) ASM_STAC + + /* If size is less than 8 bytes, go to 4-byte copy */ cmpl $8,%edx - jb 20f /* less then 8 bytes, go to byte copy loop */ + jb .L_4b_nocache_copy_entry + + /* If destination is not 8-byte aligned, "cache" copy to align it */ ALIGN_DESTINATION + + /* Set 4x8-byte copy count and remainder */ movl %edx,%ecx andl $63,%edx shrl $6,%ecx - jz 17f + jz .L_8b_nocache_copy_entry /* jump if count is 0 */ + + /* Perform 4x8-byte nocache loop-copy */ +.L_4x8b_nocache_copy_loop: 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 @@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi decl %ecx - jnz 1b -17: movl %edx,%ecx + jnz .L_4x8b_nocache_copy_loop + + /* Set 8-byte copy count and remainder */ +.L_8b_nocache_copy_entry: + movl %edx,%ecx andl $7,%edx shrl $3,%ecx - jz 20f -18: movq (%rsi),%r8 -19: movnti %r8,(%rdi) + jz .L_4b_nocache_copy_entry /* jump if count is 0 */ + + /* Perform 8-byte nocache loop-copy */ +.L_8b_nocache_copy_loop: +20: movq (%rsi),%r8 +21: movnti %r8,(%rdi) leaq 8(%rsi),%rsi leaq 8(%rdi),%rdi decl %ecx - jnz 18b -20: andl %edx,%edx - jz 23f + jnz .L_8b_nocache_copy_loop + + /* If no byte left, we're done */ +.L_4b_nocache_copy_entry: + andl %edx,%edx + jz .L_finish_copy + + /* If destination is not 4-byte aligned, go to byte copy: */ + movl %edi,%ecx + andl $3,%ecx + jnz .L_1b_cache_copy_entry + + /* Set 4-byte copy count (1 or 0) and remainder */ movl %edx,%ecx -21: movb (%rsi),%al -22: movb %al,(%rdi) + andl $3,%edx + shrl $2,%ecx + jz .L_1b_cache_copy_entry /* jump if count is 0 */ + + /* Perform 4-byte nocache copy: */ +30: movl (%rsi),%r8d +31: movnti %r8d,(%rdi) + leaq 4(%rsi),%rsi + leaq 4(%rdi),%rdi + + /* If no bytes left, we're done: */ + andl %edx,%edx + jz .L_finish_copy + + /* Perform byte "cache" loop-copy for the remainder */ +.L_1b_cache_copy_entry: + movl %edx,%ecx +.L_1b_cache_copy_loop: +40: movb (%rsi),%al +41: movb %al,(%rdi) incq %rsi incq %rdi decl %ecx - jnz 21b -23: xorl %eax,%eax + jnz .L_1b_cache_copy_loop + + /* Finished copying; fence the prior stores */ +.L_finish_copy: + xorl %eax,%eax ASM_CLAC sfence ret .section .fixup,"ax" -30: shll $6,%ecx +.L_fixup_4x8b_copy: + shll $6,%ecx addl %ecx,%edx - jmp 60f -40: lea (%rdx,%rcx,8),%rdx - jmp 60f -50: movl %ecx,%edx -60: sfence + jmp .L_fixup_handle_tail +.L_fixup_8b_copy: + lea (%rdx,%rcx,8),%rdx + jmp .L_fixup_handle_tail +.L_fixup_4b_copy: + lea (%rdx,%rcx,4),%rdx + jmp .L_fixup_handle_tail +.L_fixup_1b_copy: + movl %ecx,%edx +.L_fixup_handle_tail: + sfence jmp copy_user_handle_tail .previous - _ASM_EXTABLE(1b,30b) - _ASM_EXTABLE(2b,30b) - _ASM_EXTABLE(3b,30b) - _ASM_EXTABLE(4b,30b) - _ASM_EXTABLE(5b,30b) - _ASM_EXTABLE(6b,30b) - _ASM_EXTABLE(7b,30b) - _ASM_EXTABLE(8b,30b) - _ASM_EXTABLE(9b,30b) - _ASM_EXTABLE(10b,30b) - _ASM_EXTABLE(11b,30b) - _ASM_EXTABLE(12b,30b) - _ASM_EXTABLE(13b,30b) - _ASM_EXTABLE(14b,30b) - _ASM_EXTABLE(15b,30b) - _ASM_EXTABLE(16b,30b) - _ASM_EXTABLE(18b,40b) - _ASM_EXTABLE(19b,40b) - _ASM_EXTABLE(21b,50b) - _ASM_EXTABLE(22b,50b) + _ASM_EXTABLE(1b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(2b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(3b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(4b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(5b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(6b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(7b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(8b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(9b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(10b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(11b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(12b,.L_fixup_4x8b_copy) + 
_ASM_EXTABLE(13b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(14b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(15b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(16b,.L_fixup_4x8b_copy) + _ASM_EXTABLE(20b,.L_fixup_8b_copy) + _ASM_EXTABLE(21b,.L_fixup_8b_copy) + _ASM_EXTABLE(30b,.L_fixup_4b_copy) + _ASM_EXTABLE(31b,.L_fixup_4b_copy) + _ASM_EXTABLE(40b,.L_fixup_1b_copy) + _ASM_EXTABLE(41b,.L_fixup_1b_copy) ENDPROC(__copy_user_nocache) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index eef44d9a3f77..e830c71a1323 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address) if (!pmd_k) return -1; + if (pmd_huge(*pmd_k)) + return 0; + pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) return -1; @@ -360,8 +363,6 @@ void vmalloc_sync_all(void) * 64-bit: * * Handle a fault on the vmalloc area - * - * This assumes no large pages in there. */ static noinline int vmalloc_fault(unsigned long address) { @@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address) if (pud_none(*pud_ref)) return -1; - if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) + if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) BUG(); + if (pud_huge(*pud)) + return 0; + pmd = pmd_offset(pud, address); pmd_ref = pmd_offset(pud_ref, address); if (pmd_none(*pmd_ref)) return -1; - if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) + if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) BUG(); + if (pmd_huge(*pmd)) + return 0; + pte_ref = pte_offset_kernel(pmd_ref, address); if (!pte_present(*pte_ref)) return -1; diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 6d5eb5900372..d8a798d8bf50 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -102,7 +102,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, return 0; } - page = pte_page(pte); if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { @@ -115,6 +114,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, return 0; } VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + page = pte_page(pte); get_page(page); put_dev_pagemap(pgmap); SetPageReferenced(page); diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 96bd1e2bffaf..72bb52f93c3d 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -71,12 +71,12 @@ unsigned long arch_mmap_rnd(void) if (mmap_is_ia32()) #ifdef CONFIG_COMPAT - rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); + rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); #else - rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); + rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); #endif else - rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); + rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index b2fd67da1701..ef05755a1900 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, break; } - if (regno > nr_registers) { + if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index c3b3f653ed0c..d04f8094bc23 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -469,7 +469,7 @@ static void __init numa_clear_kernel_node_hotplug(void) { int i, nid; nodemask_t numa_kernel_nodes = NODE_MASK_NONE; - unsigned 
long start, end; + phys_addr_t start, end; struct memblock_region *r; /* diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 2440814b0069..9cf96d82147a 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address) phys_addr_t slow_virt_to_phys(void *__virt_addr) { unsigned long virt_addr = (unsigned long)__virt_addr; - unsigned long phys_addr, offset; + phys_addr_t phys_addr; + unsigned long offset; enum pg_level level; pte_t *pte; pte = lookup_address(virt_addr, &level); BUG_ON(!pte); + /* + * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t + * before being left-shifted PAGE_SHIFT bits -- this trick is to + * make 32-PAE kernel work correctly. + */ switch (level) { case PG_LEVEL_1G: - phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; + phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; offset = virt_addr & ~PUD_PAGE_MASK; break; case PG_LEVEL_2M: - phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; + phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; offset = virt_addr & ~PMD_PAGE_MASK; break; default: - phys_addr = pte_pfn(*pte) << PAGE_SHIFT; + phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; offset = virt_addr & ~PAGE_MASK; } diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 2879efc73a96..d34b5118b4e8 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -711,28 +711,22 @@ int pcibios_add_device(struct pci_dev *dev) return 0; } -int pcibios_alloc_irq(struct pci_dev *dev) -{ - /* - * If the PCI device was already claimed by core code and has - * MSI enabled, probing of the pcibios IRQ will overwrite - * dev->irq. So bail out if MSI is already enabled. - */ - if (pci_dev_msi_enabled(dev)) - return -EBUSY; - - return pcibios_enable_irq(dev); -} - -void pcibios_free_irq(struct pci_dev *dev) -{ - if (pcibios_disable_irq) - pcibios_disable_irq(dev); -} - int pcibios_enable_device(struct pci_dev *dev, int mask) { - return pci_enable_resources(dev, mask); + int err; + + if ((err = pci_enable_resources(dev, mask)) < 0) + return err; + + if (!pci_dev_msi_enabled(dev)) + return pcibios_enable_irq(dev); + return 0; +} + +void pcibios_disable_device (struct pci_dev *dev) +{ + if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) + pcibios_disable_irq(dev); } int pci_ext_cfg_avail(void) diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 0d24e7c10145..8b93e634af84 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) int polarity; int ret; - if (pci_has_managed_irq(dev)) + if (dev->irq_managed && dev->irq > 0) return 0; switch (intel_mid_identify_cpu()) { @@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev) { - if (pci_has_managed_irq(dev)) { + if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && + dev->irq > 0) { mp_unmap_irq(dev->irq); dev->irq_managed = 0; - /* - * Don't reset dev->irq here, otherwise - * intel_mid_pci_irq_enable() will fail on next call. 
- */ } } diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 32e70343e6fd..9bd115484745 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev) struct pci_dev *temp_dev; int irq; - if (pci_has_managed_irq(dev)) + if (dev->irq_managed && dev->irq > 0) return 0; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, @@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev) } dev = temp_dev; if (irq >= 0) { - pci_set_managed_irq(dev, irq); + dev->irq_managed = 1; + dev->irq = irq; dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); return 0; @@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev) return 0; } +bool mp_should_keep_irq(struct device *dev) +{ + if (dev->power.is_prepared) + return true; +#ifdef CONFIG_PM + if (dev->power.runtime_status == RPM_SUSPENDING) + return true; +#endif + + return false; +} + static void pirq_disable_irq(struct pci_dev *dev) { - if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) { + if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && + dev->irq_managed && dev->irq) { mp_unmap_irq(dev->irq); - pci_reset_managed_irq(dev); + dev->irq = 0; + dev->irq_managed = 0; } } diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index ff31ab464213..beac4dfdade6 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 0; error: - dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); + if (ret == -ENOSYS) + dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); + else if (ret) + dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret); free: kfree(v); return ret; diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c index c61b6c332e97..bfadcd0f4944 100644 --- a/arch/x86/platform/intel-quark/imr.c +++ b/arch/x86/platform/intel-quark/imr.c @@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev) end = (unsigned long)__end_rodata - 1; /* - * Setup a locked IMR around the physical extent of the kernel + * Setup an unlocked IMR around the physical extent of the kernel * from the beginning of the .text secton to the end of the * .rodata section as one physically contiguous block. * * We don't round up @size since it is already PAGE_SIZE aligned. * See vmlinux.lds.S for details. */ - ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); if (ret < 0) { pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", size / 1024, start, end); diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c index 8502ad30e61b..5adb6a2fd117 100644 --- a/arch/x86/um/os-Linux/task_size.c +++ b/arch/x86/um/os-Linux/task_size.c @@ -109,7 +109,7 @@ unsigned long os_get_top_address(void) exit(1); } - printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT); + printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT); printf("Locating the top of the address space ... "); fflush(stdout); @@ -134,7 +134,7 @@ out: exit(1); } top <<= UM_KERN_PAGE_SHIFT; - printf("0x%x\n", top); + printf("0x%lx\n", top); return top; } diff --git a/block/Kconfig b/block/Kconfig index 161491d0a879..0363cd731320 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -88,6 +88,19 @@ config BLK_DEV_INTEGRITY T10/SCSI Data Integrity Field or the T13/ATA External Path Protection. If in doubt, say N. 
+config BLK_DEV_DAX + bool "Block device DAX support" + depends on FS_DAX + depends on BROKEN + help + When DAX support is available (CONFIG_FS_DAX) raw block + devices can also support direct userspace access to the + storage capacity via MMAP(2) similar to a file on a + DAX-enabled filesystem. However, the DAX I/O-path disables + some standard I/O-statistics, and the MMAP(2) path has some + operational differences due to bypassing the page + cache. If in doubt, say N. + config BLK_DEV_THROTTLING bool "Block layer bio throttling support" depends on BLK_CGROUP=y diff --git a/block/bio.c b/block/bio.c index dbabd48b1934..cf7591551b17 100644 --- a/block/bio.c +++ b/block/bio.c @@ -874,7 +874,7 @@ int submit_bio_wait(int rw, struct bio *bio) bio->bi_private = &ret; bio->bi_end_io = submit_bio_wait_endio; submit_bio(rw, bio); - wait_for_completion(&ret.event); + wait_for_completion_io(&ret.event); return ret.error; } @@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio) if (!bio_flagged(bio, BIO_NULL_MAPPED)) { /* * if we're in a workqueue, the request is orphaned, so - * don't copy into a random user address space, just free. + * don't copy into a random user address space, just free + * and return -EINTR so user space doesn't expect any data. */ - if (current->mm && bio_data_dir(bio) == READ) + if (!current->mm) + ret = -EINTR; + else if (bio_data_dir(bio) == READ) ret = bio_copy_to_iter(bio, bmd->iter); if (bmd->is_our_pages) bio_free_pages(bio); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5a37188b559f..66e6f1aae02e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, { struct gendisk *disk; struct blkcg_gq *blkg; + struct module *owner; unsigned int major, minor; int key_len, part, ret; char *body; @@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, if (!disk) return -ENODEV; if (part) { + owner = disk->fops->owner; put_disk(disk); + module_put(owner); return -ENODEV; } @@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, ret = PTR_ERR(blkg); rcu_read_unlock(); spin_unlock_irq(disk->queue->queue_lock); + owner = disk->fops->owner; put_disk(disk); + module_put(owner); /* * If queue was bypassing, we should retry. Do so after a * short msleep(). It isn't strictly necessary but queue @@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); void blkg_conf_finish(struct blkg_conf_ctx *ctx) __releases(ctx->disk->queue->queue_lock) __releases(rcu) { + struct module *owner; + spin_unlock_irq(ctx->disk->queue->queue_lock); rcu_read_unlock(); + owner = ctx->disk->fops->owner; put_disk(ctx->disk); + module_put(owner); } EXPORT_SYMBOL_GPL(blkg_conf_finish); diff --git a/block/blk-core.c b/block/blk-core.c index ab51685988c2..b83d29755b5a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q) rq = NULL; break; - } else if (ret == BLKPREP_KILL) { + } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { + int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO; + rq->cmd_flags |= REQ_QUIET; /* * Mark this request as started so we don't trigger * any debug logic in the end I/O path. 
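 *
 * (Illustrative: with this change a BLKPREP_INVALID return completes
 * the request with -EREMOTEIO, so callers can tell an invalid or
 * unsupported command apart from the generic -EIO still used for
 * BLKPREP_KILL.)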
*/ blk_start_request(rq); - __blk_end_request_all(rq, -EIO); + __blk_end_request_all(rq, err); } else { printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); break; diff --git a/block/blk-map.c b/block/blk-map.c index f565e11f465a..a54f0543b956 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio) return ret; } +static int __blk_rq_map_user_iov(struct request *rq, + struct rq_map_data *map_data, struct iov_iter *iter, + gfp_t gfp_mask, bool copy) +{ + struct request_queue *q = rq->q; + struct bio *bio, *orig_bio; + int ret; + + if (copy) + bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); + else + bio = bio_map_user_iov(q, iter, gfp_mask); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (map_data && map_data->null_mapped) + bio_set_flag(bio, BIO_NULL_MAPPED); + + iov_iter_advance(iter, bio->bi_iter.bi_size); + if (map_data) + map_data->offset += bio->bi_iter.bi_size; + + orig_bio = bio; + blk_queue_bounce(q, &bio); + + /* + * We link the bounce buffer in and could have to traverse it + * later so we have to get a ref to prevent it from being freed + */ + bio_get(bio); + + ret = blk_rq_append_bio(q, rq, bio); + if (ret) { + bio_endio(bio); + __blk_rq_unmap_user(orig_bio); + bio_put(bio); + return ret; + } + + return 0; +} + /** * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage * @q: request queue where request should be inserted @@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) { - struct bio *bio; - int unaligned = 0; - struct iov_iter i; struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; + bool copy = (q->dma_pad_mask & iter->count) || map_data; + struct bio *bio = NULL; + struct iov_iter i; + int ret; if (!iter || !iter->count) return -EINVAL; @@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, */ if ((uaddr & queue_dma_alignment(q)) || iovec_gap_to_prv(q, &prv, &iov)) - unaligned = 1; + copy = true; prv.iov_base = iov.iov_base; prv.iov_len = iov.iov_len; } - if (unaligned || (q->dma_pad_mask & iter->count) || map_data) - bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); - else - bio = bio_map_user_iov(q, iter, gfp_mask); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - if (map_data && map_data->null_mapped) - bio_set_flag(bio, BIO_NULL_MAPPED); - - if (bio->bi_iter.bi_size != iter->count) { - /* - * Grab an extra reference to this bio, as bio_unmap_user() - * expects to be able to drop it twice as it happens on the - * normal IO completion path - */ - bio_get(bio); - bio_endio(bio); - __blk_rq_unmap_user(bio); - return -EINVAL; - } + i = *iter; + do { + ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy); + if (ret) + goto unmap_rq; + if (!bio) + bio = rq->bio; + } while (iov_iter_count(&i)); if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; - - blk_queue_bounce(q, &bio); - bio_get(bio); - blk_rq_bio_prep(q, rq, bio); return 0; + +unmap_rq: + __blk_rq_unmap_user(bio); + rq->bio = NULL; + return -EINVAL; } EXPORT_SYMBOL(blk_rq_map_user_iov); diff --git a/block/blk-merge.c b/block/blk-merge.c index 888a7fec81f7..261353166dcf 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -304,7 +304,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { struct bio_vec end_bv = { NULL }, nxt_bv; - struct bvec_iter iter; if (!blk_queue_cluster(q)) return 0; @@ 
-316,11 +315,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, if (!bio_has_data(bio)) return 1; - bio_for_each_segment(end_bv, bio, iter) - if (end_bv.bv_len == iter.bi_size) - break; - - nxt_bv = bio_iovec(nxt); + bio_get_last_bvec(bio, &end_bv); + bio_get_first_bvec(nxt, &nxt_bv); if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv)) return 0; diff --git a/block/blk-mq.c b/block/blk-mq.c index 4c0622fae413..56c0a726b619 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -599,8 +599,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, * If a request wasn't started before the queue was * marked dying, kill it here or it'll go unnoticed. */ - if (unlikely(blk_queue_dying(rq->q))) - blk_mq_complete_request(rq, -EIO); + if (unlikely(blk_queue_dying(rq->q))) { + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + } return; } diff --git a/block/blk-settings.c b/block/blk-settings.c index dd4973583978..c7bb666aafd1 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim) lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->virt_boundary_mask = 0; lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; - lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = - BLK_SAFE_MAX_SECTORS; + lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; + lim->max_dev_sectors = 0; lim->chunk_sectors = 0; lim->max_write_same_sectors = 0; lim->max_discard_sectors = 0; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index e140cc487ce1..dd93763057ce 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -147,10 +147,9 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) { - unsigned long long val; - val = q->limits.max_hw_discard_sectors << 9; - return sprintf(page, "%llu\n", val); + return sprintf(page, "%llu\n", + (unsigned long long)q->limits.max_hw_discard_sectors << 9); } static ssize_t queue_discard_max_show(struct request_queue *q, char *page) diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index a753df2b3fc2..d0dd7882d8c7 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -39,7 +39,6 @@ struct deadline_data { */ struct request *next_rq[2]; unsigned int batching; /* number of sequential requests made */ - sector_t last_sector; /* head position */ unsigned int starved; /* times reads have starved writes */ /* @@ -210,8 +209,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq) dd->next_rq[WRITE] = NULL; dd->next_rq[data_dir] = deadline_latter_request(rq); - dd->last_sector = rq_end_sector(rq); - /* * take it off the sort and fifo list, move * to dispatch queue diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 38c1aa89d3a0..28556fce4267 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -65,18 +65,10 @@ struct skcipher_async_req { struct skcipher_async_rsgl first_sgl; struct list_head list; struct scatterlist *tsg; - char iv[]; + atomic_t *inflight; + struct skcipher_request req; }; -#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ - crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))) - -#define GET_REQ_SIZE(ctx) \ - crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)) - -#define GET_IV_SIZE(ctx) \ - crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req)) - #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ sizeof(struct 
scatterlist) - 1) @@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) static void skcipher_async_cb(struct crypto_async_request *req, int err) { - struct sock *sk = req->data; - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_async_req *sreq = GET_SREQ(req, ctx); + struct skcipher_async_req *sreq = req->data; struct kiocb *iocb = sreq->iocb; - atomic_dec(&ctx->inflight); + atomic_dec(sreq->inflight); skcipher_free_async_sgls(sreq); - kfree(req); + kzfree(sreq); iocb->ki_complete(iocb, err, err); } @@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); struct skcipher_ctx *ctx = ask->private; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; unsigned ivsize = crypto_skcipher_ivsize(tfm); struct skcipher_sg_list *sgl; struct af_alg_control con = {}; @@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); struct skcipher_ctx *ctx = ask->private; + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; struct skcipher_sg_list *sgl; struct scatterlist *sg; struct skcipher_async_req *sreq; struct skcipher_request *req; struct skcipher_async_rsgl *last_rsgl = NULL; - unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); - unsigned int reqlen = sizeof(struct skcipher_async_req) + - GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); + unsigned int txbufs = 0, len = 0, tx_nents; + unsigned int reqsize = crypto_skcipher_reqsize(tfm); + unsigned int ivsize = crypto_skcipher_ivsize(tfm); int err = -ENOMEM; bool mark = false; + char *iv; + + sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL); + if (unlikely(!sreq)) + goto out; + + req = &sreq->req; + iv = (char *)(req + 1) + reqsize; + sreq->iocb = msg->msg_iocb; + INIT_LIST_HEAD(&sreq->list); + sreq->inflight = &ctx->inflight; lock_sock(sk); - req = kmalloc(reqlen, GFP_KERNEL); - if (unlikely(!req)) - goto unlock; - - sreq = GET_SREQ(req, ctx); - sreq->iocb = msg->msg_iocb; - memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl)); - INIT_LIST_HEAD(&sreq->list); + tx_nents = skcipher_all_sg_nents(ctx); sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); - if (unlikely(!sreq->tsg)) { - kfree(req); + if (unlikely(!sreq->tsg)) goto unlock; - } sg_init_table(sreq->tsg, tx_nents); - memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); - skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - skcipher_async_cb, sk); + memcpy(iv, ctx->iv, ivsize); + skcipher_request_set_tfm(req, tfm); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, + skcipher_async_cb, sreq); while (iov_iter_count(&msg->msg_iter)) { struct skcipher_async_rsgl *rsgl; @@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, sg_mark_end(sreq->tsg + txbufs - 1); skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, - len, sreq->iv); + len, iv); err = ctx->enc ? 
crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); if (err == -EINPROGRESS) { atomic_inc(&ctx->inflight); err = -EIOCBQUEUED; + sreq = NULL; goto unlock; } free: skcipher_free_async_sgls(sreq); - kfree(req); unlock: skcipher_wmem_wakeup(sk); release_sock(sk); + kzfree(sreq); +out: return err; } @@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); struct skcipher_ctx *ctx = ask->private; - unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( - &ctx->req)); + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; + unsigned bs = crypto_skcipher_blocksize(tfm); struct skcipher_sg_list *sgl; struct scatterlist *sg; int err = -EAGAIN; @@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ask->private = ctx; skcipher_request_set_tfm(&ctx->req, skcipher); - skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); sk->sk_destruct = skcipher_sock_destruct; diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 237f3795cfaa..43fe85f20d57 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (link->dump == NULL) return -EINVAL; + down_read(&crypto_alg_sem); list_for_each_entry(alg, &crypto_alg_list, cra_list) dump_alloc += CRYPTO_REPORT_MAXSIZE; @@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) .done = link->done, .min_dump_alloc = dump_alloc, }; - return netlink_dump_start(crypto_nlsk, skb, nlh, &c); + err = netlink_dump_start(crypto_nlsk, skb, nlh, &c); } + up_read(&crypto_alg_sem); + + return err; } err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 305218539df2..d48cbed342c1 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -269,8 +269,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, */ if (ACPI_SUCCESS(status) && possible_method_call && (node->type == ACPI_TYPE_METHOD)) { - if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) == - ARGP_SUPERNAME) { + if (walk_state->opcode == AML_UNLOAD_OP) { /* * acpi_ps_get_next_namestring has increased the AML pointer, * so we need to restore the saved AML pointer for method call. @@ -697,7 +696,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state * * PARAMETERS: walk_state - Current state * parser_state - Current parser state object - * arg_type - The parser argument type (ARGP_*) + * arg_type - The argument type (AML_*_ARG) * return_arg - Where the next arg is returned * * RETURN: Status, and an op object containing the next argument. 
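/*
 * Illustrative paraphrase (not a quote of the source): both call sites
 * now key the SuperName special case off the current opcode,
 *
 *     if (walk_state->opcode == AML_UNLOAD_OP)
 *             ...parse the namepath as a possible method call...
 *
 * instead of testing GET_CURRENT_ARG_TYPE() == ARGP_SUPERNAME, so only
 * Unload's SuperName operand takes the method-call path.
 */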
@@ -817,9 +816,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_NO_MEMORY); } - /* super_name allows argument to be a method call */ + /* To support super_name arg of Unload */ - if (arg_type == ARGP_SUPERNAME) { + if (walk_state->opcode == AML_UNLOAD_OP) { status = acpi_ps_get_next_namepath(walk_state, parser_state, arg, diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index ad6d8c6b777e..35947ac87644 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, nfit_mem->bdw = NULL; } -static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, +static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) { u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; struct nfit_memdev *nfit_memdev; struct nfit_flush *nfit_flush; - struct nfit_dcr *nfit_dcr; struct nfit_bdw *nfit_bdw; struct nfit_idt *nfit_idt; u16 idt_idx, range_index; - list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { - if (nfit_dcr->dcr->region_index != dcr) - continue; - nfit_mem->dcr = nfit_dcr->dcr; - break; - } - - if (!nfit_mem->dcr) { - dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n", - spa->range_index, __to_nfit_memdev(nfit_mem) - ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR"); - return -ENODEV; - } - - /* - * We've found enough to create an nvdimm, optionally - * find an associated BDW - */ - list_add(&nfit_mem->list, &acpi_desc->dimms); - list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { if (nfit_bdw->bdw->region_index != dcr) continue; @@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, } if (!nfit_mem->bdw) - return 0; + return; nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); if (!nfit_mem->spa_bdw) - return 0; + return; range_index = nfit_mem->spa_bdw->range_index; list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { @@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, } break; } - - return 0; } static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, @@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, *found; struct nfit_memdev *nfit_memdev; int type = nfit_spa_type(spa); - u16 dcr; switch (type) { case NFIT_SPA_DCR: @@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, } list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - int rc; + struct nfit_dcr *nfit_dcr; + u32 device_handle; + u16 dcr; if (nfit_memdev->memdev->range_index != spa->range_index) continue; found = NULL; dcr = nfit_memdev->memdev->region_index; + device_handle = nfit_memdev->memdev->device_handle; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) - if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { + if (__to_nfit_memdev(nfit_mem)->device_handle + == device_handle) { found = nfit_mem; break; } @@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, if (!nfit_mem) return -ENOMEM; INIT_LIST_HEAD(&nfit_mem->list); + list_add(&nfit_mem->list, &acpi_desc->dimms); + } + + list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { + if (nfit_dcr->dcr->region_index != dcr) + continue; + /* + * Record the control region for the dimm. For + * the ACPI 6.1 case, where there are separate + * control regions for the pmem vs blk + * interfaces, be sure to record the extended + * blk details. 
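+ *
+ * (Illustrative: a DIMM exposing one pmem-only DCR with
+ * windows == 0 and one blk-capable DCR with windows != 0 ends up
+ * with nfit_mem->dcr pointing at the blk-capable descriptor.)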
+ */ + if (!nfit_mem->dcr) + nfit_mem->dcr = nfit_dcr->dcr; + else if (nfit_mem->dcr->windows == 0 + && nfit_dcr->dcr->windows) + nfit_mem->dcr = nfit_dcr->dcr; + break; + } + + if (dcr && !nfit_mem->dcr) { + dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", + spa->range_index, dcr); + return -ENODEV; } if (type == NFIT_SPA_DCR) { @@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, nfit_mem->idt_dcr = nfit_idt->idt; break; } + nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); } else { /* * A single dimm may belong to multiple SPA-PM @@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, */ nfit_mem->memdev_pmem = nfit_memdev->memdev; } - - if (found) - continue; - - rc = nfit_mem_add(acpi_desc, nfit_mem, spa); - if (rc) - return rc; } return 0; @@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc, case 1: /* ARS unsupported, but we should never get here */ return 0; - case 2: - return -EINVAL; - case 3: + case 6: /* ARS is in progress */ msleep(1000); break; @@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc, } static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc, - struct nd_cmd_ars_status *cmd) + struct nd_cmd_ars_status *cmd, u32 size) { int rc; while (1) { rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd, - sizeof(*cmd)); + size); if (rc || cmd->status & 0xffff) return -ENXIO; @@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc, case 2: /* No ARS performed for the current boot */ return 0; + case 3: + /* TODO: error list overflow support */ default: return -ENXIO; } @@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_start *ars_start = NULL; struct nd_cmd_ars_cap *ars_cap = NULL; u64 start, len, cur, remaining; + u32 ars_status_size; int rc; ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL); @@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc, start = ndr_desc->res->start; len = ndr_desc->res->end - ndr_desc->res->start + 1; + /* + * If ARS is unimplemented, unsupported, or if the 'Persistent Memory + * Scrub' flag in extended status is not set, skip this but continue + * initialization + */ rc = ars_get_cap(nd_desc, ars_cap, start, len); + if (rc == -ENOTTY) { + dev_dbg(acpi_desc->dev, + "Address Range Scrub is not implemented, won't create an error list\n"); + rc = 0; + goto out; + } if (rc) goto out; - /* - * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in - * extended status is not set, skip this but continue initialization - */ if ((ars_cap->status & 0xffff) || !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) { dev_warn(acpi_desc->dev, @@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc, * Check if a full-range ARS has been run. If so, use those results * without having to start a new ARS. 
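 *
 * (Illustrative: ars_status is now sized from ars_cap->max_ars_out and
 * that same ars_status_size is passed to each ars_get_status() call,
 * so the ND_CMD_ARS_STATUS output length always matches the buffer.)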
*/ - ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status), - GFP_KERNEL); + ars_status_size = ars_cap->max_ars_out; + ars_status = kzalloc(ars_status_size, GFP_KERNEL); if (!ars_status) { rc = -ENOMEM; goto out; } - rc = ars_get_status(nd_desc, ars_status); + rc = ars_get_status(nd_desc, ars_status, ars_status_size); if (rc) goto out; @@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc, if (rc) goto out; - rc = ars_get_status(nd_desc, ars_status); + rc = ars_get_status(nd_desc, ars_status, ars_status_size); if (rc) goto out; diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index d30184c7f3bc..c8e169e46673 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) return 0; } - if (pci_has_managed_irq(dev)) + if (dev->irq_managed && dev->irq > 0) return 0; entry = acpi_pci_irq_lookup(dev, pin); @@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev) kfree(entry); return rc; } - pci_set_managed_irq(dev, rc); + dev->irq = rc; + dev->irq_managed = 1; if (link) snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); @@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev) u8 pin; pin = dev->pin; - if (!pin || !pci_has_managed_irq(dev)) + if (!pin || !dev->irq_managed || dev->irq <= 0) return; + /* Keep IOAPIC pin configuration when suspending */ + if (dev->dev.power.is_prepared) + return; +#ifdef CONFIG_PM + if (dev->dev.power.runtime_status == RPM_SUSPENDING) + return; +#endif + entry = acpi_pci_irq_lookup(dev, pin); if (!entry) return; @@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); if (gsi >= 0) { acpi_unregister_gsi(gsi); - pci_reset_managed_irq(dev); + dev->irq_managed = 0; } } diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index fa2863567eed..ededa909df2f 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -4,7 +4,6 @@ * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh * Copyright (C) 2002 Dominik Brodowski - * Copyright (c) 2015, The Linux Foundation. All rights reserved. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) * enabled system. 
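 *
 * (Illustrative: PIRQ_PENALTY_ISA_USED is 16^5 and
 * PIRQ_PENALTY_PCI_USING is 16^3, so one ISA user weighs as much as
 * 256 PCI users; the allocator below therefore keeps steering links
 * toward IRQs used only by PCI devices.)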
*/ +#define ACPI_MAX_IRQS 256 #define ACPI_MAX_ISA_IRQ 16 #define PIRQ_PENALTY_PCI_AVAILABLE (0) @@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) #define PIRQ_PENALTY_ISA_USED (16*16*16*16*16) #define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16) -static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = { +static int acpi_irq_penalty[ACPI_MAX_IRQS] = { PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */ @@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = { PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */ PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */ PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */ + /* >IRQ15 */ }; -struct irq_penalty_info { - int irq; - int penalty; - struct list_head node; -}; - -static LIST_HEAD(acpi_irq_penalty_list); - -static int acpi_irq_get_penalty(int irq) -{ - struct irq_penalty_info *irq_info; - - if (irq < ACPI_MAX_ISA_IRQ) - return acpi_irq_isa_penalty[irq]; - - list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) { - if (irq_info->irq == irq) - return irq_info->penalty; - } - - return 0; -} - -static int acpi_irq_set_penalty(int irq, int new_penalty) -{ - struct irq_penalty_info *irq_info; - - /* see if this is a ISA IRQ */ - if (irq < ACPI_MAX_ISA_IRQ) { - acpi_irq_isa_penalty[irq] = new_penalty; - return 0; - } - - /* next, try to locate from the dynamic list */ - list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) { - if (irq_info->irq == irq) { - irq_info->penalty = new_penalty; - return 0; - } - } - - /* nope, let's allocate a slot for this IRQ */ - irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL); - if (!irq_info) - return -ENOMEM; - - irq_info->irq = irq; - irq_info->penalty = new_penalty; - list_add_tail(&irq_info->node, &acpi_irq_penalty_list); - - return 0; -} - -static void acpi_irq_add_penalty(int irq, int penalty) -{ - int curpen = acpi_irq_get_penalty(irq); - - acpi_irq_set_penalty(irq, curpen + penalty); -} - int __init acpi_irq_penalty_init(void) { struct acpi_pci_link *link; @@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void) link->irq.possible_count; for (i = 0; i < link->irq.possible_count; i++) { - if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) { - int irqpos = link->irq.possible[i]; - - acpi_irq_add_penalty(irqpos, penalty); - } + if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) + acpi_irq_penalty[link->irq. + possible[i]] += + penalty; } } else if (link->irq.active) { - acpi_irq_add_penalty(link->irq.active, - PIRQ_PENALTY_PCI_POSSIBLE); + acpi_irq_penalty[link->irq.active] += + PIRQ_PENALTY_PCI_POSSIBLE; } } @@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) * the use of IRQs 9, 10, 11, and >15. */ for (i = (link->irq.possible_count - 1); i >= 0; i--) { - if (acpi_irq_get_penalty(irq) > - acpi_irq_get_penalty(link->irq.possible[i])) + if (acpi_irq_penalty[irq] > + acpi_irq_penalty[link->irq.possible[i]]) irq = link->irq.possible[i]; } } - if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) { + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. 
" "Try pci=noacpi or acpi=off\n", acpi_device_name(link->device), @@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) acpi_device_bid(link->device)); return -ENODEV; } else { - acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING); - + acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING; printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", acpi_device_name(link->device), acpi_device_bid(link->device), link->irq.active); @@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device) } /* - * modify penalty from cmdline + * modify acpi_irq_penalty[] from cmdline */ static int __init acpi_irq_penalty_update(char *str, int used) { @@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used) if (irq < 0) continue; + if (irq >= ARRAY_SIZE(acpi_irq_penalty)) + continue; + if (used) - acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED); + acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; else - acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE); + acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE; if (retval != 2) /* no next number */ break; @@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used) */ void acpi_penalize_isa_irq(int irq, int active) { - if (irq >= 0) - acpi_irq_add_penalty(irq, active ? - PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); + if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { + if (active) + acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; + else + acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; + } } bool acpi_isa_irq_available(int irq) { - return irq >= 0 && - (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); } /* @@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq) */ void acpi_penalize_sci_irq(int irq, int trigger, int polarity) { - int penalty; - - if (irq < 0) - return; - - if (trigger != ACPI_MADT_TRIGGER_LEVEL || - polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) - penalty = PIRQ_PENALTY_ISA_ALWAYS; - else - penalty = PIRQ_PENALTY_PCI_USING; - - acpi_irq_add_penalty(irq, penalty); + if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { + if (trigger != ACPI_MADT_TRIGGER_LEVEL || + polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) + acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS; + else + acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; + } } /* diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a39e85f9efa9..7d00b7a015ea 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc, if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; - ptr += sizeof(void *); + ptr += sizeof(cookie); list_for_each_entry(w, &proc->delivered_death, entry) { struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 594fcabd22cd..146dc0b8ec61 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */ + { 
PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ @@ -347,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -1305,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host) {} #endif +#ifdef CONFIG_ARM64 +/* + * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. + * Workaround is to make sure all pending IRQs are served before leaving + * handler. 
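+ *
+ * (Illustrative: the handler below re-reads HOST_IRQ_STAT after
+ * acking it and keeps dispatching until the register reads back
+ * zero, so an interrupt that latches between the ack and the
+ * return is not lost.)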
+ */
+static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ahci_host_priv *hpriv;
+	unsigned int rc = 0;
+	void __iomem *mmio;
+	u32 irq_stat, irq_masked;
+	unsigned int handled = 1;
+
+	VPRINTK("ENTER\n");
+	hpriv = host->private_data;
+	mmio = hpriv->mmio;
+	irq_stat = readl(mmio + HOST_IRQ_STAT);
+	if (!irq_stat)
+		return IRQ_NONE;
+
+	do {
+		irq_masked = irq_stat & hpriv->port_map;
+		spin_lock(&host->lock);
+		rc = ahci_handle_port_intr(host, irq_masked);
+		if (!rc)
+			handled = 0;
+		writel(irq_stat, mmio + HOST_IRQ_STAT);
+		irq_stat = readl(mmio + HOST_IRQ_STAT);
+		spin_unlock(&host->lock);
+	} while (irq_stat);
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(handled);
+}
+#endif
+
 /*
  * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
  * to single msi.
@@ -1540,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ahci_broken_devslp(pdev))
 		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
 
+#ifdef CONFIG_ARM64
+	if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+		hpriv->irq_handler = ahci_thunderx_irq_handler;
+#endif
+
 	/* save initial config */
 	ahci_pci_save_initial_config(pdev, hpriv);
 
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a4faa438889c..167ba7e3b92e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,8 +240,7 @@ enum {
 					    error-handling stage) */
 	AHCI_HFLAG_NO_DEVSLP		= (1 << 17), /* no device sleep */
 	AHCI_HFLAG_NO_FBS		= (1 << 18), /* no FBS */
-	AHCI_HFLAG_EDGE_IRQ		= (1 << 19), /* HOST_IRQ_STAT behaves as
-						        Edge Triggered */
+
 #ifdef CONFIG_PCI_MSI
 	AHCI_HFLAG_MULTI_MSI		= (1 << 20), /* multiple PCI MSIs */
 	AHCI_HFLAG_MULTI_MSIX		= (1 << 21), /* per-port MSI-X */
@@ -250,6 +249,7 @@ enum {
 	AHCI_HFLAG_MULTI_MSI		= 0,
 	AHCI_HFLAG_MULTI_MSIX		= 0,
 #endif
+	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */
 
 	/* ap->flags bits */
 
@@ -360,6 +360,7 @@ struct ahci_host_priv {
 	 * be overridden anytime before the host is activated.
 	 */
 	void			(*start_engine)(struct ata_port *ap);
+	irqreturn_t		(*irq_handler)(int irq, void *dev_instance);
 };
 
 #ifdef CONFIG_PCI_MSI
@@ -423,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
 void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
 					     unsigned int port_no)
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index b36cae2fd04b..e87bcec0fd7c 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
 	if (IS_ERR(hpriv))
 		return PTR_ERR(hpriv);
 	hpriv->plat_data = priv;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
 
 	brcm_sata_alpm_init(hpriv);
 
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..8e3f7faf00d3 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -548,6 +548,88 @@ softreset_retry:
 	return rc;
 }
 
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @ata_host: Host that received the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * For hardware with a broken edge-trigger latch, the HOST_IRQ_STAT
+ * register misses an edge interrupt when the clearing of HOST_IRQ_STAT
+ * and the hardware's reporting of PORT_IRQ_STAT fall in the
+ * same clock cycle.
+ * As such, the algorithm below outlines the workaround.
+ *
+ * 1. Read HOST_IRQ_STAT register and save the state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If the readback of HOST_IRQ_STAT is zero, traverse the
+ *    PORT_IRQ_STAT registers of the remaining ports to check
+ *    whether an interrupt triggered at that point; otherwise
+ *    go to step 6.
+ * 5. If any of those PORT_IRQ_STAT registers is non-zero, update
+ *    the HOST_IRQ_STAT state saved in step 1.
+ * 6. Handle port interrupts.
+ * 7. Exit.
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+					     u32 irq_masked)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *port_mmio;
+	int i;
+
+	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
+		for (i = 0; i < host->n_ports; i++) {
+			if (irq_masked & (1 << i))
+				continue;
+
+			port_mmio = ahci_port_base(host->ports[i]);
+			if (readl(port_mmio + PORT_IRQ_STAT))
+				irq_masked |= (1 << i);
+		}
+	}
+
+	return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ahci_host_priv *hpriv;
+	unsigned int rc = 0;
+	void __iomem *mmio;
+	u32 irq_stat, irq_masked;
+
+	VPRINTK("ENTER\n");
+
+	hpriv = host->private_data;
+	mmio = hpriv->mmio;
+
+	/* sigh. 0xffffffff is a valid return from h/w */
+	irq_stat = readl(mmio + HOST_IRQ_STAT);
+	if (!irq_stat)
+		return IRQ_NONE;
+
+	irq_masked = irq_stat & hpriv->port_map;
+
+	spin_lock(&host->lock);
+
+	/*
+	 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+	 * it should be cleared before all the port events are cleared.
+	 */
+	writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+	spin_unlock(&host->lock);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(rc);
+}
+
 static struct ata_port_operations xgene_ahci_v1_ops = {
 	.inherits = &ahci_ops,
 	.host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
 		hpriv->flags = AHCI_HFLAG_NO_NCQ;
 		break;
 	case XGENE_AHCI_V2:
-		hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+		hpriv->flags |= AHCI_HFLAG_YES_FBS;
+		hpriv->irq_handler = xgene_ahci_irq_intr;
 		break;
 	default:
 		break;
 }
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d61740e78d6d..85ea5142a095 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
 				    const char *buf, size_t size);
 static ssize_t ahci_show_em_supported(struct device *dev,
 				      struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
 
 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -496,8 +497,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 		}
 	}
 
-	/* fabricate port_map from cap.nr_ports */
-	if (!port_map) {
+	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+	if (!port_map && vers < 0x10300) {
 		port_map = (1 << ahci_nr_ports(cap)) - 1;
 		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
 
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 
 	if (!hpriv->start_engine)
 		hpriv->start_engine = ahci_start_engine;
+
+	if (!hpriv->irq_handler)
+		hpriv->irq_handler = ahci_single_level_irq_intr;
 }
 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
 
@@ -593,8 +597,22 @@
EXPORT_SYMBOL_GPL(ahci_start_engine); int ahci_stop_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_host_priv *hpriv = ap->host->private_data; u32 tmp; + /* + * On some controllers, stopping a port's DMA engine while the port + * is in ALPM state (partial or slumber) results in failures on + * subsequent DMA engine starts. For those controllers, put the + * port back in active state before stopping its DMA engine. + */ + if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) && + (ap->link.lpm_policy > ATA_LPM_MAX_POWER) && + ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) { + dev_err(ap->host->dev, "Failed to wake up port before engine stop\n"); + return -EIO; + } + tmp = readl(port_mmio + PORT_CMD); /* check if the HBA is idle */ @@ -689,6 +707,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, void __iomem *port_mmio = ahci_port_base(ap); if (policy != ATA_LPM_MAX_POWER) { + /* wakeup flag only applies to the max power policy */ + hints &= ~ATA_LPM_WAKE_ONLY; + /* * Disable interrupts on Phy Ready. This keeps us from * getting woken up due to spurious phy ready @@ -704,7 +725,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, u32 cmd = readl(port_mmio + PORT_CMD); if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { - cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); + if (!(hints & ATA_LPM_WAKE_ONLY)) + cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); cmd |= PORT_CMD_ICC_ACTIVE; writel(cmd, port_mmio + PORT_CMD); @@ -712,6 +734,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, /* wait 10ms to be sure we've come out of LPM state */ ata_msleep(ap, 10); + + if (hints & ATA_LPM_WAKE_ONLY) + return 0; } else { cmd |= PORT_CMD_ALPE; if (policy == ATA_LPM_MIN_POWER) @@ -1143,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap, /* mark esata ports */ tmp = readl(port_mmio + PORT_CMD); - if ((tmp & PORT_CMD_HPCP) || - ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))) + if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ap->pflags |= ATA_PFLAG_EXTERNAL; } @@ -1825,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) return IRQ_HANDLED; } -static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) +u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) { unsigned int i, handled = 0; @@ -1851,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) return handled; } - -static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance) -{ - struct ata_host *host = dev_instance; - struct ahci_host_priv *hpriv; - unsigned int rc = 0; - void __iomem *mmio; - u32 irq_stat, irq_masked; - - VPRINTK("ENTER\n"); - - hpriv = host->private_data; - mmio = hpriv->mmio; - - /* sigh. 0xffffffff is a valid return from h/w */ - irq_stat = readl(mmio + HOST_IRQ_STAT); - if (!irq_stat) - return IRQ_NONE; - - irq_masked = irq_stat & hpriv->port_map; - - spin_lock(&host->lock); - - /* - * HOST_IRQ_STAT behaves as edge triggered latch meaning that - * it should be cleared before all the port events are cleared. 
- */ - writel(irq_stat, mmio + HOST_IRQ_STAT); - - rc = ahci_handle_port_intr(host, irq_masked); - - spin_unlock(&host->lock); - - VPRINTK("EXIT\n"); - - return IRQ_RETVAL(rc); -} +EXPORT_SYMBOL_GPL(ahci_handle_port_intr); static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) { @@ -2514,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht) int irq = hpriv->irq; int rc; - if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) + if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) { + if (hpriv->irq_handler) + dev_warn(host->dev, + "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n"); + rc = ahci_host_activate_multi_irqs(host, sht); - else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ) - rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr, - IRQF_SHARED, sht); - else - rc = ata_host_activate(host, irq, ahci_single_level_irq_intr, + } else { + rc = ata_host_activate(host, irq, hpriv->irq_handler, IRQF_SHARED, sht); + } + + return rc; } EXPORT_SYMBOL_GPL(ahci_host_activate); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index cbb74719d2c1..55e257c268dd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, + { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 7e959f90c020..e417e1a1d02c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap) int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, int cmd, void __user *arg) { - int val = -EINVAL, rc = -EINVAL; + unsigned long val; + int rc = -EINVAL; unsigned long flags; switch (cmd) { - case ATA_IOC_GET_IO32: + case HDIO_GET_32BIT: spin_lock_irqsave(ap->lock, flags); val = ata_ioc32(ap); spin_unlock_irqrestore(ap->lock, flags); - if (copy_to_user(arg, &val, 1)) - return -EFAULT; - return 0; + return put_user(val, (unsigned long __user *)arg); - case ATA_IOC_SET_IO32: + case HDIO_SET_32BIT: val = (unsigned long) arg; rc = 0; spin_lock_irqsave(ap->lock, flags); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index cdf6215a9a22..051b6158d1b7 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) { struct ata_port *ap = qc->ap; - unsigned long flags; if (ap->ops->error_handler) { if (in_wq) { - spin_lock_irqsave(ap->lock, flags); - /* EH might have kicked in while host lock is * released.
*/ @@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) } else ata_port_freeze(ap); } - - spin_unlock_irqrestore(ap->lock, flags); } else { if (likely(!(qc->err_mask & AC_ERR_HSM))) ata_qc_complete(qc); @@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) } } else { if (in_wq) { - spin_lock_irqsave(ap->lock, flags); ata_sff_irq_on(ap); ata_qc_complete(qc); - spin_unlock_irqrestore(ap->lock, flags); } else ata_qc_complete(qc); } @@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, { struct ata_link *link = qc->dev->link; struct ata_eh_info *ehi = &link->eh_info; - unsigned long flags = 0; int poll_next; + lockdep_assert_held(ap->lock); + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); /* Make sure ata_sff_qc_issue() does not throw things @@ -1112,14 +1106,6 @@ fsm_start: } } - /* Send the CDB (atapi) or the first data block (ata pio out). - * During the state transition, interrupt handler shouldn't - * be invoked before the data transfer is complete and - * hsm_task_state is changed. Hence, the following locking. - */ - if (in_wq) - spin_lock_irqsave(ap->lock, flags); - if (qc->tf.protocol == ATA_PROT_PIO) { /* PIO data out protocol. * send first data block. @@ -1135,9 +1121,6 @@ fsm_start: /* send CDB */ atapi_send_cdb(ap, qc); - if (in_wq) - spin_unlock_irqrestore(ap->lock, flags); - /* if polling, ata_sff_pio_task() handles the rest. * otherwise, interrupt handler takes over from here. */ @@ -1296,7 +1279,8 @@ fsm_start: break; default: poll_next = 0; - BUG(); + WARN(true, "ata%d: SFF host state machine in invalid state %d", + ap->print_id, ap->hsm_task_state); } return poll_next; @@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work) u8 status; int poll_next; + spin_lock_irq(ap->lock); + BUG_ON(ap->sff_pio_task_link == NULL); /* qc can be NULL if timeout occurred */ qc = ata_qc_from_tag(ap, link->active_tag); if (!qc) { ap->sff_pio_task_link = NULL; - return; + goto out_unlock; } fsm_start: @@ -1381,11 +1367,14 @@ fsm_start: */ status = ata_sff_busy_wait(ap, ATA_BUSY, 5); if (status & ATA_BUSY) { + spin_unlock_irq(ap->lock); ata_msleep(ap, 2); + spin_lock_irq(ap->lock); + status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); - return; + goto out_unlock; } } @@ -1402,6 +1391,8 @@ fsm_start: */ if (poll_next) goto fsm_start; +out_unlock: + spin_unlock_irq(ap->lock); } /** diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index 12fe0f3bb7e9..c8b6a780a290 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -32,6 +32,8 @@ #include #include +#include + #define DRV_NAME "pata-rb532-cf" #define DRV_VERSION "0.1.0" #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" @@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) int gpio; struct resource *res; struct ata_host *ah; + struct cf_device *pdata; struct rb532_cf_info *info; int ret; @@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) return -ENOENT; } - gpio = irq_to_gpio(irq); + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "no platform data specified\n"); + return -EINVAL; + } + + gpio = pdata->gpio_pin; if (gpio < 0) { dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); return -ENOENT; diff --git a/drivers/base/component.c b/drivers/base/component.c index 
89f5cf68d80a..04a1582e80bb 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -206,6 +206,8 @@ static void component_match_release(struct device *master, if (mc->release) mc->release(master, mc->data); } + + kfree(match->compare); } static void devm_component_match_release(struct device *dev, void *res) @@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev, if (match->alloc == num) return 0; - new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL); + new = kmalloc_array(num, sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; if (match->compare) { memcpy(new, match->compare, sizeof(*new) * min(match->num, num)); - devm_kfree(dev, match->compare); + kfree(match->compare); } match->compare = new; match->alloc = num; @@ -283,6 +285,24 @@ void component_match_add_release(struct device *master, } EXPORT_SYMBOL(component_match_add_release); +static void free_master(struct master *master) +{ + struct component_match *match = master->match; + int i; + + list_del(&master->node); + + if (match) { + for (i = 0; i < match->num; i++) { + struct component *c = match->compare[i].component; + if (c) + c->master = NULL; + } + } + + kfree(master); +} + int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, struct component_match *match) @@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev, ret = try_to_bring_up_master(master, NULL); - if (ret < 0) { - /* Delete off the list if we weren't successful */ - list_del(&master->node); - kfree(master); - } + if (ret < 0) + free_master(master); + mutex_unlock(&component_mutex); return ret < 0 ? ret : 0; @@ -324,25 +342,12 @@ void component_master_del(struct device *dev, const struct component_master_ops *ops) { struct master *master; - int i; mutex_lock(&component_mutex); master = __master_find(dev, ops); if (master) { - struct component_match *match = master->match; - take_down_master(master); - - list_del(&master->node); - - if (match) { - for (i = 0; i < match->num; i++) { - struct component *c = match->compare[i].component; - if (c) - c->master = NULL; - } - } - kfree(master); + free_master(master); } mutex_unlock(&component_mutex); } @@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops) ret = try_to_bring_up_masters(component); if (ret < 0) { + if (component->master) + remove_component(component->master, component); list_del(&component->node); kfree(component); diff --git a/drivers/base/property.c b/drivers/base/property.c index c359351d50f1..a163f2c59aa3 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -218,7 +218,7 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) bool ret; ret = __fwnode_property_present(fwnode, propname); - if (ret == false && fwnode && fwnode->secondary) + if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) ret = __fwnode_property_present(fwnode->secondary, propname); return ret; } @@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(device_property_match_string); int _ret_; \ _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ _val_, _nval_); \ - if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \ + if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary)) \ _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ _proptype_, _val_, _nval_); \ _ret_; \ @@ -593,7 +593,7 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, int ret; ret = 
__fwnode_property_read_string_array(fwnode, propname, val, nval); - if (ret == -EINVAL && fwnode && fwnode->secondary) + if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) ret = __fwnode_property_read_string_array(fwnode->secondary, propname, val, nval); return ret; @@ -621,7 +621,7 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode, int ret; ret = __fwnode_property_read_string(fwnode, propname, val); - if (ret == -EINVAL && fwnode && fwnode->secondary) + if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) ret = __fwnode_property_read_string(fwnode->secondary, propname, val); return ret; diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 8812bfb9e3b8..eea51569f0eb 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context, while (val_size) { switch (ctx->val_bytes) { case 1: - __raw_writeb(*(u8 *)val, ctx->regs + offset); + writeb(*(u8 *)val, ctx->regs + offset); break; case 2: - __raw_writew(*(u16 *)val, ctx->regs + offset); + writew(*(u16 *)val, ctx->regs + offset); break; case 4: - __raw_writel(*(u32 *)val, ctx->regs + offset); + writel(*(u32 *)val, ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - __raw_writeq(*(u64 *)val, ctx->regs + offset); + writeq(*(u64 *)val, ctx->regs + offset); break; #endif default: @@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context, while (val_size) { switch (ctx->val_bytes) { case 1: - *(u8 *)val = __raw_readb(ctx->regs + offset); + *(u8 *)val = readb(ctx->regs + offset); break; case 2: - *(u16 *)val = __raw_readw(ctx->regs + offset); + *(u16 *)val = readw(ctx->regs + offset); break; case 4: - *(u32 *)val = __raw_readl(ctx->regs + offset); + *(u32 *)val = readl(ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - *(u64 *)val = __raw_readq(ctx->regs + offset); + *(u64 *)val = readq(ctx->regs + offset); break; #endif default: diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 9e251201dd48..84708a5f8c52 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -866,7 +866,7 @@ static void set_fdc(int drive) } /* locks the driver */ -static int lock_fdc(int drive, bool interruptible) +static int lock_fdc(int drive) { if (WARN(atomic_read(&usage_count) == 0, "Trying to lock fdc while usage count=0\n")) @@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req) { int ret; - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; set_floppy(drive); @@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible) { int ret; - if (lock_fdc(drive, interruptible)) + if (lock_fdc(drive)) return -EINTR; if (arg == FD_RESET_ALWAYS) @@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&open_lock); - if (lock_fdc(drive, true)) { + if (lock_fdc(drive)) { mutex_unlock(&open_lock); return -EINTR; } @@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, } else { int oldStretch; - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; if (cmd != FDDEFPRM) { /* notice a disk change immediately, else @@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g) if (type) *g = &floppy_type[type]; else { - if (lock_fdc(drive, false)) + if (lock_fdc(drive)) return -EINTR; if (poll_drive(false, 0) == -EINTR) return 
-EINTR; @@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int if (UDRS->fd_ref != 1) /* somebody else has this drive open */ return -EBUSY; - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; /* do the actual eject. Fails on @@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int process_fd_request(); return ret; case FDCLRPRM: - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; current_type[drive] = NULL; floppy_sizes[drive] = MAX_DISK_SIZE << 1; @@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int UDP->flags &= ~FTD_MSG; return 0; case FDFMTBEG: - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; @@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return do_format(drive, &inparam.f); case FDFMTEND: case FDFLUSH: - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; return invalidate_drive(bdev); case FDSETEMSGTRESH: @@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int outparam = UDP; break; case FDPOLLDRVSTAT: - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; @@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int case FDRAWCMD: if (type) return -EINVAL; - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; set_floppy(drive); i = raw_cmd_ioctl(cmd, (void __user *)param); @@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int process_fd_request(); return i; case FDTWADDLE: - if (lock_fdc(drive, true)) + if (lock_fdc(drive)) return -EINTR; twaddle(); process_fd_request(); @@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) opened_bdev[drive] = bdev; + if (!(mode & (FMODE_READ|FMODE_WRITE))) { + res = -EINVAL; + goto out; + } + res = -ENXIO; if (!floppy_track_buffer) { @@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (UFDCS->rawcmd == 1) UFDCS->rawcmd = 2; - if (!(mode & FMODE_NDELAY)) { - if (mode & (FMODE_READ|FMODE_WRITE)) { - UDRS->last_checked = 0; - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); - check_disk_change(bdev); - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) - goto out; - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) - goto out; - } - res = -EROFS; - if ((mode & FMODE_WRITE) && - !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) - goto out; - } + UDRS->last_checked = 0; + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + check_disk_change(bdev); + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) + goto out; + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) + goto out; + + res = -EROFS; + + if ((mode & FMODE_WRITE) && + !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + goto out; + mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return 0; @@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk, return DISK_EVENT_MEDIA_CHANGE; if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { - lock_fdc(drive, false); + if (lock_fdc(drive)) + return -EINTR; poll_drive(false, 0); process_fd_request(); } @@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk) "VFS: revalidate called on 
non-open device.\n")) return -EFAULT; - lock_fdc(drive, false); + res = lock_fdc(drive); + if (res) + return res; cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags)); if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8ba1e97d573c..64a7b5971b57 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) id->ver_id = 0x1; id->vmnt = 0; id->cgrps = 1; - id->cap = 0x3; + id->cap = 0x2; id->dom = 0x1; id->ppaf.blk_offset = 0; @@ -707,9 +707,7 @@ static int null_add_dev(void) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); - mutex_lock(&lock); - list_add_tail(&nullb->list, &nullb_list); nullb->index = nullb_indexes++; mutex_unlock(&lock); @@ -743,6 +741,10 @@ static int null_add_dev(void) strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); add_disk(disk); + + mutex_lock(&lock); + list_add_tail(&nullb->list, &nullb_list); + mutex_unlock(&lock); done: return 0; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8a8dc91c39f7..83eb9e6bf8b0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1873,6 +1873,43 @@ again: return err; } +static int negotiate_mq(struct blkfront_info *info) +{ + unsigned int backend_max_queues = 0; + int err; + unsigned int i; + + BUG_ON(info->nr_rings); + + /* Check if backend supports multiple queues. */ + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "multi-queue-max-queues", "%u", &backend_max_queues); + if (err < 0) + backend_max_queues = 1; + + info->nr_rings = min(backend_max_queues, xen_blkif_max_queues); + /* We need at least one ring. */ + if (!info->nr_rings) + info->nr_rings = 1; + + info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL); + if (!info->rinfo) { + xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); + return -ENOMEM; + } + + for (i = 0; i < info->nr_rings; i++) { + struct blkfront_ring_info *rinfo; + + rinfo = &info->rinfo[i]; + INIT_LIST_HEAD(&rinfo->indirect_pages); + INIT_LIST_HEAD(&rinfo->grants); + rinfo->dev_info = info; + INIT_WORK(&rinfo->work, blkif_restart_queue); + spin_lock_init(&rinfo->ring_lock); + } + return 0; +} /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and @@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; - unsigned int r_index; struct blkfront_info *info; - unsigned int backend_max_queues = 0; /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, @@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev, } info->xbdev = dev; - /* Check if backend supports multiple queues. */ - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, - "multi-queue-max-queues", "%u", &backend_max_queues); - if (err < 0) - backend_max_queues = 1; - - info->nr_rings = min(backend_max_queues, xen_blkif_max_queues); - /* We need at least one ring. 
*/ - if (!info->nr_rings) - info->nr_rings = 1; - - info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL); - if (!info->rinfo) { - xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure"); + err = negotiate_mq(info); + if (err) { kfree(info); - return -ENOMEM; - } - - for (r_index = 0; r_index < info->nr_rings; r_index++) { - struct blkfront_ring_info *rinfo; - - rinfo = &info->rinfo[r_index]; - INIT_LIST_HEAD(&rinfo->indirect_pages); - INIT_LIST_HEAD(&rinfo->grants); - rinfo->dev_info = info; - INIT_WORK(&rinfo->work, blkif_restart_queue); - spin_lock_init(&rinfo->ring_lock); + return err; } mutex_init(&info->mutex); @@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info) static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); - int err; + int err = 0; dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); + err = negotiate_mq(info); + if (err) + return err; + err = talk_to_blkback(dev, info); /* diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 240b6cf1d97c..be54e5331a45 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -42,7 +42,7 @@ /* * The High Precision Event Timer driver. * This driver is closely modelled after the rtc.c driver. - * http://www.intel.com/hardwaredesign/hpetspec_1.pdf + * See HPET spec revision 1. */ #define HPET_USER_FREQ (64) #define HPET_DRIFT (500) diff --git a/drivers/char/random.c b/drivers/char/random.c index d0da5d852d41..b583e5336630 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1818,6 +1818,28 @@ unsigned int get_random_int(void) } EXPORT_SYMBOL(get_random_int); +/* + * Same as get_random_int(), but returns unsigned long. 
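+ *
+ * A minimal usage sketch (hedged: 'start' and 'range' below are
+ * illustrative variables, not part of this patch). Address-space
+ * randomization on 64-bit kernels needs a full word of entropy:
+ *
+ *	unsigned long addr = start + (get_random_long() % range);
+ *
+ * get_random_int() in the same spot could only ever vary the low 32 bits.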
+ */ +unsigned long get_random_long(void) +{ + __u32 *hash; + unsigned long ret; + + if (arch_get_random_long(&ret)) + return ret; + + hash = get_cpu_var(get_random_int_hash); + + hash[0] += current->pid + jiffies + random_get_entropy(); + md5_transform(hash, random_int_secret); + ret = *(unsigned long *)hash; + put_cpu_var(get_random_int_hash); + + return ret; +} +EXPORT_SYMBOL(get_random_long); + /* * randomize_range() returns a start address such that * diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index b038e3666058..bae4be6501df 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -43,7 +43,7 @@ obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o -obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o +obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o obj-$(CONFIG_ARCH_U300) += clk-u300.o obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 19fed65587e8..7b09a265d79f 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -289,7 +289,7 @@ static void __init of_gpio_clk_setup(struct device_node *node, num_parents = of_clk_get_parent_count(node); if (num_parents < 0) - return; + num_parents = 0; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index cd0f2726f5e0..89e9ca78bb94 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -299,7 +299,7 @@ static int scpi_clocks_probe(struct platform_device *pdev) /* Add the virtual cpufreq device */ cpufreq_dev = platform_device_register_simple("scpi-cpufreq", -1, NULL, 0); - if (!cpufreq_dev) + if (IS_ERR(cpufreq_dev)) pr_warn("unable to register cpufreq device"); return 0; diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c index d5c5bfa35a5a..3e0b52daa35f 100644 --- a/drivers/clk/mvebu/dove-divider.c +++ b/drivers/clk/mvebu/dove-divider.c @@ -247,7 +247,7 @@ static struct clk_onecell_data dove_divider_data = { void __init dove_divider_clk_init(struct device_node *np) { - void *base; + void __iomem *base; base = of_iomap(np, 0); if (WARN_ON(!base)) diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c index cf73e539e9f6..070037a29ea5 100644 --- a/drivers/clk/qcom/gcc-apq8084.c +++ b/drivers/clk/qcom/gcc-apq8084.c @@ -3587,7 +3587,6 @@ static const struct regmap_config gcc_apq8084_regmap_config = { .val_bits = 32, .max_register = 0x1fc0, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc gcc_apq8084_desc = { diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index b692ae881d6a..dd5402bac620 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -3005,7 +3005,6 @@ static const struct regmap_config gcc_ipq806x_regmap_config = { .val_bits = 32, .max_register = 0x3e40, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc gcc_ipq806x_desc = { diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c index f6a2b14dfec4..ad413036f7c7 100644 --- a/drivers/clk/qcom/gcc-msm8660.c +++ b/drivers/clk/qcom/gcc-msm8660.c @@ -2702,7 +2702,6 @@ static const struct regmap_config gcc_msm8660_regmap_config = { .val_bits = 32, .max_register = 0x363c, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct 
qcom_cc_desc gcc_msm8660_desc = { diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index e3bf09d7d0ef..8cc9b2868b41 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -3336,7 +3336,6 @@ static const struct regmap_config gcc_msm8916_regmap_config = { .val_bits = 32, .max_register = 0x80000, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc gcc_msm8916_desc = { diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index f31111e32d44..983dd7dc89a7 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -3468,7 +3468,6 @@ static const struct regmap_config gcc_msm8960_regmap_config = { .val_bits = 32, .max_register = 0x3660, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct regmap_config gcc_apq8064_regmap_config = { @@ -3477,7 +3476,6 @@ static const struct regmap_config gcc_apq8064_regmap_config = { .val_bits = 32, .max_register = 0x3880, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc gcc_msm8960_desc = { diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c index df164d618e34..335952db309b 100644 --- a/drivers/clk/qcom/gcc-msm8974.c +++ b/drivers/clk/qcom/gcc-msm8974.c @@ -2680,7 +2680,6 @@ static const struct regmap_config gcc_msm8974_regmap_config = { .val_bits = 32, .max_register = 0x1fc0, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc gcc_msm8974_desc = { diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 62e79fadd5f7..db3998e5e2d8 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -419,7 +419,6 @@ static const struct regmap_config lcc_ipq806x_regmap_config = { .val_bits = 32, .max_register = 0xfc, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc lcc_ipq806x_desc = { diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index bf95bb0ea1b8..4fcf9d1d233c 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -524,7 +524,6 @@ static const struct regmap_config lcc_msm8960_regmap_config = { .val_bits = 32, .max_register = 0xfc, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc lcc_msm8960_desc = { diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index 1e703fda8a0f..30777f9f1a43 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -3368,7 +3368,6 @@ static const struct regmap_config mmcc_apq8084_regmap_config = { .val_bits = 32, .max_register = 0x5104, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc mmcc_apq8084_desc = { diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c index d73a048d3b9d..00e36192a1de 100644 --- a/drivers/clk/qcom/mmcc-msm8960.c +++ b/drivers/clk/qcom/mmcc-msm8960.c @@ -3029,7 +3029,6 @@ static const struct regmap_config mmcc_msm8960_regmap_config = { .val_bits = 32, .max_register = 0x334, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct regmap_config mmcc_apq8064_regmap_config = { @@ -3038,7 +3037,6 @@ static const struct regmap_config mmcc_apq8064_regmap_config = { .val_bits = 32, .max_register = 0x350, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static 
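/*
 * Hedged note (an assumption, not stated in the patch): the repeated
 * removal of .val_format_endian = REGMAP_ENDIAN_LITTLE from these qcom
 * regmap_configs pairs with the regmap-mmio change above that switched
 * the __raw_*() accessors to readl()/writel(). Those accessors are
 * already little-endian by definition, so keeping an explicit
 * little-endian value format would risk a double byte swap on
 * big-endian kernels.
 */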
const struct qcom_cc_desc mmcc_msm8960_desc = { diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index bbe28ed93669..9d790bcadf25 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -2594,7 +2594,6 @@ static const struct regmap_config mmcc_msm8974_regmap_config = { .val_bits = 32, .max_register = 0x5104, .fast_io = true, - .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static const struct qcom_cc_desc mmcc_msm8974_desc = { diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index ebce98033fbb..bc7fbac83ab7 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -133,7 +133,7 @@ PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" }; PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; -PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; +PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" }; PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { @@ -224,16 +224,16 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { RK2928_CLKGATE_CON(2), 2, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, - RK2928_CLKSEL_CON(2), 4, 1, DFLAGS, + RK2928_CLKSEL_CON(2), 4, 1, MFLAGS, RK2928_CLKGATE_CON(1), 0, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, - RK2928_CLKSEL_CON(2), 5, 1, DFLAGS, + RK2928_CLKSEL_CON(2), 5, 1, MFLAGS, RK2928_CLKGATE_CON(1), 1, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, - RK2928_CLKSEL_CON(2), 6, 1, DFLAGS, + RK2928_CLKSEL_CON(2), 6, 1, MFLAGS, RK2928_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, - RK2928_CLKSEL_CON(2), 7, 1, DFLAGS, + RK2928_CLKSEL_CON(2), 7, 1, MFLAGS, RK2928_CLKGATE_CON(2), 5, GFLAGS), MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, @@ -242,11 +242,11 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, RK2928_CLKGATE_CON(1), 8, GFLAGS), COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, - RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, - RK2928_CLKGATE_CON(1), 8, GFLAGS), + RK2928_CLKSEL_CON(14), 0, 7, DFLAGS, + RK2928_CLKGATE_CON(1), 10, GFLAGS), COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, - RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, - RK2928_CLKGATE_CON(1), 8, GFLAGS), + RK2928_CLKSEL_CON(15), 0, 7, DFLAGS, + RK2928_CLKGATE_CON(1), 12, GFLAGS), COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(17), 0, RK2928_CLKGATE_CON(1), 9, GFLAGS, @@ -279,13 +279,13 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { RK2928_CLKGATE_CON(3), 2, GFLAGS), COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, - RK2928_CLKSEL_CON(12), 8, 2, DFLAGS, + RK2928_CLKSEL_CON(12), 8, 2, MFLAGS, RK2928_CLKGATE_CON(2), 11, GFLAGS), DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, - RK2928_CLKSEL_CON(12), 10, 2, DFLAGS, + RK2928_CLKSEL_CON(12), 10, 2, MFLAGS, RK2928_CLKGATE_CON(2), 13, GFLAGS), DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), @@ -344,12 +344,12 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = 
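/*
 * Hedged note (an assumption, not stated in the patch): the flag slot
 * changed in the COMPOSITE_NODIV branches above configures the parent
 * mux -- a NODIV branch has no divider -- so the hiword-mask flag there
 * must be the mux variant MFLAGS rather than the divider variant DFLAGS.
 */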
{ RK2928_CLKGATE_CON(10), 5, GFLAGS), COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, - RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS), + RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS), MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, - RK2928_CLKSEL_CON(21), 9, 5, DFLAGS, + RK2928_CLKSEL_CON(21), 4, 5, DFLAGS, RK2928_CLKGATE_CON(2), 6, GFLAGS), MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c index be0ede522269..21f3ea909fab 100644 --- a/drivers/clk/rockchip/clk-rk3368.c +++ b/drivers/clk/rockchip/clk-rk3368.c @@ -780,13 +780,13 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), /* pclk_pd_alive gates */ - GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), - GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), - GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), - GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), - GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), - GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), - GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), + GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS), + GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS), + GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS), + GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS), + GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS), + GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS), + GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS), /* * pclk_vio gates @@ -796,12 +796,12 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), /* pclk_pd_pmu gates */ - GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), - GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), - GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), - GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), - GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), - GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), + GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS), + GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS), + GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS), + GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS), + GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS), + GATE(PCLK_PMU, "pclk_pmu", 
"pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS), /* timer gates */ GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c index e1fe8f35d45c..74e7544f861b 100644 --- a/drivers/clk/tegra/clk-emc.c +++ b/drivers/clk/tegra/clk-emc.c @@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra, struct emc_timing *timing = tegra->timings + (i++); err = load_one_timing_from_dt(tegra, timing, child); - if (err) + if (err) { + of_node_put(child); return err; + } timing->ram_code = ram_code; } @@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np, * fuses until the apbmisc driver is loaded. */ err = load_timings_from_dt(tegra, node, node_ram_code); + of_node_put(node); if (err) return ERR_PTR(err); - of_node_put(node); break; } diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index 19ce0738ee76..62ea38187b71 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -11,6 +11,7 @@ enum clk_id { tegra_clk_afi, tegra_clk_amx, tegra_clk_amx1, + tegra_clk_apb2ape, tegra_clk_apbdma, tegra_clk_apbif, tegra_clk_ape, diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index a534bfab30b3..6ac3f843e7ca 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -86,15 +86,21 @@ #define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\ PLLE_SS_CNTL_SSC_BYP) #define PLLE_SS_MAX_MASK 0x1ff -#define PLLE_SS_MAX_VAL 0x25 +#define PLLE_SS_MAX_VAL_TEGRA114 0x25 +#define PLLE_SS_MAX_VAL_TEGRA210 0x21 #define PLLE_SS_INC_MASK (0xff << 16) #define PLLE_SS_INC_VAL (0x1 << 16) #define PLLE_SS_INCINTRV_MASK (0x3f << 24) -#define PLLE_SS_INCINTRV_VAL (0x20 << 24) +#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24) +#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24) #define PLLE_SS_COEFFICIENTS_MASK \ (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK) -#define PLLE_SS_COEFFICIENTS_VAL \ - (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL) +#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \ + (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\ + PLLE_SS_INCINTRV_VAL_TEGRA114) +#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \ + (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\ + PLLE_SS_INCINTRV_VAL_TEGRA210) #define PLLE_AUX_PLLP_SEL BIT(2) #define PLLE_AUX_USE_LOCKDET BIT(3) @@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll) static int clk_plle_enable(struct clk_hw *hw) { struct tegra_clk_pll *pll = to_clk_pll(hw); - unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); + unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); struct tegra_clk_pll_freq_table sel; u32 val; int err; @@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) u32 val; int ret; unsigned long flags = 0; - unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); + unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) return -EINVAL; @@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) val |= PLLE_MISC_IDDQ_SW_CTRL; val &= ~PLLE_MISC_IDDQ_SW_VALUE; val |= PLLE_MISC_PLLE_PTS; - val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; + val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK); pll_writel_misc(val, pll); udelay(5); @@ -1428,7 +1434,7 @@ static int 
clk_plle_tegra114_enable(struct clk_hw *hw) val = pll_readl(PLLE_SS_CTRL, pll); val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); val &= ~PLLE_SS_COEFFICIENTS_MASK; - val |= PLLE_SS_COEFFICIENTS_VAL; + val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114; pll_writel(val, PLLE_SS_CTRL, pll); val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); pll_writel(val, PLLE_SS_CTRL, pll); @@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw) struct tegra_clk_pll *pll = to_clk_pll(hw); struct tegra_clk_pll_freq_table sel; u32 val; - int ret; + int ret = 0; unsigned long flags = 0; - unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); + unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) return -EINVAL; @@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw) if (pll->lock) spin_lock_irqsave(pll->lock, flags); + val = pll_readl(pll->params->aux_reg, pll); + if (val & PLLE_AUX_SEQ_ENABLE) + goto out; + val = pll_readl_base(pll); val &= ~BIT(30); /* Disable lock override */ pll_writel_base(val, pll); - val = pll_readl(pll->params->aux_reg, pll); - val |= PLLE_AUX_ENABLE_SWCTL; - val &= ~PLLE_AUX_SEQ_ENABLE; - pll_writel(val, pll->params->aux_reg, pll); - udelay(1); - val = pll_readl_misc(pll); val |= PLLE_MISC_LOCK_ENABLE; val |= PLLE_MISC_IDDQ_SW_CTRL; val &= ~PLLE_MISC_IDDQ_SW_VALUE; val |= PLLE_MISC_PLLE_PTS; - val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; + val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK); pll_writel_misc(val, pll); udelay(5); @@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw) val = pll_readl(PLLE_SS_CTRL, pll); val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); val &= ~PLLE_SS_COEFFICIENTS_MASK; - val |= PLLE_SS_COEFFICIENTS_VAL; + val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210; pll_writel(val, PLLE_SS_CTRL, pll); val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); pll_writel(val, PLLE_SS_CTRL, pll); @@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw) if (pll->lock) spin_lock_irqsave(pll->lock, flags); + /* If PLLE HW sequencer is enabled, SW should not disable PLLE */ + val = pll_readl(pll->params->aux_reg, pll); + if (val & PLLE_AUX_SEQ_ENABLE) + goto out; + val = pll_readl_base(pll); val &= ~PLLE_BASE_ENABLE; pll_writel_base(val, pll); + val = pll_readl(pll->params->aux_reg, pll); + val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL; + pll_writel(val, pll->params->aux_reg, pll); + val = pll_readl_misc(pll); val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE; pll_writel_misc(val, pll); udelay(1); +out: if (pll->lock) spin_unlock_irqrestore(pll->lock, flags); } diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 6ad381a888a6..ea2b9cbf9e70 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = { XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src), XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8), MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb), - MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), + MUX8("nvenc", 
mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec), MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), @@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = { NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), - MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c), + I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c), MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif), MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), @@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0), GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), + GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0), }; static struct tegra_periph_init_data div_clks[] = { diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c index 4559a20e3af6..474de0f0c26d 100644 --- a/drivers/clk/tegra/clk-tegra-super-gen4.c +++ b/drivers/clk/tegra/clk-tegra-super-gen4.c @@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m", "pll_p", "pll_p_out4", "unused", "unused", "pll_x", "pll_x_out0" }; -const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { +static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { .gen = gen4, .sclk_parents = sclk_parents, .cclk_g_parents = cclk_g_parents, @@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu "unused", "unused", "unused", "unused", "dfllCPU_out" }; -const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { +static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { .gen = gen5, .sclk_parents = sclk_parents_gen5, .cclk_g_parents = cclk_g_parents_gen5, @@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base, *dt_clk = clk; } -void __init tegra_super_clk_init(void __iomem *clk_base, +static void __init tegra_super_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_clk_pll_params *params, diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 58514c44ea83..637041fd53ad 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -59,8 +59,8 @@ #define PLLC3_MISC3 0x50c #define PLLM_BASE 0x90 -#define PLLM_MISC0 0x9c #define PLLM_MISC1 0x98 +#define PLLM_MISC2 0x9c #define PLLP_BASE 0xa0 #define PLLP_MISC0 0xac #define PLLP_MISC1 0x680 @@ -99,7 +99,7 @@ #define PLLC4_MISC0 0x5a8 #define PLLC4_OUT 0x5e4 #define PLLMB_BASE 0x5e8 -#define PLLMB_MISC0 0x5ec 
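/*
 * Hedged note (an assumption, not stated in the patch): the renames keep
 * the macros aligned with the register layout implied above -- 0x98 stays
 * PLLM_MISC1, 0x9c becomes PLLM_MISC2, and PLLMB's MISC register at 0x5ec
 * becomes PLLMB_MISC1 -- so the PLLMB_MISC1_* field definitions that
 * follow name the register they actually touch.
 */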
+#define PLLMB_MISC1 0x5ec #define PLLA1_BASE 0x6a4 #define PLLA1_MISC0 0x6a8 #define PLLA1_MISC1 0x6ac @@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = { }; static const char *mux_pllmcp_clkm[] = { - "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", + "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb", + "pll_p", }; #define mux_pllmcp_clkm_idx NULL @@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = { /* PLLMB */ #define PLLMB_BASE_LOCK (1 << 27) -#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18) -#define PLLMB_MISC0_IDDQ (1 << 17) -#define PLLMB_MISC0_LOCK_ENABLE (1 << 16) +#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18) +#define PLLMB_MISC1_IDDQ (1 << 17) +#define PLLMB_MISC1_LOCK_ENABLE (1 << 16) -#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000 -#define PLLMB_MISC0_WRITE_MASK 0x0007ffff +#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000 +#define PLLMB_MISC1_WRITE_MASK 0x0007ffff /* PLLP */ #define PLLP_BASE_OVERRIDE (1 << 28) @@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params) PLLCX_MISC3_WRITE_MASK); } -void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) +static void tegra210_pllcx_set_defaults(const char *name, + struct tegra_clk_pll *pllcx) { pllcx->params->defaults_set = true; @@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) udelay(1); } -void _pllc_set_defaults(struct tegra_clk_pll *pllcx) +static void _pllc_set_defaults(struct tegra_clk_pll *pllcx) { tegra210_pllcx_set_defaults("PLL_C", pllcx); } -void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) +static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) { tegra210_pllcx_set_defaults("PLL_C2", pllcx); } -void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) +static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) { tegra210_pllcx_set_defaults("PLL_C3", pllcx); } -void _plla1_set_defaults(struct tegra_clk_pll *pllcx) +static void _plla1_set_defaults(struct tegra_clk_pll *pllcx) { tegra210_pllcx_set_defaults("PLL_A1", pllcx); } @@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx) * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used. * Fractional SDM is allowed to provide exact audio rates. */ -void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) +static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) { u32 mask; u32 val = readl_relaxed(clk_base + plla->params->base_reg); @@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) * PLLD * PLL with fractional SDM. 
*/ -void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) +static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) { u32 val; u32 mask = 0xffff; @@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss, udelay(1); } -void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) +static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) { plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE, PLLD2_MISC1_CFG_DEFAULT_VALUE, @@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) PLLD2_MISC3_CTRL2_DEFAULT_VALUE); } -void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) +static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) { plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE, PLLDP_MISC1_CFG_DEFAULT_VALUE, @@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support. * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers. */ -void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) +static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) { plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0); } @@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) * PLLRE * VCO is exposed to the clock tree directly along with post-divider output */ -void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) +static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) { u32 mask; u32 val = readl_relaxed(clk_base + pllre->params->base_reg); @@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b) { unsigned long input_rate; - if (!IS_ERR_OR_NULL(hw->clk)) { + /* cf rate */ + if (!IS_ERR_OR_NULL(hw->clk)) input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); - /* cf rate */ - input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate); - } else { + else input_rate = 38400000; - } + + input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate); switch (input_rate) { case 12000000: @@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll) PLLX_MISC5_WRITE_MASK); } -void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) +static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) { u32 val; u32 step_a, step_b; @@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) } /* PLLMB */ -void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) +static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) { u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg); @@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) * PLL is ON: check if defaults already set, then set those * that can be updated in flight. 
*/ - val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ); - mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE; + val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ); + mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE; _pll_misc_chk_default(clk_base, pllmb->params, 0, val, - ~mask & PLLMB_MISC0_WRITE_MASK); + ~mask & PLLMB_MISC1_WRITE_MASK); /* Enable lock detect */ val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); val &= ~mask; - val |= PLLMB_MISC0_DEFAULT_VALUE & mask; + val |= PLLMB_MISC1_DEFAULT_VALUE & mask; writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]); udelay(1); @@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) } /* set IDDQ, enable lock detect */ - writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE, + writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE, clk_base + pllmb->params->ext_misc_reg[0]); udelay(1); } @@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled) ~mask & PLLP_MISC1_WRITE_MASK); } -void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) +static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) { u32 mask; u32 val = readl_relaxed(clk_base + pllp->params->base_reg); @@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control) ~mask & PLLU_MISC1_WRITE_MASK); } -void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) +static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) { u32 val = readl_relaxed(clk_base + pllu->params->base_reg); @@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg) cfg->m *= PLL_SDM_COEFF; } -unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, - unsigned long parent_rate) +static unsigned long +tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, + unsigned long parent_rate) { unsigned long vco_min = params->vco_min; @@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = { .mdiv_default = 3, .div_nmp = &pllc_nmp, .freq_table = pll_cx_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _pllc_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = { .ext_misc_reg[2] = PLLC2_MISC2, .ext_misc_reg[3] = PLLC2_MISC3, .freq_table = pll_cx_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _pllc2_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = { .ext_misc_reg[2] = PLLC3_MISC2, .ext_misc_reg[3] = PLLC3_MISC3, .freq_table = pll_cx_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _pllc3_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = { .base_reg = PLLC4_BASE, .misc_reg = PLLC4_MISC0, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE, .lock_delay = 300, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLC4_MISC0, @@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = { .div_nmp = &pllss_nmp, .freq_table = pll_c4_vco_freq_table, .set_defaults = tegra210_pllc4_set_defaults, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | - TEGRA_PLL_VCO_OUT, + .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, 
.calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = { .vco_min = 800000000, .vco_max = 1866000000, .base_reg = PLLM_BASE, - .misc_reg = PLLM_MISC1, + .misc_reg = PLLM_MISC2, .lock_mask = PLL_BASE_LOCK, .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, .lock_delay = 300, - .iddq_reg = PLLM_MISC0, + .iddq_reg = PLLM_MISC2, .iddq_bit_idx = PLLM_IDDQ_BIT, .max_p = PLL_QLIN_PDIV_MAX, - .ext_misc_reg[0] = PLLM_MISC0, - .ext_misc_reg[0] = PLLM_MISC1, + .ext_misc_reg[0] = PLLM_MISC2, + .ext_misc_reg[1] = PLLM_MISC1, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllm_nmp, @@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = { .vco_min = 800000000, .vco_max = 1866000000, .base_reg = PLLMB_BASE, - .misc_reg = PLLMB_MISC0, + .misc_reg = PLLMB_MISC1, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE, .lock_delay = 300, - .iddq_reg = PLLMB_MISC0, + .iddq_reg = PLLMB_MISC1, .iddq_bit_idx = PLLMB_IDDQ_BIT, .max_p = PLL_QLIN_PDIV_MAX, - .ext_misc_reg[0] = PLLMB_MISC0, + .ext_misc_reg[0] = PLLMB_MISC1, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllm_nmp, .freq_table = pll_m_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .set_defaults = tegra210_pllmb_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = { .base_reg = PLLRE_BASE, .misc_reg = PLLRE_MISC0, .lock_mask = PLLRE_MISC_LOCK, - .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE, .lock_delay = 300, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLRE_MISC0, @@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = { .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllre_nmp, .freq_table = pll_re_vco_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | - TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT, + .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT, .set_defaults = tegra210_pllre_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = { .base_reg = PLLP_BASE, .misc_reg = PLLP_MISC0, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE, .lock_delay = 300, .iddq_reg = PLLP_MISC0, .iddq_bit_idx = PLLXP_IDDQ_BIT, @@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = { .div_nmp = &pllp_nmp, .freq_table = pll_p_freq_table, .fixed_rate = 408000000, - .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | - TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT, + .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, .set_defaults = tegra210_pllp_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = { .ext_misc_reg[2] = PLLA1_MISC2, .ext_misc_reg[3] = PLLA1_MISC3, .freq_table = pll_cx_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _plla1_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = { .base_reg = PLLA_BASE, .misc_reg = PLLA_MISC0, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE, .lock_delay = 300, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, @@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params 
pll_a_params = { .ext_misc_reg[1] = PLLA_MISC1, .ext_misc_reg[2] = PLLA_MISC2, .freq_table = pll_a_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW | - TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW, .set_defaults = tegra210_plla_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, @@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = { .base_reg = PLLD_BASE, .misc_reg = PLLD_MISC0, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE, .lock_delay = 1000, .iddq_reg = PLLD_MISC0, .iddq_bit_idx = PLLD_IDDQ_BIT, @@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = { .ext_misc_reg[0] = PLLD_MISC0, .ext_misc_reg[1] = PLLD_MISC1, .freq_table = pll_d_freq_table, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .mdiv_default = 1, .set_defaults = tegra210_plld_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, @@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = { .base_reg = PLLD2_BASE, .misc_reg = PLLD2_MISC0, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE, .lock_delay = 300, .iddq_reg = PLLD2_BASE, .iddq_bit_idx = PLLSS_IDDQ_BIT, @@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = { .mdiv_default = 1, .freq_table = tegra210_pll_d2_freq_table, .set_defaults = tegra210_plld2_set_defaults, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, .adjust_vco = tegra210_clk_adjust_vco_min, @@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = { .base_reg = PLLDP_BASE, .misc_reg = PLLDP_MISC, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE, .lock_delay = 300, .iddq_reg = PLLDP_BASE, .iddq_bit_idx = PLLSS_IDDQ_BIT, @@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = { .mdiv_default = 1, .freq_table = pll_dp_freq_table, .set_defaults = tegra210_plldp_set_defaults, - .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, + .flags = TEGRA_PLL_USE_LOCK, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, .adjust_vco = tegra210_clk_adjust_vco_min, @@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = { .base_reg = PLLU_BASE, .misc_reg = PLLU_MISC0, .lock_mask = PLL_BASE_LOCK, - .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE, .lock_delay = 1000, .iddq_reg = PLLU_MISC0, .iddq_bit_idx = PLLU_IDDQ_BIT, @@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = { .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllu_nmp, .freq_table = pll_u_freq_table, - .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | - TEGRA_PLL_VCO_OUT, + .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, .set_defaults = tegra210_pllu_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; @@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, + [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { @@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void 
__iomem *clk_base, /* PLLU_VCO */ val = readl(clk_base + pll_u_vco_params.base_reg); - val &= ~BIT(24); /* disable PLLU_OVERRIDE */ + val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */ writel(val, clk_base + pll_u_vco_params.base_reg); clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, @@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, - { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 }, - { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 }, { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c index 1c300388782b..cc739291a3ce 100644 --- a/drivers/clk/ti/dpll3xxx.c +++ b/drivers/clk/ti/dpll3xxx.c @@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw) parent = clk_hw_get_parent(hw); - if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { + if (clk_hw_get_rate(hw) == + clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) { WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); r = _omap3_noncore_dpll_bypass(clk); } else { diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index e62f8cb2c9b5..3bca438ecd19 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c @@ -78,6 +78,9 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco) ret = regmap_read(icst->map, icst->vcoreg_off, &val); if (ret) return ret; + + /* Mask the 19 bits used by the VCO */ + val &= ~0x7ffff; val |= vco.v | (vco.r << 9) | (vco.s << 16); /* This magic unlocks the VCO so it can be controlled */ diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 659879a56dba..f93511031177 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -296,6 +296,7 @@ endif config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK && (PPC_E500MC || ARM) + depends on !CPU_THERMAL || THERMAL select CLK_QORIQ help This adds the CPUFreq driver support for Freescale QorIQ SoCs diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 0031069b64c9..14b1f9393b05 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ SoCs. config ARM_MT8173_CPUFREQ - bool "Mediatek MT8173 CPUFreq support" + tristate "Mediatek MT8173 CPUFreq support" depends on ARCH_MEDIATEK && REGULATOR depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) - depends on !CPU_THERMAL || THERMAL=y + depends on !CPU_THERMAL || THERMAL select PM_OPP help This adds the CPUFreq driver support for Mediatek MT8173 SoC.
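The clk-icst hunk above fixes a classic read-modify-write bug: the new v/r/s fields were OR-ed into the register without first clearing the stale field bits. The following stand-alone C sketch shows the pattern; the field layout (v in bits 0-8, r in bits 9-15, s in bits 16-18, hence the 0x7ffff mask) comes from the hunk, while vco_encode() is an illustrative helper name, not the driver's API (the driver goes through regmap instead).

/* compile with: cc -std=c99 -o vco_mask vco_mask.c */
#include <stdio.h>
#include <stdint.h>

static uint32_t vco_encode(uint32_t reg, uint32_t v, uint32_t r, uint32_t s)
{
	reg &= ~0x7ffff;		/* clear the whole 19-bit VCO field first */
	reg |= v | (r << 9) | (s << 16);
	return reg;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* stale register content */

	/* Without the mask, old v/r/s bits would be OR-ed into the new
	 * value; with it, only the bits above the VCO field survive. */
	printf("masked write: 0x%08x\n", (unsigned)vco_encode(reg, 0x123, 0x45, 0x3));
	return 0;
}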
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c index 1efba340456d..2058e6d292ce 100644 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ b/drivers/cpufreq/mt8173-cpufreq.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 20de861aa0ea..8bf9914d4d15 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err) dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); - clk_disable_unprepare(dd->iclk); + clk_disable(dd->iclk); if (req->base.complete) req->base.complete(&req->base, err); @@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd) { int err; - err = clk_prepare_enable(dd->iclk); + err = clk_enable(dd->iclk); if (err) return err; @@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); - clk_disable_unprepare(dd->iclk); + clk_disable(dd->iclk); } static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, @@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev) goto res_err; } + err = clk_prepare(sha_dd->iclk); + if (err) + goto res_err; + atmel_sha_hw_version_init(sha_dd); atmel_sha_get_cap(sha_dd); @@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev) if (IS_ERR(pdata)) { dev_err(&pdev->dev, "platform data not available\n"); err = PTR_ERR(pdata); - goto res_err; + goto iclk_unprepare; } } if (!pdata->dma_slave) { err = -ENXIO; - goto res_err; + goto iclk_unprepare; } err = atmel_sha_dma_init(sha_dd, pdata); if (err) @@ -1457,6 +1461,8 @@ err_algs: if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); err_sha_dma: +iclk_unprepare: + clk_unprepare(sha_dd->iclk); res_err: tasklet_kill(&sha_dd->done_task); sha_dd_err: @@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev) if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); - iounmap(sha_dd->io_base); - - clk_put(sha_dd->iclk); - - if (sha_dd->irq >= 0) - free_irq(sha_dd->irq, sha_dd); + clk_unprepare(sha_dd->iclk); return 0; } diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 0643e3366e33..c0656e7f37b5 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) return -ENOMEM; dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); - if (!dma->cache_pool) + if (!dma->padding_pool) return -ENOMEM; cesa->dma = dma; diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 848b93ee930f..fe9dce0245bf 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq, clk_set_min_rate(tegra->emc_clock, rate); clk_set_rate(tegra->emc_clock, 0); + *freq = rate; + return 0; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index b2ac13b4ddaa..9810d1df0691 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -34,6 +34,8 @@ #include #include +#include + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -251,11 +253,54 @@ out: return events; } +static long dma_buf_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct dma_buf 
*dmabuf; + struct dma_buf_sync sync; + enum dma_data_direction direction; + + dmabuf = file->private_data; + + switch (cmd) { + case DMA_BUF_IOCTL_SYNC: + if (copy_from_user(&sync, (void __user *) arg, sizeof(sync))) + return -EFAULT; + + if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) + return -EINVAL; + + switch (sync.flags & DMA_BUF_SYNC_RW) { + case DMA_BUF_SYNC_READ: + direction = DMA_FROM_DEVICE; + break; + case DMA_BUF_SYNC_WRITE: + direction = DMA_TO_DEVICE; + break; + case DMA_BUF_SYNC_RW: + direction = DMA_BIDIRECTIONAL; + break; + default: + return -EINVAL; + } + + if (sync.flags & DMA_BUF_SYNC_END) + dma_buf_end_cpu_access(dmabuf, direction); + else + dma_buf_begin_cpu_access(dmabuf, direction); + + return 0; + default: + return -ENOTTY; + } +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, + .unlocked_ioctl = dma_buf_ioctl, }; /* diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 64f5d1bdbb48..8e304b1befc5 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -176,6 +176,7 @@ #define AT_XDMAC_MAX_CHAN 0x20 #define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ #define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ +#define AT_XDMAC_RESIDUE_MAX_RETRIES 5 #define AT_XDMAC_DMA_BUSWIDTHS\ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ @@ -1395,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct at_xdmac_desc *desc, *_desc; struct list_head *descs_list; enum dma_status ret; - int residue; - u32 cur_nda, mask, value; + int residue, retry; + u32 cur_nda, check_nda, cur_ubc, mask, value; u8 dwidth = 0; unsigned long flags; @@ -1433,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, cpu_relax(); } + /* + * When processing the residue, we need to read two registers but we + * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where + * we stand in the descriptor list and AT_XDMAC_CUBC is used + * to know how much data remains for the current descriptor. + * Since the DMA channel is not paused (so as not to lose data), the + * descriptor may change between the AT_XDMAC_CNDA and AT_XDMAC_CUBC + * reads. + * For that reason, after reading AT_XDMAC_CUBC, we check whether we + * are still using the same descriptor by reading AT_XDMAC_CNDA a + * second time. If AT_XDMAC_CNDA has changed, we have to read + * AT_XDMAC_CUBC again. + * Memory barriers are used to ensure the read order of the registers. + * A maximum number of retries is set because, although unlikely, the + * loop could otherwise never end when transferring a lot of data with + * small buffers. + */ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + rmb(); + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { + rmb(); + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + + if (likely(cur_nda == check_nda)) + break; + + cur_nda = check_nda; + rmb(); + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + } + + if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { + ret = DMA_ERROR; + goto spin_unlock; + } + /* * Remove size of all microblocks already transferred and the current * one.
Then add the remaining size to transfer of the current @@ -1446,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) break; } - residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; + residue += cur_ubc << dwidth; dma_set_residue(txstate, residue); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index e893318560db..5ad0ec1f0e29 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc) /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); - channel_set_bit(dw, MASK.BLOCK, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); dwc->initialized = true; @@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, spin_unlock_irqrestore(&dwc->lock, flags); } + + /* Re-enable interrupts */ + channel_set_bit(dw, MASK.BLOCK, dwc->mask); } /* ------------------------------------------------------------------------- */ @@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data) dwc_scan_descriptors(dw, dwc); } - /* - * Re-enable interrupts. - */ + /* Re-enable interrupts */ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); } @@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) int dw_dma_cyclic_start(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); unsigned long flags; if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { @@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan) } spin_lock_irqsave(&dwc->lock, flags); + + /* Enable interrupts to perform cyclic transfer */ + channel_set_bit(dw, MASK.BLOCK, dwc->mask); + dwc_dostart(dwc, dwc->cdesc->desc[0]); + spin_unlock_irqrestore(&dwc->lock, flags); return 0; diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 4c30fdd092b3..358f9689a3f5 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = { /* Haswell */ { PCI_VDEVICE(INTEL, 0x9c60) }, + + /* Broadwell */ + { PCI_VDEVICE(INTEL, 0x9ce0) }, + { } }; MODULE_DEVICE_TABLE(pci, dw_pci_id_table); diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index d92d65549406..e3d7fcb69b4c 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -113,6 +113,9 @@ #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ #define CHMAP_EXIST BIT(24) +/* CCSTAT register */ +#define EDMA_CCSTAT_ACTV BIT(4) + /* * Max of 20 segments per channel to conserve PaRAM slots * Also note that MAX_NR_SG should be atleast the no.of periods @@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&echan->vchan.lock, flags); } +/* + * This limit exists to avoid a possible infinite loop when waiting for proof + * that a particular transfer is completed. This limit can be hit if there + * are large bursts to/from slow devices or the CPU is never able to catch + * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART + * RX-FIFO, as many as 55 loops have been seen.
+ */ +#define EDMA_MAX_TR_WAIT_LOOPS 1000 + static u32 edma_residue(struct edma_desc *edesc) { bool dst = edesc->direction == DMA_DEV_TO_MEM; + int loop_count = EDMA_MAX_TR_WAIT_LOOPS; + struct edma_chan *echan = edesc->echan; struct edma_pset *pset = edesc->pset; dma_addr_t done, pos; int i; @@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc) * We always read the dst/src position from the first RamPar * pset. That's the one which is active now. */ - pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); + pos = edma_get_position(echan->ecc, echan->slot[0], dst); + + /* + * "pos" may represent a transfer request that is still being + * processed by the EDMACC or EDMATC. We will busy-wait until + * any one of these situations occurs: + * 1. the DMA hardware is idle + * 2. a new transfer request is set up + * 3. we hit the loop limit + */ + while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) { + /* check if a new transfer request is set up */ + if (edma_get_position(echan->ecc, + echan->slot[0], dst) != pos) { + break; + } + + if (!--loop_count) { + dev_dbg_ratelimited(echan->vchan.chan.device->dev, + "%s: timeout waiting for PaRAM update\n", + __func__); + break; + } + + cpu_relax(); + } /* * Cyclic is simple. Just subtract pset[0].addr from pos. diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 2209f75fdf05..aac85c30c2cf 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -522,6 +522,8 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, chan_dbg(chan, "LD %p callback\n", desc); txd->callback(txd->callback_param); } + + dma_descriptor_unmap(txd); } /* Run any dependencies */ diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1d5df2ef148b..21539d5c54c3 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data) return; } + spin_lock_bh(&ioat_chan->cleanup_lock); + + /* handle the no-actives case */ + if (!ioat_ring_active(ioat_chan)) { + spin_lock_bh(&ioat_chan->prep_lock); + check_active(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + return; + } + /* if we haven't made progress and we have already * acknowledged a pending completion once, then be more * forceful with a restart */ - spin_lock_bh(&ioat_chan->cleanup_lock); if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) __cleanup(ioat_chan, phys_complete); else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { + u32 chanerr; + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + dev_warn(to_dev(ioat_chan), "Restarting channel...\n"); + dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n", + status, chanerr); + dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n", + ioat_ring_active(ioat_chan)); + spin_lock_bh(&ioat_chan->prep_lock); ioat_restart_channel(ioat_chan); spin_unlock_bh(&ioat_chan->prep_lock); spin_unlock_bh(&ioat_chan->cleanup_lock); return; - } else { + } else set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - } - - if (ioat_ring_active(ioat_chan)) - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - else { - spin_lock_bh(&ioat_chan->prep_lock); - check_active(ioat_chan); - spin_unlock_bh(&ioat_chan->prep_lock); - } + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); spin_unlock_bh(&ioat_chan->cleanup_lock); }
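The at_xdmac residue hunk above is a bounded consistent-snapshot read: two related hardware registers cannot be read atomically, so the position register is re-read until it is stable, and the loop gives up after a fixed retry count instead of spinning forever. Here is a minimal user-space sketch of that pattern under assumed names (reg_nda/reg_ubc stand in for the CNDA/CUBC registers; the kernel version additionally places rmb() barriers between the reads, omitted here):

#include <stdio.h>
#include <stdint.h>

#define MAX_RETRIES 5

/* Stand-ins for the two hardware registers. */
static volatile uint32_t reg_nda, reg_ubc;

static int read_residue_pair(uint32_t *nda, uint32_t *ubc)
{
	uint32_t cur_nda = reg_nda, check_nda;
	int retry;

	*ubc = reg_ubc;
	for (retry = 0; retry < MAX_RETRIES; retry++) {
		check_nda = reg_nda;
		if (cur_nda == check_nda)
			break;		/* descriptor unchanged: the pair is consistent */
		cur_nda = check_nda;
		*ubc = reg_ubc;		/* descriptor moved: re-read the count */
	}
	if (retry >= MAX_RETRIES)
		return -1;		/* caller reports DMA_ERROR instead of spinning */
	*nda = cur_nda;
	return 0;
}

int main(void)
{
	uint32_t nda, ubc;

	reg_nda = 0x1000;
	reg_ubc = 42;
	if (read_residue_pair(&nda, &ubc) == 0)
		printf("nda=0x%x ubc=%u\n", nda, ubc);
	return 0;
}

The edma hunk above applies the same design choice from the other direction: bound the busy-wait (EDMA_MAX_TR_WAIT_LOOPS) so that slow devices can never wedge the CPU in the residue path.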
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index f2a0310ae771..debca824bed6 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc, (PXA_DCMD_LENGTH & sizeof(u32)); if (flags & DMA_PREP_INTERRUPT) updater->dcmd |= PXA_DCMD_ENDIRQEN; + if (sw_desc->cyclic) + sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first; } static bool is_desc_completed(struct virt_dma_desc *vd) @@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) dev_dbg(&chan->vc.chan.dev->device, "%s(): checking txd %p[%x]: completed=%d\n", __func__, vd, vd->tx.cookie, is_desc_completed(vd)); + if (to_pxad_sw_desc(vd)->cyclic) { + vchan_cyclic_callback(vd); + break; + } if (is_desc_completed(vd)) { list_del(&vd->node); vchan_cookie_complete(vd); @@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan, return NULL; pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); - dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); + dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len); dev_dbg(&chan->vc.chan.dev->device, "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", __func__, (unsigned long)buf_addr, len, period_len, dir, flags); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index e438ee5b433f..f5c6b97c8958 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -1574,7 +1574,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) for (cha = 0; cha < KNL_MAX_CHAS; cha++) { if (knl_get_mc_route(target, mc_route_reg[cha]) == channel - && participants[channel]) { + && !participants[channel]) { participant_count++; participants[channel] = 1; break; diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 756eca8c4cf8..10e6774ab2a2 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, } if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(name, data, size) == false) { + efivar_validate(vendor, name, data, size) == false) { printk(KERN_ERR "efivars: Malformed variable content\n"); return -EINVAL; } @@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, } if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(name, data, size) == false) { + efivar_validate(new_var->VendorGuid, name, data, + size) == false) { printk(KERN_ERR "efivars: Malformed variable content\n"); return -EINVAL; } @@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, static int efivar_create_sysfs_entry(struct efivar_entry *new_var) { - int i, short_name_size; + int short_name_size; char *short_name; - unsigned long variable_name_size; - efi_char16_t *variable_name; + unsigned long utf8_name_size; + efi_char16_t *variable_name = new_var->var.VariableName; int ret; - variable_name = new_var->var.VariableName; - variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); - /* - * Length of the variable bytes in ASCII, plus the '-' separator, + * Length of the variable bytes in UTF8, plus the '-' separator, * plus the GUID, plus trailing NUL */ - short_name_size = variable_name_size / sizeof(efi_char16_t) - + 1 + EFI_VARIABLE_GUID_LEN + 1; - - short_name = kzalloc(short_name_size, GFP_KERNEL); + utf8_name_size = ucs2_utf8size(variable_name); + short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; + short_name = kmalloc(short_name_size, GFP_KERNEL); if
(!short_name) return -ENOMEM; - /* Convert Unicode to normal chars (assume top bits are 0), - ala UTF-8 */ - for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { - short_name[i] = variable_name[i] & 0xFF; - } + ucs2_as_utf8(short_name, variable_name, short_name_size); + /* This is ugly, but necessary to separate one vendor's private variables from another's. */ - - *(short_name + strlen(short_name)) = '-'; + short_name[utf8_name_size] = '-'; efi_guid_to_str(&new_var->var.VendorGuid, - short_name + strlen(short_name)); + short_name + utf8_name_size + 1); new_var->kobj.kset = efivars_kset; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 70a0fb10517f..7f2ea21c730d 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, } struct variable_validate { + efi_guid_t vendor; char *name; bool (*validate)(efi_char16_t *var_name, int match, u8 *data, unsigned long len); }; +/* + * This is the list of variables we need to validate, as well as the + * whitelist for what we think is safe not to default to immutable. + * + * If it has a validate() method that's not NULL, it'll go into the + * validation routine. If not, it is assumed valid, but still used for + * whitelisting. + * + * Note that it's sorted by {vendor,name}, but globbed names must come after + * any other name with the same prefix. + */ static const struct variable_validate variable_validate[] = { - { "BootNext", validate_uint16 }, - { "BootOrder", validate_boot_order }, - { "DriverOrder", validate_boot_order }, - { "Boot*", validate_load_option }, - { "Driver*", validate_load_option }, - { "ConIn", validate_device_path }, - { "ConInDev", validate_device_path }, - { "ConOut", validate_device_path }, - { "ConOutDev", validate_device_path }, - { "ErrOut", validate_device_path }, - { "ErrOutDev", validate_device_path }, - { "Timeout", validate_uint16 }, - { "Lang", validate_ascii_string }, - { "PlatformLang", validate_ascii_string }, - { "", NULL }, + { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, + { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, + { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, + { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, + { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, + { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, + { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, + { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, + { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, + { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, + { LINUX_EFI_CRASH_GUID, "*", NULL }, + { NULL_GUID, "", NULL }, }; +static bool +variable_matches(const char *var_name, size_t len, const char *match_name, + int *match) +{ + for (*match = 0; ; (*match)++) { + char c = match_name[*match]; + char u = var_name[*match]; + + /* Wildcard in the matching name means we've matched */ + if (c == '*') + return true; + + /* Case sensitive match */ + if (!c && *match == len) + return true; + + if (c != u) + return false; + + if (!c) + return true; + } + 
return true; +} + bool -efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) +efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, + unsigned long data_size) { int i; - u16 *unicode_name = var_name; + unsigned long utf8_size; + u8 *utf8_name; - for (i = 0; variable_validate[i].validate != NULL; i++) { + utf8_size = ucs2_utf8size(var_name); + utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); + if (!utf8_name) + return false; + + ucs2_as_utf8(utf8_name, var_name, utf8_size); + utf8_name[utf8_size] = '\0'; + + for (i = 0; variable_validate[i].name[0] != '\0'; i++) { const char *name = variable_validate[i].name; - int match; + int match = 0; - for (match = 0; ; match++) { - char c = name[match]; - u16 u = unicode_name[match]; + if (efi_guidcmp(vendor, variable_validate[i].vendor)) + continue; - /* All special variables are plain ascii */ - if (u > 127) - return true; - - /* Wildcard in the matching name means we've matched */ - if (c == '*') - return variable_validate[i].validate(var_name, - match, data, len); - - /* Case sensitive match */ - if (c != u) + if (variable_matches(utf8_name, utf8_size+1, name, &match)) { + if (variable_validate[i].validate == NULL) break; - - /* Reached the end of the string while matching */ - if (!c) - return variable_validate[i].validate(var_name, - match, data, len); + kfree(utf8_name); + return variable_validate[i].validate(var_name, match, + data, data_size); } } - + kfree(utf8_name); return true; } EXPORT_SYMBOL_GPL(efivar_validate); +bool +efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, + size_t len) +{ + int i; + bool found = false; + int match = 0; + + /* + * Check if our variable is in the validated variables list + */ + for (i = 0; variable_validate[i].name[0] != '\0'; i++) { + if (efi_guidcmp(variable_validate[i].vendor, vendor)) + continue; + + if (variable_matches(var_name, len, + variable_validate[i].name, &match)) { + found = true; + break; + } + } + + /* + * If it's in our list, it is removable. 
+ */ + return found; +} +EXPORT_SYMBOL_GPL(efivar_variable_is_removable); + static efi_status_t check_var_size(u32 attributes, unsigned long size) { @@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, *set = false; - if (efivar_validate(name, data, *size) == false) + if (efivar_validate(*vendor, name, data, *size) == false) return -EINVAL; /* diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 2aeaebd1c6e7..3f87a03abc22 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev) handle_simple_irq, IRQ_TYPE_NONE); if (ret) { - dev_info(&pdev->dev, "could not add irqchip\n"); - return ret; + dev_err(&pdev->dev, "could not add irqchip\n"); + goto teardown; } gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, @@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev) skip_irq: return 0; teardown: + of_mm_gpiochip_remove(&altera_gc->mmchip); pr_err("%s: registration failed with status %d\n", node->full_name, ret); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index ec58f4288649..cd007a67b302 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc, static int davinci_gpio_probe(struct platform_device *pdev) { int i, base; - unsigned ngpio; + unsigned ngpio, nbank; struct davinci_gpio_controller *chips; struct davinci_gpio_platform_data *pdata; struct davinci_gpio_regs __iomem *regs; @@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev) if (WARN_ON(ARCH_NR_GPIOS < ngpio)) ngpio = ARCH_NR_GPIOS; + nbank = DIV_ROUND_UP(ngpio, 32); chips = devm_kzalloc(dev, - ngpio * sizeof(struct davinci_gpio_controller), + nbank * sizeof(struct davinci_gpio_controller), GFP_KERNEL); if (!chips) return -ENOMEM; @@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) return irq; } - irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0, + irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0, &davinci_gpio_irq_ops, chips); if (!irq_domain) { diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index cf41440aff91..d9ab0cd1d205 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on) return 0; } +static void gpio_rcar_irq_bus_lock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct gpio_rcar_priv *p = gpiochip_get_data(gc); + + pm_runtime_get_sync(&p->pdev->dev); +} + +static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct gpio_rcar_priv *p = gpiochip_get_data(gc); + + pm_runtime_put(&p->pdev->dev); +} + + +static int gpio_rcar_irq_request_resources(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct gpio_rcar_priv *p = gpiochip_get_data(gc); + int error; + + error = pm_runtime_get_sync(&p->pdev->dev); + if (error < 0) + return error; + + return 0; +} + +static void gpio_rcar_irq_release_resources(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct gpio_rcar_priv *p = gpiochip_get_data(gc); + + pm_runtime_put(&p->pdev->dev); +} + static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) { struct gpio_rcar_priv *p = dev_id; @@ -450,6 +488,10 @@ static int 
gpio_rcar_probe(struct platform_device *pdev) irq_chip->irq_unmask = gpio_rcar_irq_enable; irq_chip->irq_set_type = gpio_rcar_irq_set_type; irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; + irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock; + irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock; + irq_chip->irq_request_resources = gpio_rcar_irq_request_resources; + irq_chip->irq_release_resources = gpio_rcar_irq_release_resources; irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; ret = gpiochip_add_data(gpio_chip, p); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 8ae7ab68cb97..f2a74d0b68ae 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -25,6 +25,14 @@ config DRM_MIPI_DSI bool depends on DRM +config DRM_DP_AUX_CHARDEV + bool "DRM DP AUX Interface" + depends on DRM + help + Choose this option to enable a /dev/drm_dp_auxN node that allows + reading and writing arbitrary DPCD registers on the DP aux + channel. + config DRM_KMS_HELPER tristate depends on DRM @@ -106,6 +114,8 @@ config DRM_TDFX Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), graphics card. If M is selected, the module will be called tdfx. +source "drivers/gpu/drm/arm/Kconfig" + config DRM_R128 tristate "ATI Rage 128" depends on DRM && PCI @@ -162,6 +172,8 @@ config DRM_AMDGPU source "drivers/gpu/drm/amd/amdgpu/Kconfig" source "drivers/gpu/drm/amd/powerplay/Kconfig" +source "drivers/gpu/drm/amd/acp/Kconfig" + source "drivers/gpu/drm/nouveau/Kconfig" config DRM_I810 diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 61766dec6a8d..6eb94fc561dc 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -22,10 +22,13 @@ drm-$(CONFIG_OF) += drm_of.o drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ - drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o + drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ + drm_kms_helper_common.o + drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o +drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o @@ -33,6 +36,7 @@ CFLAGS_drm_trace_points.o := -I$(src) obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o +obj-$(CONFIG_DRM_ARM) += arm/ obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig new file mode 100644 index 000000000000..0f734ee05274 --- /dev/null +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -0,0 +1,10 @@ +menu "ACP Configuration" + +config DRM_AMD_ACP + bool "Enable ACP IP support" + select MFD_CORE + select PM_GENERIC_DOMAINS if PM + help + Choose this option to enable ACP IP support for AMD SoCs. + +endmenu diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile new file mode 100644 index 000000000000..8363cb57915b --- /dev/null +++ b/drivers/gpu/drm/amd/acp/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the ACP, which is a sub-component +# of the AMDSOC/AMDGPU drm driver. +# It provides the HW control for ACP related functionalities.
+ +subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include + +AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/amd/acp/acp_hw.c similarity index 64% rename from drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c rename to drivers/gpu/drm/amd/acp/acp_hw.c index 341dc560acbb..7af83f142b4b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c +++ b/drivers/gpu/drm/amd/acp/acp_hw.c @@ -1,5 +1,5 @@ /* - * Copyright 2015 Red Hat Inc. + * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,30 +19,32 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: Ben Skeggs */ -#include "gf100.h" -#include "ctxgf100.h" -#include +#include +#include +#include +#include +#include -static const struct gf100_gr_func -gm206_gr = { - .init = gm204_gr_init, - .mmio = gm204_gr_pack_mmio, - .ppc_nr = 2, - .grctx = &gm206_grctx, - .sclass = { - { -1, -1, FERMI_TWOD_A }, - { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, - { -1, -1, MAXWELL_B, &gf100_fermi }, - { -1, -1, MAXWELL_COMPUTE_B }, - {} - } -}; +#include "acp_gfx_if.h" -int -gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +#define ACP_MODE_I2S 0 +#define ACP_MODE_AZ 1 + +#define mmACP_AZALIA_I2S_SELECT 0x51d4 + +int amd_acp_hw_init(void *cgs_device, + unsigned acp_version_major, unsigned acp_version_minor) { - return gf100_gr_new_(&gm206_gr, device, index, pgr); + unsigned int acp_mode = ACP_MODE_I2S; + + if ((acp_version_major == 2) && (acp_version_minor == 2)) + acp_mode = cgs_read_register(cgs_device, + mmACP_AZALIA_I2S_SELECT); + + if (acp_mode != ACP_MODE_I2S) + return -ENODEV; + + return 0; } diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h new file mode 100644 index 000000000000..bccf47b63899 --- /dev/null +++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * +*/ + +#ifndef _ACP_GFX_IF_H +#define _ACP_GFX_IF_H + +#include +#include "cgs_linux.h" +#include "cgs_common.h" + +int amd_acp_hw_init(void *cgs_device, + unsigned acp_version_major, unsigned acp_version_minor); + +#endif /* _ACP_GFX_IF_H */ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 20c9539abc36..c7fcdcedaadb 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -8,7 +8,8 @@ ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ -I$(FULL_AMD_PATH)/amdgpu \ -I$(FULL_AMD_PATH)/scheduler \ - -I$(FULL_AMD_PATH)/powerplay/inc + -I$(FULL_AMD_PATH)/powerplay/inc \ + -I$(FULL_AMD_PATH)/acp/include amdgpu-y := amdgpu_drv.o @@ -20,7 +21,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \ amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ - atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \ + atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o @@ -92,7 +93,17 @@ amdgpu-y += amdgpu_cgs.o amdgpu-y += \ ../scheduler/gpu_scheduler.o \ ../scheduler/sched_fence.o \ - amdgpu_sched.o + amdgpu_job.o + +# ACP component +ifneq ($(CONFIG_DRM_AMD_ACP),) +amdgpu-y += amdgpu_acp.o + +AMDACPPATH := ../acp +include $(FULL_AMD_PATH)/acp/Makefile + +amdgpu-y += $(AMD_ACP_FILES) +endif amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 82edf95b7740..d0489722fc7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -53,6 +53,7 @@ #include "amdgpu_ucode.h" #include "amdgpu_gds.h" #include "amd_powerplay.h" +#include "amdgpu_acp.h" #include "gpu_scheduler.h" @@ -74,7 +75,6 @@ extern int amdgpu_dpm; extern int amdgpu_smc_load_fw; extern int amdgpu_aspm; extern int amdgpu_runtime_pm; -extern int amdgpu_hard_reset; extern unsigned amdgpu_ip_block_mask; extern int amdgpu_bapm; extern int amdgpu_deep_color; @@ -82,11 +82,11 @@ extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; -extern int amdgpu_enable_scheduler; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; -extern int amdgpu_enable_semaphores; extern int amdgpu_powerplay; +extern unsigned amdgpu_pcie_gen_cap; +extern unsigned amdgpu_pcie_lane_cap; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -106,9 +106,6 @@ extern int amdgpu_powerplay; /* max number of IP instances */ #define AMDGPU_MAX_SDMA_INSTANCES 2 -/* number of hw syncs before falling back on blocking */ -#define AMDGPU_NUM_SYNCS 4 - /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (8 << 20) @@ -132,47 +129,6 @@ extern int amdgpu_powerplay; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -/* CG block flags */ -#define AMDGPU_CG_BLOCK_GFX (1 << 0) -#define AMDGPU_CG_BLOCK_MC (1 << 1) -#define AMDGPU_CG_BLOCK_SDMA (1 << 2) -#define AMDGPU_CG_BLOCK_UVD (1 << 3) -#define AMDGPU_CG_BLOCK_VCE (1 << 4) -#define AMDGPU_CG_BLOCK_HDP (1 << 5) -#define AMDGPU_CG_BLOCK_BIF (1 << 6) - -/* CG flags */ -#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0) -#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1) -#define
AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2) -#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3) -#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4) -#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5) -#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6) -#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7) -#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8) -#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9) -#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10) -#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11) -#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12) -#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13) -#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14) -#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15) -#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16) - -/* PG flags */ -#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0) -#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1) -#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2) -#define AMDGPU_PG_SUPPORT_UVD (1 << 3) -#define AMDGPU_PG_SUPPORT_VCE (1 << 4) -#define AMDGPU_PG_SUPPORT_CP (1 << 5) -#define AMDGPU_PG_SUPPORT_GDS (1 << 6) -#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7) -#define AMDGPU_PG_SUPPORT_SDMA (1 << 8) -#define AMDGPU_PG_SUPPORT_ACP (1 << 9) -#define AMDGPU_PG_SUPPORT_SAMU (1 << 10) - /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L #define AMDGPU_GFX_SAFE_MODE 0x00000001L @@ -189,7 +145,6 @@ struct amdgpu_fence; struct amdgpu_ib; struct amdgpu_vm; struct amdgpu_ring; -struct amdgpu_semaphore; struct amdgpu_cs_parser; struct amdgpu_job; struct amdgpu_irq_src; @@ -287,7 +242,7 @@ struct amdgpu_vm_pte_funcs { unsigned count); /* write pte one entry at a time with addr mapping */ void (*write_pte)(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); /* for linear pte/pde updates without addr mapping */ @@ -295,8 +250,6 @@ struct amdgpu_vm_pte_funcs { uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); - /* pad the indirect buffer to the necessary number of dw */ - void (*pad_ib)(struct amdgpu_ib *ib); }; /* provided by the gmc block */ @@ -334,9 +287,6 @@ struct amdgpu_ring_funcs { struct amdgpu_ib *ib); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); - bool (*emit_semaphore)(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait); void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr); void (*emit_hdp_flush)(struct amdgpu_ring *ring); @@ -349,6 +299,8 @@ struct amdgpu_ring_funcs { int (*test_ib)(struct amdgpu_ring *ring); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); + /* pad the indirect buffer to the necessary number of dw */ + void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); }; /* @@ -394,7 +346,7 @@ struct amdgpu_fence_driver { uint64_t gpu_addr; volatile uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ - uint64_t sync_seq[AMDGPU_MAX_RINGS]; + uint64_t sync_seq; atomic64_t last_seq; bool initialized; struct amdgpu_irq_src *irq_src; @@ -447,11 +399,6 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); -bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *ring); -void amdgpu_fence_note_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *ring); - /* * TTM. 
*/ @@ -470,6 +417,8 @@ struct amdgpu_mman { /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; }; int amdgpu_copy_buffer(struct amdgpu_ring *ring, @@ -484,8 +433,6 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *robj; struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; - unsigned prefered_domains; - unsigned allowed_domains; uint32_t priority; }; @@ -522,7 +469,8 @@ struct amdgpu_bo { /* Protected by gem.mutex */ struct list_head list; /* Protected by tbo.reserved */ - u32 initial_domain; + u32 prefered_domains; + u32 allowed_domains; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; struct ttm_placement placement; struct ttm_buffer_object tbo; @@ -544,7 +492,6 @@ struct amdgpu_bo { struct amdgpu_bo *parent; struct ttm_bo_kmap_obj dma_buf_vmap; - pid_t pid; struct amdgpu_mn *mn; struct list_head mn_list; }; @@ -606,8 +553,6 @@ struct amdgpu_sa_manager { uint32_t align; }; -struct amdgpu_sa_bo; - /* sub-allocation buffer */ struct amdgpu_sa_bo { struct list_head olist; @@ -621,13 +566,7 @@ struct amdgpu_sa_bo { /* * GEM objects. */ -struct amdgpu_gem { - struct mutex mutex; - struct list_head objects; -}; - -int amdgpu_gem_init(struct amdgpu_device *adev); -void amdgpu_gem_fini(struct amdgpu_device *adev); +void amdgpu_gem_force_release(struct amdgpu_device *adev); int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, bool kernel, @@ -639,32 +578,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, int amdgpu_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); - -/* - * Semaphores. 
- */ -struct amdgpu_semaphore { - struct amdgpu_sa_bo *sa_bo; - signed waiters; - uint64_t gpu_addr; -}; - -int amdgpu_semaphore_create(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore); -bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore); -bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore); -void amdgpu_semaphore_free(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore, - struct fence *fence); - /* * Synchronization */ struct amdgpu_sync { - struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; - struct fence *sync_to[AMDGPU_MAX_RINGS]; DECLARE_HASHTABLE(fences, 4); struct fence *last_vm_update; }; @@ -676,12 +593,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); -int amdgpu_sync_rings(struct amdgpu_sync *sync, - struct amdgpu_ring *ring); struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); int amdgpu_sync_wait(struct amdgpu_sync *sync); -void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct fence *fence); +void amdgpu_sync_free(struct amdgpu_sync *sync); /* * GART structures, functions & helpers @@ -799,6 +713,7 @@ struct amdgpu_flip_work { struct fence *excl; unsigned shared_count; struct fence **shared; + struct fence_cb cb; }; @@ -811,12 +726,12 @@ struct amdgpu_ib { uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; - struct amdgpu_ring *ring; struct amdgpu_fence *fence; struct amdgpu_user_fence *user; struct amdgpu_vm *vm; + unsigned vm_id; + uint64_t vm_pd_addr; struct amdgpu_ctx *ctx; - struct amdgpu_sync sync; uint32_t gds_base, gds_size; uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; @@ -835,13 +750,14 @@ enum amdgpu_ring_type { extern struct amd_sched_backend_ops amdgpu_sched_ops; -int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_ib *ibs, - unsigned num_ibs, - int (*free_job)(struct amdgpu_job *), - void *owner, - struct fence **fence); +int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, + struct amdgpu_job **job); +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, + struct amdgpu_job **job); +void amdgpu_job_free(struct amdgpu_job *job); +int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, + struct amd_sched_entity *entity, void *owner, + struct fence **f); struct amdgpu_ring { struct amdgpu_device *adev; @@ -850,7 +766,6 @@ struct amdgpu_ring { struct amd_gpu_scheduler sched; spinlock_t fence_lock; - struct mutex *ring_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; @@ -859,7 +774,7 @@ struct amdgpu_ring { unsigned wptr; unsigned wptr_old; unsigned ring_size; - unsigned ring_free_dw; + unsigned max_dw; int count_dw; uint64_t gpu_addr; uint32_t align_mask; @@ -867,8 +782,6 @@ struct amdgpu_ring { bool ready; u32 nop; u32 idx; - u64 last_semaphore_signal_addr; - u64 last_semaphore_wait_addr; u32 me; u32 pipe; u32 queue; @@ -881,7 +794,6 @@ struct amdgpu_ring { struct amdgpu_ctx *current_ctx; enum amdgpu_ring_type type; char name[16]; - bool is_pte_ring; }; /* @@ -925,13 +837,15 @@ struct amdgpu_vm_pt { }; struct amdgpu_vm_id { - unsigned id; - uint64_t pd_gpu_addr; + struct amdgpu_vm_manager_id *mgr_id; + uint64_t pd_gpu_addr; /* last flushed PD/PT update */ - struct fence *flushed_updates; + struct fence *flushed_updates; }; struct amdgpu_vm { + /* tree of virtual addresses 
mapped */ + spinlock_t it_lock; struct rb_root va; /* protecting invalidated */ @@ -956,30 +870,40 @@ struct amdgpu_vm { /* for id and flush management per ring */ struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; - /* for interval tree */ - spinlock_t it_lock; + /* protecting freed */ spinlock_t freed_lock; + + /* Scheduler entity for page table updates */ + struct amd_sched_entity entity; +}; + +struct amdgpu_vm_manager_id { + struct list_head list; + struct fence *active; + atomic_long_t owner; }; struct amdgpu_vm_manager { - struct { - struct fence *active; - atomic_long_t owner; - } ids[AMDGPU_NUM_VM]; + /* Handling of VMIDs */ + struct mutex lock; + unsigned num_ids; + struct list_head ids_lru; + struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM]; uint32_t max_pfn; - /* number of VMIDs */ - unsigned nvm; /* vram base address for page table entry */ u64 vram_base_offset; /* is vm enabled? */ bool enabled; /* vm pte handling */ const struct amdgpu_vm_pte_funcs *vm_pte_funcs; - struct amdgpu_ring *vm_pte_funcs_ring; + struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; + unsigned vm_pte_num_rings; + atomic_t vm_pte_next_ring; }; +void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -990,14 +914,12 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync); + struct amdgpu_sync *sync, struct fence *fence, + unsigned *vm_id, uint64_t *vm_pd_addr); void amdgpu_vm_flush(struct amdgpu_ring *ring, - struct amdgpu_vm *vm, - struct fence *updates); -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct fence *fence); -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); + unsigned vmid, + uint64_t pd_addr); +uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, @@ -1023,7 +945,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, uint64_t addr); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -int amdgpu_vm_free_job(struct amdgpu_job *job); /* * context related structures @@ -1051,10 +972,6 @@ struct amdgpu_ctx_mgr { struct idr ctx_handles; }; -int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, - struct amdgpu_ctx *ctx); -void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); - struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); @@ -1096,6 +1013,8 @@ struct amdgpu_bo_list { struct amdgpu_bo_list * amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); +void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, + struct list_head *validated); void amdgpu_bo_list_put(struct amdgpu_bo_list *list); void amdgpu_bo_list_free(struct amdgpu_bo_list *list); @@ -1169,6 +1088,7 @@ struct amdgpu_gca_config { unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; unsigned gb_addr_config; + unsigned num_rbs; uint32_t tile_mode_array[32]; uint32_t macrotile_mode_array[16]; @@ -1211,23 +1131,21 @@ struct amdgpu_gfx { unsigned ce_ram_size; }; -int amdgpu_ib_get(struct amdgpu_ring *ring, struct 
amdgpu_vm *vm, +int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib); -int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_ib *ib, void *owner); +int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, + struct amdgpu_ib *ib, void *owner, + struct fence *last_vm_update, + struct fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); -/* Ring access between begin & end cannot sleep */ -void amdgpu_ring_free_size(struct amdgpu_ring *ring); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); -int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); +void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); -void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring); unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, uint32_t **data); int amdgpu_ring_restore(struct amdgpu_ring *ring, @@ -1246,47 +1164,57 @@ struct amdgpu_cs_chunk { uint32_t chunk_id; uint32_t length_dw; uint32_t *kdata; - void __user *user_ptr; }; struct amdgpu_cs_parser { struct amdgpu_device *adev; struct drm_file *filp; struct amdgpu_ctx *ctx; - struct amdgpu_bo_list *bo_list; + /* chunks */ unsigned nchunks; struct amdgpu_cs_chunk *chunks; - /* relocations */ + + /* scheduler job object */ + struct amdgpu_job *job; + + /* buffer objects */ + struct ww_acquire_ctx ticket; + struct amdgpu_bo_list *bo_list; struct amdgpu_bo_list_entry vm_pd; - struct list_head validated; - struct fence *fence; - - struct amdgpu_ib *ibs; - uint32_t num_ibs; - - struct ww_acquire_ctx ticket; + struct list_head validated; + struct fence *fence; + uint64_t bytes_moved_threshold; + uint64_t bytes_moved; /* user fence */ - struct amdgpu_user_fence uf; struct amdgpu_bo_list_entry uf_entry; }; struct amdgpu_job { struct amd_sched_job base; struct amdgpu_device *adev; + struct amdgpu_ring *ring; + struct amdgpu_sync sync; struct amdgpu_ib *ibs; uint32_t num_ibs; void *owner; struct amdgpu_user_fence uf; - int (*free_job)(struct amdgpu_job *job); }; #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) -static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) +static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, + uint32_t ib_idx, int idx) { - return p->ibs[ib_idx].ptr[idx]; + return p->job->ibs[ib_idx].ptr[idx]; +} + +static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, + uint32_t ib_idx, int idx, + uint32_t value) +{ + p->job->ibs[ib_idx].ptr[idx] = value; } /* @@ -1538,6 +1466,7 @@ enum amdgpu_dpm_forced_level { AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, AMDGPU_DPM_FORCED_LEVEL_LOW = 1, AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, + AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, }; struct amdgpu_vce_state { @@ -1667,6 +1596,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + struct amd_sched_entity entity; }; /* @@ -1691,6 +1621,7 @@ struct amdgpu_vce { struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; unsigned harvest_config; + struct amd_sched_entity entity; }; /* @@ 
-1924,6 +1855,18 @@ void *amdgpu_cgs_create_device(struct amdgpu_device *adev); void amdgpu_cgs_destroy_device(void *cgs_device); +/* + * CGS + */ +void *amdgpu_cgs_create_device(struct amdgpu_device *adev); +void amdgpu_cgs_destroy_device(void *cgs_device); + + +/* GPU virtualization */ +struct amdgpu_virtualization { + bool supports_sr_iov; +}; + /* * Core structure, functions and helpers. */ @@ -1944,6 +1887,10 @@ struct amdgpu_device { struct drm_device *ddev; struct pci_dev *pdev; +#ifdef CONFIG_DRM_AMD_ACP + struct amdgpu_acp acp; +#endif + /* ASIC */ enum amd_asic_type asic_type; uint32_t family; @@ -2020,7 +1967,6 @@ struct amdgpu_device { /* memory management */ struct amdgpu_mman mman; - struct amdgpu_gem gem; struct amdgpu_vram_scratch vram_scratch; struct amdgpu_wb wb; atomic64_t vram_usage; @@ -2038,7 +1984,6 @@ struct amdgpu_device { /* rings */ unsigned fence_context; - struct mutex ring_lock; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; @@ -2050,6 +1995,7 @@ struct amdgpu_device { /* powerplay */ struct amd_powerplay powerplay; bool pp_enabled; + bool pp_force_state_enabled; /* dpm */ struct amdgpu_pm pm; @@ -2091,8 +2037,7 @@ struct amdgpu_device { /* amdkfd interface */ struct kfd_dev *kfd; - /* kernel conext for IB submission */ - struct amdgpu_ctx kernel_ctx; + struct amdgpu_virtualization virtualization; }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2197,7 +2142,6 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) ring->ring[ring->wptr++] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; - ring->ring_free_dw--; } static inline struct amdgpu_sdma_instance * @@ -2233,9 +2177,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags))) +#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) -#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) @@ -2245,9 +2188,9 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) -#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) 
+#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) @@ -2339,6 +2282,21 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_get_performance_level(adev) \ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) +#define amdgpu_dpm_get_pp_num_states(adev, data) \ + (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) + +#define amdgpu_dpm_get_pp_table(adev, table) \ + (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) + +#define amdgpu_dpm_set_pp_table(adev, buf, size) \ + (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) + +#define amdgpu_dpm_print_clock_levels(adev, type, buf) \ + (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) + +#define amdgpu_dpm_force_clock_level(adev, type, level) \ + (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) + #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) @@ -2349,7 +2307,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, @@ -2360,6 +2317,9 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); +struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c new file mode 100644 index 000000000000..d6b0bff510aa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -0,0 +1,500 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/irqdomain.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <sound/designware_i2s.h> +#include <sound/pcm.h> + +#include "amdgpu.h" +#include "atom.h" +#include "amdgpu_acp.h" + +#include "acp_gfx_if.h" + +#define ACP_TILE_ON_MASK 0x03 +#define ACP_TILE_OFF_MASK 0x02 +#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f +#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 + +#define ACP_TILE_P1_MASK 0x3e +#define ACP_TILE_P2_MASK 0x3d +#define ACP_TILE_DSP0_MASK 0x3b +#define ACP_TILE_DSP1_MASK 0x37 + +#define ACP_TILE_DSP2_MASK 0x2f + +#define ACP_DMA_REGS_END 0x146c0 +#define ACP_I2S_PLAY_REGS_START 0x14840 +#define ACP_I2S_PLAY_REGS_END 0x148b4 +#define ACP_I2S_CAP_REGS_START 0x148b8 +#define ACP_I2S_CAP_REGS_END 0x1496c + +#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac +#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 +#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c +#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 + +#define mmACP_PGFSM_RETAIN_REG 0x51c9 +#define mmACP_PGFSM_CONFIG_REG 0x51ca +#define mmACP_PGFSM_READ_REG_0 0x51cc + +#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8 +#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9 +#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa +#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb + +#define ACP_TIMEOUT_LOOP 0x000000FF +#define ACP_DEVS 3 +#define ACP_SRC_ID 162 + +enum { + ACP_TILE_P1 = 0, + ACP_TILE_P2, + ACP_TILE_DSP0, + ACP_TILE_DSP1, + ACP_TILE_DSP2, +}; + +static int acp_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->acp.parent = adev->dev; + + adev->acp.cgs_device = + amdgpu_cgs_create_device(adev); + if (!adev->acp.cgs_device) + return -EINVAL; + + return 0; +} + +static int acp_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->acp.cgs_device) + amdgpu_cgs_destroy_device(adev->acp.cgs_device); + + return 0; +} + +/* power off a tile/block within ACP */ +static int acp_suspend_tile(void *cgs_dev, int tile) +{ + u32 val = 0; + u32 count = 0; + + if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { + pr_err("Invalid ACP tile %d to suspend\n", tile); + return -1; + } + + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); + val &= ACP_TILE_ON_MASK; + + if (val == 0x0) { + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); + val = val | (1 << tile); + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); + cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, + 0x500 + tile); + + count = ACP_TIMEOUT_LOOP; + while (true) { + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + + tile); + val = val & ACP_TILE_ON_MASK; + if (val == ACP_TILE_OFF_MASK) + break; + if (--count == 0) { + pr_err("Timeout reading ACP PGFSM status\n"); + return -ETIMEDOUT; + } + udelay(100); + } + + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); + + val |= ACP_TILE_OFF_RETAIN_REG_MASK; + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); + } + return 0; +} + +/* power on a tile/block within ACP */ +static int acp_resume_tile(void *cgs_dev, int tile) +{ + u32 val = 0; + u32 count = 0; + + if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { + pr_err("Invalid ACP tile to resume\n"); + return -1; + } + + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); + val = val & ACP_TILE_ON_MASK; + + if (val != 0x0) { +
cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, + 0x600 + tile); + count = ACP_TIMEOUT_LOOP; + while (true) { + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + + tile); + val = val & ACP_TILE_ON_MASK; + if (val == 0x0) + break; + if (--count == 0) { + pr_err("Timeout reading ACP PGFSM status\n"); + return -ETIMEDOUT; + } + udelay(100); + } + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); + if (tile == ACP_TILE_P1) + val = val & (ACP_TILE_P1_MASK); + else if (tile == ACP_TILE_P2) + val = val & (ACP_TILE_P2_MASK); + + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); + } + return 0; +} + +struct acp_pm_domain { + void *cgs_dev; + struct generic_pm_domain gpd; +}; + +static int acp_poweroff(struct generic_pm_domain *genpd) +{ + int i, ret; + struct acp_pm_domain *apd; + + apd = container_of(genpd, struct acp_pm_domain, gpd); + if (apd != NULL) { + /* Do not return abruptly if any of the power tiles fails to suspend. + * Log it and continue powering off the other tiles. + */ + for (i = 4; i >= 0 ; i--) { + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); + if (ret) + pr_err("ACP tile %d suspend failed\n", i); + } + } + return 0; +} + +static int acp_poweron(struct generic_pm_domain *genpd) +{ + int i, ret; + struct acp_pm_domain *apd; + + apd = container_of(genpd, struct acp_pm_domain, gpd); + if (apd != NULL) { + for (i = 0; i < 2; i++) { + ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i); + if (ret) { + pr_err("ACP tile %d resume failed\n", i); + break; + } + } + + /* Disable DSPs which are not going to be used */ + for (i = 0; i < 3; i++) { + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i); + /* Continue suspending the other DSPs, even if one fails */ + if (ret) + pr_err("ACP DSP %d suspend failed\n", i); + } + } + return 0; +} + +static struct device *get_mfd_cell_dev(const char *device_name, int r) +{ + char auto_dev_name[25]; + struct device *dev; + + snprintf(auto_dev_name, sizeof(auto_dev_name), + "%s.%d.auto", device_name, r); + dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name); + dev_info(dev, "device %s added to pm domain\n", auto_dev_name); + + return dev; +} + +/** + * acp_hw_init - start and test ACP block + * + * @adev: amdgpu_device pointer + * + */ +static int acp_hw_init(void *handle) +{ + int r, i; + uint64_t acp_base; + struct device *dev; + struct i2s_platform_data *i2s_pdata; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + const struct amdgpu_ip_block_version *ip_version = + amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); + + if (!ip_version) + return -EINVAL; + + r = amd_acp_hw_init(adev->acp.cgs_device, + ip_version->major, ip_version->minor); + /* -ENODEV means board uses AZ rather than ACP */ + if (r == -ENODEV) + return 0; + else if (r) + return r; + + r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO, + 0x5289, 0, &acp_base); + if (r == -ENODEV) + return 0; + else if (r) + return r; + + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); + if (adev->acp.acp_genpd == NULL) + return -ENOMEM; + + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; + adev->acp.acp_genpd->gpd.power_on = acp_poweron; + + + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + + adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, + GFP_KERNEL); + + if (adev->acp.acp_cell == NULL) + return -ENOMEM; + + adev->acp.acp_res = kzalloc(sizeof(struct
resource) * 4, GFP_KERNEL); + + if (adev->acp.acp_res == NULL) { + kfree(adev->acp.acp_cell); + return -ENOMEM; + } + + i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL); + if (i2s_pdata == NULL) { + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_cell); + return -ENOMEM; + } + + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + i2s_pdata[0].cap = DWC_I2S_PLAY; + i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; + i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; + i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; + + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1; + i2s_pdata[1].cap = DWC_I2S_RECORD; + i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; + i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; + i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; + + adev->acp.acp_res[0].name = "acp2x_dma"; + adev->acp.acp_res[0].flags = IORESOURCE_MEM; + adev->acp.acp_res[0].start = acp_base; + adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END; + + adev->acp.acp_res[1].name = "acp2x_dw_i2s_play"; + adev->acp.acp_res[1].flags = IORESOURCE_MEM; + adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START; + adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END; + + adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap"; + adev->acp.acp_res[2].flags = IORESOURCE_MEM; + adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; + adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; + + adev->acp.acp_res[3].name = "acp2x_dma_irq"; + adev->acp.acp_res[3].flags = IORESOURCE_IRQ; + adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); + adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; + + adev->acp.acp_cell[0].name = "acp_audio_dma"; + adev->acp.acp_cell[0].num_resources = 4; + adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; + + adev->acp.acp_cell[1].name = "designware-i2s"; + adev->acp.acp_cell[1].num_resources = 1; + adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; + adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; + adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); + + adev->acp.acp_cell[2].name = "designware-i2s"; + adev->acp.acp_cell[2].num_resources = 1; + adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; + adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; + adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); + + r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, + ACP_DEVS); + if (r) + return r; + + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); + if (r) { + dev_err(dev, "Failed to add dev to genpd\n"); + return r; + } + } + + return 0; +} + +/** + * acp_hw_fini - stop the hardware block + * + * @adev: amdgpu_device pointer + * + */ +static int acp_hw_fini(void *handle) +{ + int i, ret; + struct device *dev; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); + /* If removal fails, don't give up and try the rest */ + if (ret) + dev_err(dev, "remove dev from genpd failed\n"); + } + + mfd_remove_devices(adev->acp.parent); + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_genpd); + kfree(adev->acp.acp_cell); + + return 0; +} + +static int acp_suspend(void *handle) +{ + return 0; +} + +static int acp_resume(void *handle)
+{ + int i, ret; + struct acp_pm_domain *apd; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* SMU block will power on ACP irrespective of ACP runtime status. + * Power off explicitly based on genpd ACP runtime status so that ACP + * hw and ACP-genpd status are in sync. + * 'suspend_power_off' represents "Power status before system suspend" + */ + if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { + apd = container_of(&adev->acp.acp_genpd->gpd, + struct acp_pm_domain, gpd); + + for (i = 4; i >= 0 ; i--) { + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); + if (ret) + pr_err("ACP tile %d suspend failed\n", i); + } + } + return 0; +} + +static int acp_early_init(void *handle) +{ + return 0; +} + +static bool acp_is_idle(void *handle) +{ + return true; +} + +static int acp_wait_for_idle(void *handle) +{ + return 0; +} + +static int acp_soft_reset(void *handle) +{ + return 0; +} + +static void acp_print_status(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + dev_info(adev->dev, "ACP STATUS\n"); +} + +static int acp_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int acp_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs acp_ip_funcs = { + .early_init = acp_early_init, + .late_init = NULL, + .sw_init = acp_sw_init, + .sw_fini = acp_sw_fini, + .hw_init = acp_hw_init, + .hw_fini = acp_hw_fini, + .suspend = acp_suspend, + .resume = acp_resume, + .is_idle = acp_is_idle, + .wait_for_idle = acp_wait_for_idle, + .soft_reset = acp_soft_reset, + .print_status = acp_print_status, + .set_clockgating_state = acp_set_clockgating_state, + .set_powergating_state = acp_set_powergating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h new file mode 100644 index 000000000000..f6e32a639107 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h @@ -0,0 +1,42 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_ACP_H__ +#define __AMDGPU_ACP_H__ + +#include "amd_shared.h" + +struct amdgpu_acp { + struct device *parent; + void *cgs_device; + struct amd_acp_private *private; + struct mfd_cell *acp_cell; + struct resource *acp_res; + struct acp_pm_domain *acp_genpd; +}; + +extern const struct amd_ip_funcs acp_ip_funcs; + +#endif /* __AMDGPU_ACP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 84d68d658f8a..32809f749903 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -30,25 +30,38 @@ const struct kfd2kgd_calls *kfd2kgd; const struct kgd2kfd_calls *kgd2kfd; bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); -bool amdgpu_amdkfd_init(void) +int amdgpu_amdkfd_init(void) { + int ret; + #if defined(CONFIG_HSA_AMD_MODULE) - bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); + int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); kgd2kfd_init_p = symbol_request(kgd2kfd_init); if (kgd2kfd_init_p == NULL) - return false; + return -ENOENT; + + ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd); + if (ret) { + symbol_put(kgd2kfd_init); + kgd2kfd = NULL; + } + +#elif defined(CONFIG_HSA_AMD) + ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); + if (ret) + kgd2kfd = NULL; + +#else + ret = -ENOENT; #endif - return true; + + return ret; } bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev) { -#if defined(CONFIG_HSA_AMD_MODULE) - bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); -#endif - switch (rdev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: @@ -62,35 +75,7 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev) return false; } -#if defined(CONFIG_HSA_AMD_MODULE) - kgd2kfd_init_p = symbol_request(kgd2kfd_init); - - if (kgd2kfd_init_p == NULL) { - kfd2kgd = NULL; - return false; - } - - if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) { - symbol_put(kgd2kfd_init); - kfd2kgd = NULL; - kgd2kfd = NULL; - - return false; - } - return true; -#elif defined(CONFIG_HSA_AMD) - if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) { - kfd2kgd = NULL; - kgd2kfd = NULL; - return false; - } - - return true; -#else - kfd2kgd = NULL; - return false; -#endif } void amdgpu_amdkfd_fini(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index a8be765542e6..de530f68d4e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -36,7 +36,7 @@ struct kgd_mem { void *cpu_ptr; }; -bool amdgpu_amdkfd_init(void); +int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9416e0f5c1db..84b0ce39ee14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1514,6 +1514,19 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, return -EINVAL; } +bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev) +{ + int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo); + u8 frev, crev; + u16 data_offset, size; + + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) + return true; + + return false; +} + void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool
lock) { uint32_t bios_6_scratch; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 0ebb959ea435..9e1442053fe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -196,6 +196,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, u8 module_index, struct atom_mc_reg_table *reg_table); +bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); + void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f82a2dd83874..4792f9d0b7d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -32,6 +32,9 @@ #include "amdgpu.h" #include "amdgpu_trace.h" +#define AMDGPU_BO_LIST_MAX_PRIORITY 32u +#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1) + static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, struct amdgpu_bo_list **result, int *id) @@ -90,6 +93,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, bool has_userptr = false; unsigned i; + int r; array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); if (!array) @@ -99,31 +103,35 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, for (i = 0; i < num_entries; ++i) { struct amdgpu_bo_list_entry *entry = &array[i]; struct drm_gem_object *gobj; + struct mm_struct *usermm; gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle); - if (!gobj) + if (!gobj) { + r = -ENOENT; goto error_free; + } entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); drm_gem_object_unreference_unlocked(gobj); - entry->priority = info[i].bo_priority; - entry->prefered_domains = entry->robj->initial_domain; - entry->allowed_domains = entry->prefered_domains; - if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) - entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; - if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) { + entry->priority = min(info[i].bo_priority, + AMDGPU_BO_LIST_MAX_PRIORITY); + usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm); + if (usermm) { + if (usermm != current->mm) { + amdgpu_bo_unref(&entry->robj); + r = -EPERM; + goto error_free; + } has_userptr = true; - entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; - entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; } entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) gws_obj = entry->robj; - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA) + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; trace_amdgpu_bo_list_set(list, entry->robj); @@ -144,8 +152,10 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, return 0; error_free: + while (i--) + amdgpu_bo_unref(&array[i].robj); drm_free_large(array); - return -ENOENT; + return r; } struct amdgpu_bo_list * @@ -161,6 +171,36 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) return result; } +void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, + struct list_head *validated) +{ + /* This is based on the 
bucket sort with O(n) time complexity. + * An item with priority "i" is added to bucket[i]. The lists are then + * concatenated in descending order. + */ + struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; + unsigned i; + + for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) + INIT_LIST_HEAD(&bucket[i]); + + /* Since buffers which appear sooner in the relocation list are + * likely to be used more often than buffers which appear later + * in the list, the sort mustn't change the ordering of buffers + * with the same priority, i.e. it must be stable. + */ + for (i = 0; i < list->num_entries; i++) { + unsigned priority = list->array[i].priority; + + list_add_tail(&list->array[i].tv.head, + &bucket[priority]); + } + + /* Connect the sorted buckets in the output list. */ + for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) + list_splice(&bucket[i], validated); +} + void amdgpu_bo_list_put(struct amdgpu_bo_list *list) { mutex_unlock(&list->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a081dda9fa2f..7a4b101e10c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device, case CGS_SYSTEM_INFO_PCIE_MLW: sys_info->value = adev->pm.pcie_mlw_mask; break; + case CGS_SYSTEM_INFO_CG_FLAGS: + sys_info->value = adev->cg_flags; + break; + case CGS_SYSTEM_INFO_PG_FLAGS: + sys_info->value = adev->pg_flags; + break; default: return -ENODEV; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 89c3dd62ba21..119cdc2c43e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { /* Don't try to start link training before we * have the dpcd */ - if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) return; /* set it to OFF so that drm_helper_connector_dpms() diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b882e8175615..52c3eb96b199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -30,47 +30,6 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -#define AMDGPU_CS_MAX_PRIORITY 32u -#define AMDGPU_CS_NUM_BUCKETS (AMDGPU_CS_MAX_PRIORITY + 1) - -/* This is based on the bucket sort with O(n) time complexity. - * An item with priority "i" is added to bucket[i]. The lists are then - * concatenated in descending order. - */ -struct amdgpu_cs_buckets { - struct list_head bucket[AMDGPU_CS_NUM_BUCKETS]; -}; - -static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b) -{ - unsigned i; - - for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) - INIT_LIST_HEAD(&b->bucket[i]); -} - -static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b, - struct list_head *item, unsigned priority) -{ - /* Since buffers which appear sooner in the relocation list are - * likely to be used more often than buffers which appear later - * in the list, the sort mustn't change the ordering of buffers - * with the same priority, i.e. it must be stable. 
- */ - list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]); -} - -static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b, - struct list_head *out_list) -{ - unsigned i; - - /* Connect the sorted buckets in the output list. */ - for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) { - list_splice(&b->bucket[i], out_list); - } -} - int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, u32 ip_instance, u32 ring, struct amdgpu_ring **out_ring) @@ -128,6 +87,7 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, + struct amdgpu_user_fence *uf, struct drm_amdgpu_cs_chunk_fence *fence_data) { struct drm_gem_object *gobj; @@ -139,17 +99,15 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, if (gobj == NULL) return -EINVAL; - p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - p->uf.offset = fence_data->offset; + uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + uf->offset = fence_data->offset; - if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { + if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) { drm_gem_object_unreference_unlocked(gobj); return -EINVAL; } - p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); - p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; - p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.robj = amdgpu_bo_ref(uf->bo); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; p->uf_entry.tv.shared = true; @@ -160,11 +118,12 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; union drm_amdgpu_cs *cs = data; uint64_t *chunk_array_user; uint64_t *chunk_array; - struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size; + struct amdgpu_user_fence uf = {}; + unsigned size, num_ibs = 0; int i; int ret; @@ -181,15 +140,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto free_chunk; } - p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); - /* get chunks */ - INIT_LIST_HEAD(&p->validated); chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; - goto put_bo_list; + goto put_ctx; } p->nchunks = cs->in.num_chunks; @@ -197,7 +153,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) GFP_KERNEL); if (!p->chunks) { ret = -ENOMEM; - goto put_bo_list; + goto put_ctx; } for (i = 0; i < p->nchunks; i++) { @@ -217,7 +173,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) size = p->chunks[i].length_dw; cdata = (void __user *)(unsigned long)user_chunk.chunk_data; - p->chunks[i].user_ptr = cdata; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); if (p->chunks[i].kdata == NULL) { @@ -233,7 +188,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) switch (p->chunks[i].chunk_id) { case AMDGPU_CHUNK_ID_IB: - p->num_ibs++; + ++num_ibs; break; case AMDGPU_CHUNK_ID_FENCE: @@ -243,7 +198,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto free_partial_kdata; } - ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); + ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata); if (ret) goto free_partial_kdata; @@ -258,12 +213,11 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } } - - p->ibs = kcalloc(p->num_ibs, sizeof(struct 
amdgpu_ib), GFP_KERNEL); - if (!p->ibs) { - ret = -ENOMEM; + ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job); + if (ret) goto free_all_kdata; - } + + p->job->uf = uf; kfree(chunk_array); return 0; @@ -274,9 +228,7 @@ free_partial_kdata: for (; i >= 0; i--) drm_free_large(p->chunks[i].kdata); kfree(p->chunks); -put_bo_list: - if (p->bo_list) - amdgpu_bo_list_put(p->bo_list); +put_ctx: amdgpu_ctx_put(p->ctx); free_chunk: kfree(chunk_array); @@ -336,80 +288,76 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) return max(bytes_moved_threshold, 1024*1024ull); } -int amdgpu_cs_list_validate(struct amdgpu_device *adev, - struct amdgpu_vm *vm, +int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { struct amdgpu_bo_list_entry *lobj; - struct amdgpu_bo *bo; - u64 bytes_moved = 0, initial_bytes_moved; - u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); + u64 initial_bytes_moved; int r; list_for_each_entry(lobj, validated, tv.head) { - bo = lobj->robj; - if (!bo->pin_count) { - u32 domain = lobj->prefered_domains; - u32 current_domain = - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + struct amdgpu_bo *bo = lobj->robj; + struct mm_struct *usermm; + uint32_t domain; - /* Check if this buffer will be moved and don't move it - * if we have moved too many buffers for this IB already. - * - * Note that this allows moving at least one buffer of - * any size, because it doesn't take the current "bo" - * into account. We don't want to disallow buffer moves - * completely. - */ - if ((lobj->allowed_domains & current_domain) != 0 && - (domain & current_domain) == 0 && /* will be moved */ - bytes_moved > bytes_moved_threshold) { - /* don't move it */ - domain = current_domain; - } - - retry: - amdgpu_ttm_placement_from_domain(bo, domain); - initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - bytes_moved += atomic64_read(&adev->num_bytes_moved) - - initial_bytes_moved; - - if (unlikely(r)) { - if (r != -ERESTARTSYS && domain != lobj->allowed_domains) { - domain = lobj->allowed_domains; - goto retry; - } - return r; + usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); + if (usermm && usermm != current->mm) + return -EPERM; + + if (bo->pin_count) + continue; + + /* Avoid moving this one if we have moved too many buffers + * for this IB already. + * + * Note that this allows moving at least one buffer of + * any size, because it doesn't take the current "bo" + * into account. We don't want to disallow buffer moves + * completely. 
+ */ + if (p->bytes_moved <= p->bytes_moved_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + + retry: + amdgpu_ttm_placement_from_domain(bo, domain); + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - + initial_bytes_moved; + + if (unlikely(r)) { + if (r != -ERESTARTSYS && domain != bo->allowed_domains) { + domain = bo->allowed_domains; + goto retry; } + return r; } - lobj->bo_va = amdgpu_vm_bo_find(vm, bo); } return 0; } -static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) +static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, + union drm_amdgpu_cs *cs) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_cs_buckets buckets; struct list_head duplicates; bool need_mmap_lock = false; - int i, r; + int r; + INIT_LIST_HEAD(&p->validated); + + p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); if (p->bo_list) { need_mmap_lock = p->bo_list->has_userptr; - amdgpu_cs_buckets_init(&buckets); - for (i = 0; i < p->bo_list->num_entries; i++) - amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head, - p->bo_list->array[i].priority); - - amdgpu_cs_buckets_get_list(&buckets, &p->validated); + amdgpu_bo_list_get_list(p->bo_list, &p->validated); } INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - if (p->uf.bo) + if (p->job->uf.bo) list_add(&p->uf_entry.tv.head, &p->validated); if (need_mmap_lock) @@ -421,11 +369,27 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); + p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); + p->bytes_moved = 0; + + r = amdgpu_cs_list_validate(p, &duplicates); if (r) goto error_validate; - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); + r = amdgpu_cs_list_validate(p, &p->validated); + if (r) + goto error_validate; + + if (p->bo_list) { + struct amdgpu_vm *vm = &fpriv->vm; + unsigned i; + + for (i = 0; i < p->bo_list->num_entries; i++) { + struct amdgpu_bo *bo = p->bo_list->array[i].robj; + + p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); + } + } error_validate: if (r) { @@ -447,7 +411,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) list_for_each_entry(e, &p->validated, tv.head) { struct reservation_object *resv = e->robj->tbo.resv; - r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp); + r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); if (r) return r; @@ -510,11 +474,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - if (parser->ibs) - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); - kfree(parser->ibs); - amdgpu_bo_unref(&parser->uf.bo); + if (parser->job) + amdgpu_job_free(parser->job); amdgpu_bo_unref(&parser->uf_entry.robj); } @@ -530,7 +491,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; - r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence); + r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence); if (r) return r; @@ -556,14 +517,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, return r; f = bo_va->last_pt_update; - r = 
amdgpu_sync_fence(adev, &p->ibs[0].sync, f); + r = amdgpu_sync_fence(adev, &p->job->sync, f); if (r) return r; } } - r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync); + r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ @@ -581,29 +542,25 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, } static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, - struct amdgpu_cs_parser *parser) + struct amdgpu_cs_parser *p) { - struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_ring *ring; + struct amdgpu_ring *ring = p->job->ring; int i, r; - if (parser->num_ibs == 0) - return 0; - /* Only for UVD/VCE VM emulation */ - for (i = 0; i < parser->num_ibs; i++) { - ring = parser->ibs[i].ring; - if (ring->funcs->parse_cs) { - r = amdgpu_ring_parse_cs(ring, parser, i); + if (ring->funcs->parse_cs) { + for (i = 0; i < p->job->num_ibs; i++) { + r = amdgpu_ring_parse_cs(ring, p, i); if (r) return r; } } - r = amdgpu_bo_vm_update_pte(parser, vm); + r = amdgpu_bo_vm_update_pte(p, vm); if (!r) - amdgpu_cs_sync_rings(parser); + amdgpu_cs_sync_rings(p); return r; } @@ -626,14 +583,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, int i, j; int r; - for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) { + for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { struct amdgpu_cs_chunk *chunk; struct amdgpu_ib *ib; struct drm_amdgpu_cs_chunk_ib *chunk_ib; struct amdgpu_ring *ring; chunk = &parser->chunks[i]; - ib = &parser->ibs[j]; + ib = &parser->job->ibs[j]; chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) @@ -645,6 +602,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (r) return r; + if (parser->job->ring && parser->job->ring != ring) + return -EINVAL; + + parser->job->ring = ring; + if (ring->funcs->parse_cs) { struct amdgpu_bo_va_mapping *m; struct amdgpu_bo *aobj = NULL; @@ -673,7 +635,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; kptr += chunk_ib->va_start - offset; - r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); + r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -682,7 +644,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); amdgpu_bo_kunmap(aobj); } else { - r = amdgpu_ib_get(ring, vm, 0, ib); + r = amdgpu_ib_get(adev, vm, 0, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -697,15 +659,12 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, j++; } - if (!parser->num_ibs) - return 0; - /* add GDS resources to first IB */ if (parser->bo_list) { struct amdgpu_bo *gds = parser->bo_list->gds_obj; struct amdgpu_bo *gws = parser->bo_list->gws_obj; struct amdgpu_bo *oa = parser->bo_list->oa_obj; - struct amdgpu_ib *ib = &parser->ibs[0]; + struct amdgpu_ib *ib = &parser->job->ibs[0]; if (gds) { ib->gds_base = amdgpu_bo_gpu_offset(gds); @@ -721,15 +680,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } } /* wrap the last IB with user fence */ - if (parser->uf.bo) { - struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1]; + if (parser->job->uf.bo) { + struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; /* UVD & VCE fw doesn't support user 
fences */ - if (ib->ring->type == AMDGPU_RING_TYPE_UVD || - ib->ring->type == AMDGPU_RING_TYPE_VCE) + if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD || + parser->job->ring->type == AMDGPU_RING_TYPE_VCE) return -EINVAL; - ib->user = &parser->uf; + ib->user = &parser->job->uf; } return 0; @@ -739,14 +698,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_ib *ib; int i, j, r; - if (!p->num_ibs) - return 0; - - /* Add dependencies to first IB */ - ib = &p->ibs[0]; for (i = 0; i < p->nchunks; ++i) { struct drm_amdgpu_cs_chunk_dep *deps; struct amdgpu_cs_chunk *chunk; @@ -784,7 +737,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, return r; } else if (fence) { - r = amdgpu_sync_fence(adev, &ib->sync, fence); + r = amdgpu_sync_fence(adev, &p->job->sync, + fence); fence_put(fence); amdgpu_ctx_put(ctx); if (r) @@ -796,15 +750,36 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, return 0; } -static int amdgpu_cs_free_job(struct amdgpu_job *job) +static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, + union drm_amdgpu_cs *cs) { - int i; - if (job->ibs) - for (i = 0; i < job->num_ibs; i++) - amdgpu_ib_free(job->adev, &job->ibs[i]); - kfree(job->ibs); - if (job->uf.bo) - amdgpu_bo_unref(&job->uf.bo); + struct amdgpu_ring *ring = p->job->ring; + struct amd_sched_fence *fence; + struct amdgpu_job *job; + + job = p->job; + p->job = NULL; + + job->base.sched = &ring->sched; + job->base.s_entity = &p->ctx->rings[ring->idx].entity; + job->owner = p->filp; + + fence = amd_sched_fence_create(job->base.s_entity, p->filp); + if (!fence) { + amdgpu_job_free(job); + return -ENOMEM; + } + + job->base.s_fence = fence; + p->fence = fence_get(&fence->base); + + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, + &fence->base); + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; + + trace_amdgpu_cs_ioctl(job); + amd_sched_entity_push_job(&job->base); + return 0; } @@ -829,7 +804,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_handle_lockup(adev, r); return r; } - r = amdgpu_cs_parser_relocs(&parser); + r = amdgpu_cs_parser_bos(&parser, data); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); else if (r && r != -ERESTARTSYS) @@ -848,68 +823,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) goto out; - for (i = 0; i < parser.num_ibs; i++) + for (i = 0; i < parser.job->num_ibs; i++) trace_amdgpu_cs(&parser, i); r = amdgpu_cs_ib_vm_chunk(adev, &parser); if (r) goto out; - if (amdgpu_enable_scheduler && parser.num_ibs) { - struct amdgpu_ring * ring = parser.ibs->ring; - struct amd_sched_fence *fence; - struct amdgpu_job *job; - - job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); - if (!job) { - r = -ENOMEM; - goto out; - } - - job->base.sched = &ring->sched; - job->base.s_entity = &parser.ctx->rings[ring->idx].entity; - job->adev = parser.adev; - job->owner = parser.filp; - job->free_job = amdgpu_cs_free_job; - - job->ibs = parser.ibs; - job->num_ibs = parser.num_ibs; - parser.ibs = NULL; - parser.num_ibs = 0; - - if (job->ibs[job->num_ibs - 1].user) { - job->uf = parser.uf; - job->ibs[job->num_ibs - 1].user = &job->uf; - parser.uf.bo = NULL; - } - - fence = amd_sched_fence_create(job->base.s_entity, - parser.filp); - if (!fence) { - r = -ENOMEM; - amdgpu_cs_free_job(job); - kfree(job); - goto out; - } - job->base.s_fence = fence; - parser.fence = 
fence_get(&fence->base); - - cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, - &fence->base); - job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - - trace_amdgpu_cs_ioctl(job); - amd_sched_entity_push_job(&job->base); - - } else { - struct amdgpu_fence *fence; - - r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, - parser.filp); - fence = parser.ibs[parser.num_ibs - 1].fence; - parser.fence = fence_get(&fence->base); - cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; - } + r = amdgpu_cs_submit(&parser, cs); out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); @@ -980,30 +901,36 @@ struct amdgpu_bo_va_mapping * amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo) { - struct amdgpu_bo_list_entry *reloc; struct amdgpu_bo_va_mapping *mapping; + unsigned i; + + if (!parser->bo_list) + return NULL; addr /= AMDGPU_GPU_PAGE_SIZE; - list_for_each_entry(reloc, &parser->validated, tv.head) { - if (!reloc->bo_va) + for (i = 0; i < parser->bo_list->num_entries; i++) { + struct amdgpu_bo_list_entry *lobj; + + lobj = &parser->bo_list->array[i]; + if (!lobj->bo_va) continue; - list_for_each_entry(mapping, &reloc->bo_va->valids, list) { + list_for_each_entry(mapping, &lobj->bo_va->valids, list) { if (mapping->it.start > addr || addr > mapping->it.last) continue; - *bo = reloc->bo_va->bo; + *bo = lobj->bo_va->bo; return mapping; } - list_for_each_entry(mapping, &reloc->bo_va->invalids, list) { + list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { if (mapping->it.start > addr || addr > mapping->it.last) continue; - *bo = reloc->bo_va->bo; + *bo = lobj->bo_va->bo; return mapping; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 17d1fb12128a..17e13621fae9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -25,8 +25,7 @@ #include <drm/drmP.h> #include "amdgpu.h" -int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, - struct amdgpu_ctx *ctx) +static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) { unsigned i, j; int r; @@ -35,44 +34,38 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, ctx->adev = adev; kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); - ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs * - AMDGPU_MAX_RINGS, GFP_KERNEL); + ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, + sizeof(struct fence*), GFP_KERNEL); if (!ctx->fences) return -ENOMEM; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { ctx->rings[i].sequence = 1; - ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) * - amdgpu_sched_jobs * i; + ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; } - if (amdgpu_enable_scheduler) { - /* create context entity for each ring */ - for (i = 0; i < adev->num_rings; i++) { - struct amd_sched_rq *rq; - if (pri >= AMD_SCHED_MAX_PRIORITY) { - kfree(ctx->fences); - return -EINVAL; - } - rq = &adev->rings[i]->sched.sched_rq[pri]; - r = amd_sched_entity_init(&adev->rings[i]->sched, - &ctx->rings[i].entity, - rq, amdgpu_sched_jobs); - if (r) - break; - } + /* create context entity for each ring */ + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + struct amd_sched_rq *rq; - if (i < adev->num_rings) { - for (j = 0; j < i; j++) - amd_sched_entity_fini(&adev->rings[j]->sched, - &ctx->rings[j].entity); - kfree(ctx->fences); - return r; - } + rq =
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; + r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, + rq, amdgpu_sched_jobs); + if (r) + break; + } + + if (i < adev->num_rings) { + for (j = 0; j < i; j++) + amd_sched_entity_fini(&adev->rings[j]->sched, + &ctx->rings[j].entity); + kfree(ctx->fences); + return r; } return 0; } -void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) +static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) { struct amdgpu_device *adev = ctx->adev; unsigned i, j; @@ -85,11 +78,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) fence_put(ctx->rings[i].fences[j]); kfree(ctx->fences); - if (amdgpu_enable_scheduler) { - for (i = 0; i < adev->num_rings; i++) - amd_sched_entity_fini(&adev->rings[i]->sched, - &ctx->rings[i].entity); - } + for (i = 0; i < adev->num_rings; i++) + amd_sched_entity_fini(&adev->rings[i]->sched, + &ctx->rings[i].entity); } static int amdgpu_ctx_alloc(struct amdgpu_device *adev, @@ -112,7 +103,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, return r; } *id = (uint32_t)r; - r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx); + r = amdgpu_ctx_init(adev, ctx); if (r) { idr_remove(&mgr->ctx_handles, *id); *id = 0; @@ -200,18 +191,18 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, id = args->in.ctx_id; switch (args->in.op) { - case AMDGPU_CTX_OP_ALLOC_CTX: - r = amdgpu_ctx_alloc(adev, fpriv, &id); - args->out.alloc.ctx_id = id; - break; - case AMDGPU_CTX_OP_FREE_CTX: - r = amdgpu_ctx_free(fpriv, id); - break; - case AMDGPU_CTX_OP_QUERY_STATE: - r = amdgpu_ctx_query(adev, fpriv, id, &args->out); - break; - default: - return -EINVAL; + case AMDGPU_CTX_OP_ALLOC_CTX: + r = amdgpu_ctx_alloc(adev, fpriv, &id); + args->out.alloc.ctx_id = id; + break; + case AMDGPU_CTX_OP_FREE_CTX: + r = amdgpu_ctx_free(fpriv, id); + break; + case AMDGPU_CTX_OP_QUERY_STATE: + r = amdgpu_ctx_query(adev, fpriv, id, &args->out); + break; + default: + return -EINVAL; } return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 65531463f88e..2139da773da6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -635,31 +635,6 @@ bool amdgpu_card_posted(struct amdgpu_device *adev) } -/** - * amdgpu_boot_test_post_card - check and possibly initialize the hw - * - * @adev: amdgpu_device pointer - * - * Check if the asic is initialized and if not, attempt to initialize - * it (all asics). - * Returns true if initialized or false if not. - */ -bool amdgpu_boot_test_post_card(struct amdgpu_device *adev) -{ - if (amdgpu_card_posted(adev)) - return true; - - if (adev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - if (adev->is_atom_bios) - amdgpu_atom_asic_init(adev->mode_info.atom_context); - return true; - } else { - dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); - return false; - } -} - /** * amdgpu_dummy_page_init - init dummy page used by the driver * @@ -959,12 +934,6 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - /* vramlimit must be a power of two */ - if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { - dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", - amdgpu_vram_limit); - amdgpu_vram_limit = 0; - } if (amdgpu_gart_size != -1) { /* gtt size must be power of two and greater or equal to 32M */ @@ -1434,7 +1403,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->mman.buffer_funcs = NULL; adev->mman.buffer_funcs_ring = NULL; adev->vm_manager.vm_pte_funcs = NULL; - adev->vm_manager.vm_pte_funcs_ring = NULL; + adev->vm_manager.vm_pte_num_rings = 0; adev->gart.gart_funcs = NULL; adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); @@ -1455,9 +1424,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* mutex initialization are all done here so we * can recall function without having locking issues */ - mutex_init(&adev->ring_lock); + mutex_init(&adev->vm_manager.lock); atomic_set(&adev->irq.ih.lock, 0); - mutex_init(&adev->gem.mutex); mutex_init(&adev->pm.mutex); mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->srbm_mutex); @@ -1531,8 +1499,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } + /* See if the asic supports SR-IOV */ + adev->virtualization.supports_sr_iov = + amdgpu_atombios_has_gpu_virtualization_table(adev); + /* Post card if necessary */ - if (!amdgpu_card_posted(adev)) { + if (!amdgpu_card_posted(adev) || + adev->virtualization.supports_sr_iov) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; @@ -1577,11 +1550,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } - r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx); - if (r) { - dev_err(adev->dev, "failed to create kernel context (%d).\n", r); - return r; - } r = amdgpu_ib_ring_tests(adev); if (r) DRM_ERROR("ib ring test failed (%d).\n", r); @@ -1645,7 +1613,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->shutdown = true; /* evict vram memory */ amdgpu_bo_evict_vram(adev); - amdgpu_ctx_fini(&adev->kernel_ctx); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); @@ -1795,15 +1762,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (!amdgpu_card_posted(adev)) + amdgpu_atom_asic_init(adev->mode_info.atom_context); r = amdgpu_resume(adev); + if (r) + DRM_ERROR("amdgpu_resume failed (%d).\n", r); amdgpu_fence_driver_resume(adev); - r = amdgpu_ib_ring_tests(adev); - if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); + if (resume) { + r = amdgpu_ib_ring_tests(adev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + } r = amdgpu_late_init(adev); if (r) @@ -1889,6 +1861,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) retry: r = amdgpu_asic_reset(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (!r) { dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); r = amdgpu_resume(adev); @@ -1933,80 +1908,97 @@ retry: return r; } +#define 
AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */ +#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */ + void amdgpu_get_pcie_info(struct amdgpu_device *adev) { u32 mask; int ret; - if (pci_is_root_bus(adev->pdev->bus)) + if (amdgpu_pcie_gen_cap) + adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; + + if (amdgpu_pcie_lane_cap) + adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; + + /* covers APUs as well */ + if (pci_is_root_bus(adev->pdev->bus)) { + if (adev->pm.pcie_gen_mask == 0) + adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; + if (adev->pm.pcie_mlw_mask == 0) + adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (adev->flags & AMD_IS_APU) - return; - - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (!ret) { - adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | - CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); - - if (mask & DRM_PCIE_SPEED_25) - adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; - if (mask & DRM_PCIE_SPEED_50) - adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; - if (mask & DRM_PCIE_SPEED_80) - adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; } - ret = drm_pcie_get_max_link_width(adev->ddev, &mask); - if (!ret) { - switch (mask) { - case 32: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 16: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 12: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 8: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 4: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 2: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 1: - adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; - break; - default: - break; + + if (adev->pm.pcie_gen_mask == 0) { + ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (!ret) { + adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); + + if (mask & DRM_PCIE_SPEED_25) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; + if (mask & DRM_PCIE_SPEED_50) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; + if (mask & DRM_PCIE_SPEED_80) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; + } else { + adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; + } + } + if (adev->pm.pcie_mlw_mask == 0) { + ret = drm_pcie_get_max_link_width(adev->ddev, &mask); + if (!ret) { + switch (mask) { + case 32: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | + 
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 16: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 12: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 8: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 4: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 2: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 1: + adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; + break; + default: + break; + } + } else { + adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index acd066d0a805..f0ed974bd4e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,32 +35,30 @@ #include #include -static void amdgpu_flip_wait_fence(struct amdgpu_device *adev, - struct fence **f) +static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) { - struct amdgpu_fence *fence; - long r; + struct amdgpu_flip_work *work = + container_of(cb, struct amdgpu_flip_work, cb); - if (*f == NULL) - return; + fence_put(f); + schedule_work(&work->flip_work); +} - fence = to_amdgpu_fence(*f); - if (fence) { - r = fence_wait(&fence->base, false); - if (r == -EDEADLK) - r = amdgpu_gpu_reset(adev); - } else - r = fence_wait(*f, false); +static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, + struct fence **f) +{ + struct fence *fence = *f; - if (r) - DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r); + if (fence == NULL) + return false; - /* We continue with the page flip even if we failed to wait on - * the fence, otherwise the DRM core and userspace will be - * confused about which BO the CRTC is scanning out - */ - fence_put(*f); *f = NULL; + + if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) + return true; + + fence_put(fence); + return false; } static void amdgpu_flip_work_func(struct work_struct *__work) @@ -72,13 +70,16 @@ static void amdgpu_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &amdgpuCrtc->base; unsigned long flags; - unsigned i; - int vpos, hpos, stat, min_udelay; + unsigned i, repcnt = 4; + int vpos, hpos, stat, min_udelay = 0; struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; - amdgpu_flip_wait_fence(adev, &work->excl); + if (amdgpu_flip_handle_fence(work, &work->excl)) + return; + for (i = 0; i < work->shared_count; ++i) - amdgpu_flip_wait_fence(adev, &work->shared[i]); + if (amdgpu_flip_handle_fence(work, &work->shared[i])) + return; /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -96,7 +97,7 @@ static void 
amdgpu_flip_work_func(struct work_struct *__work) * In practice this won't execute very often unless on very fast * machines because the time window for this to happen is very small. */ - for (;;) { + while (amdgpuCrtc->enabled && --repcnt) { /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank * start in hpos, and to the "fudged earlier" vblank start in * vpos. @@ -112,18 +113,30 @@ static void amdgpu_flip_work_func(struct work_struct *__work) break; /* Sleep at least until estimated real start of hw vblank */ - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); + if (min_udelay > vblank->framedur_ns / 2000) { + /* Don't wait ridiculously long - something is wrong */ + repcnt = 0; + break; + } + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); usleep_range(min_udelay, 2 * min_udelay); spin_lock_irqsave(&crtc->dev->event_lock, flags); }; - /* do the flip (mmio) */ - adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); + if (!repcnt) + DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " + "framedur %d, linedur %d, stat %d, vpos %d, " + "hpos %d\n", work->crtc_id, min_udelay, + vblank->framedur_ns / 1000, + vblank->linedur_ns / 1000, stat, vpos, hpos); + /* set the flip status */ amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + /* Do the flip (mmio) */ + adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); } /* @@ -242,7 +255,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - queue_work(amdgpu_crtc->pflip_queue, &work->flip_work); + amdgpu_flip_work_func(&work->flip_work); return 0; vblank_cleanup: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9c1af8976bef..74a2f8a6be1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -69,7 +69,6 @@ int amdgpu_dpm = -1; int amdgpu_smc_load_fw = 1; int amdgpu_aspm = -1; int amdgpu_runtime_pm = -1; -int amdgpu_hard_reset = 0; unsigned amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; @@ -78,11 +77,11 @@ int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; int amdgpu_exp_hw_support = 0; -int amdgpu_enable_scheduler = 1; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; -int amdgpu_enable_semaphores = 0; int amdgpu_powerplay = -1; +unsigned amdgpu_pcie_gen_cap = 0; +unsigned amdgpu_pcie_lane_cap = 0; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -126,9 +125,6 @@ module_param_named(aspm, amdgpu_aspm, int, 0444); MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); module_param_named(runpm, amdgpu_runtime_pm, int, 0444); -MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); -module_param_named(hard_reset, amdgpu_hard_reset, int, 0444); - MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))"); module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444); @@ -153,23 +149,23 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); 
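Editor's note: the page-flip rework above replaces the blocking amdgpu_flip_wait_fence() with a callback that re-queues the flip work once each fence signals, so the worker never sleeps on an unsignaled fence. A minimal sketch of that pattern follows; defer_work, defer_cb and defer_on_fence are illustrative names, not identifiers from this patch, and it relies on fence_add_callback() returning 0 when the callback was armed and an error when the fence had already signaled.

#include <linux/fence.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct defer_work {
	struct work_struct work;
	struct fence_cb cb;
};

static void defer_cb(struct fence *f, struct fence_cb *cb)
{
	struct defer_work *dw = container_of(cb, struct defer_work, cb);

	fence_put(f);			/* drop the reference taken over from the caller */
	schedule_work(&dw->work);	/* fence signaled, run the handler again */
}

/* Returns true if the handler must bail out and wait for the callback. */
static bool defer_on_fence(struct defer_work *dw, struct fence **f)
{
	struct fence *fence = *f;

	if (!fence)
		return false;
	*f = NULL;

	if (!fence_add_callback(fence, &dw->cb, defer_cb))
		return true;	/* callback armed; handler re-runs on signal */

	fence_put(fence);	/* already signaled, continue inline */
	return false;
}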
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)"); -module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); - MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)"); module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); -MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))"); -module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); - #ifdef CONFIG_DRM_AMD_POWERPLAY MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); module_param_named(powerplay, amdgpu_powerplay, int, 0444); #endif +MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); +module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); + +MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); +module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); + static struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ @@ -322,6 +318,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } + /* + * Initialize amdkfd before starting radeon. If it was not loaded yet, + * defer radeon probing + */ + ret = amdgpu_amdkfd_init(); + if (ret == -EPROBE_DEFER) + return ret; + /* Get rid of things like offb */ ret = amdgpu_kick_out_firmware_fb(pdev); if (ret) @@ -564,8 +568,6 @@ static int __init amdgpu_init(void) driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); - amdgpu_amdkfd_init(); - /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3671f9f220bd..97db196dc6f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -107,7 +107,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, if ((*fence) == NULL) { return -ENOMEM; } - (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx]; + (*fence)->seq = ++ring->fence_drv.sync_seq; (*fence)->ring = ring; (*fence)->owner = owner; fence_init(&(*fence)->base, &amdgpu_fence_ops, @@ -171,7 +171,7 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) */ last_seq = atomic64_read(&ring->fence_drv.last_seq); do { - last_emitted = ring->fence_drv.sync_seq[ring->idx]; + last_emitted = ring->fence_drv.sync_seq; seq = amdgpu_fence_read(ring); seq |= last_seq & 0xffffffff00000000LL; if (seq < last_seq) { @@ -260,34 +260,28 @@ static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq) } /* - * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal + * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal * @ring: ring to wait on for the seq number * @seq: seq number wait for * * return value: * 0: seq signaled, and gpu not hang - * -EDEADL: GPU hang detected * -EINVAL: some paramter is not valid */ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) { - bool signaled = false; - BUG_ON(!ring); - if (seq > ring->fence_drv.sync_seq[ring->idx]) + if (seq > ring->fence_drv.sync_seq) return -EINVAL; if (atomic64_read(&ring->fence_drv.last_seq) >= seq) return 0; amdgpu_fence_schedule_fallback(ring); - 
wait_event(ring->fence_drv.fence_queue, ( - (signaled = amdgpu_fence_seq_signaled(ring, seq)))); + wait_event(ring->fence_drv.fence_queue, + amdgpu_fence_seq_signaled(ring, seq)); - if (signaled) - return 0; - else - return -EDEADLK; + return 0; } /** @@ -304,7 +298,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) { uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; - if (seq >= ring->fence_drv.sync_seq[ring->idx]) + if (seq >= ring->fence_drv.sync_seq) return -ENOENT; return amdgpu_fence_ring_wait_seq(ring, seq); @@ -322,7 +316,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) */ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { - uint64_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint64_t seq = ring->fence_drv.sync_seq; if (!seq) return 0; @@ -347,7 +341,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) * but it's ok to report slightly wrong fence count here. */ amdgpu_fence_process(ring); - emitted = ring->fence_drv.sync_seq[ring->idx] + emitted = ring->fence_drv.sync_seq - atomic64_read(&ring->fence_drv.last_seq); /* to avoid 32bits warp around */ if (emitted > 0x10000000) @@ -356,68 +350,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) return (unsigned)emitted; } -/** - * amdgpu_fence_need_sync - do we need a semaphore - * - * @fence: amdgpu fence object - * @dst_ring: which ring to check against - * - * Check if the fence needs to be synced against another ring - * (all asics). If so, we need to emit a semaphore. - * Returns true if we need to sync with another ring, false if - * not. - */ -bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *dst_ring) -{ - struct amdgpu_fence_driver *fdrv; - - if (!fence) - return false; - - if (fence->ring == dst_ring) - return false; - - /* we are protected by the ring mutex */ - fdrv = &dst_ring->fence_drv; - if (fence->seq <= fdrv->sync_seq[fence->ring->idx]) - return false; - - return true; -} - -/** - * amdgpu_fence_note_sync - record the sync point - * - * @fence: amdgpu fence object - * @dst_ring: which ring to check against - * - * Note the sequence number at which point the fence will - * be synced with the requested ring (all asics). - */ -void amdgpu_fence_note_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *dst_ring) -{ - struct amdgpu_fence_driver *dst, *src; - unsigned i; - - if (!fence) - return; - - if (fence->ring == dst_ring) - return; - - /* we are protected by the ring mutex */ - src = &fence->ring->fence_drv; - dst = &dst_ring->fence_drv; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (i == dst_ring->idx) - continue; - - dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); - } -} - /** * amdgpu_fence_driver_start_ring - make the fence driver * ready for use on the requested ring. 
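Editor's note: with the per-ring sync_seq[] array collapsed into a single sync_seq counter, fence bookkeeping becomes plain 64-bit sequence comparison. The hardware fence location only holds the low 32 bits, so amdgpu_fence_activity() splices them under the last known upper bits and repairs 32-bit wrap-around. A standalone sketch of that widening step, paraphrasing the existing logic (widen_hw_seq is an illustrative name):

#include <linux/types.h>

static u64 widen_hw_seq(u64 last_seq, u64 last_emitted, u32 hw_seq)
{
	/* splice the 32-bit hardware value under the last seen upper bits */
	u64 seq = (last_seq & 0xffffffff00000000ULL) | hw_seq;

	/* going backwards means the 32-bit counter wrapped; the value must
	 * then lie in the window topped by the last emitted sequence */
	if (seq < last_seq)
		seq = (last_emitted & 0xffffffff00000000ULL) | hw_seq;

	return seq;
}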
@@ -471,13 +403,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, */ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) { - int i, r; + long timeout; + int r; ring->fence_drv.cpu_addr = NULL; ring->fence_drv.gpu_addr = 0; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - ring->fence_drv.sync_seq[i] = 0; - + ring->fence_drv.sync_seq = 0; atomic64_set(&ring->fence_drv.last_seq, 0); ring->fence_drv.initialized = false; @@ -486,26 +417,24 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) init_waitqueue_head(&ring->fence_drv.fence_queue); - if (amdgpu_enable_scheduler) { - long timeout = msecs_to_jiffies(amdgpu_lockup_timeout); - if (timeout == 0) { - /* - * FIXME: - * Delayed workqueue cannot use it directly, - * so the scheduler will not use delayed workqueue if - * MAX_SCHEDULE_TIMEOUT is set. - * Currently keep it simple and silly. - */ - timeout = MAX_SCHEDULE_TIMEOUT; - } - r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, - amdgpu_sched_hw_submission, - timeout, ring->name); - if (r) { - DRM_ERROR("Failed to create scheduler on ring %s.\n", - ring->name); - return r; - } + timeout = msecs_to_jiffies(amdgpu_lockup_timeout); + if (timeout == 0) { + /* + * FIXME: + * Delayed workqueue cannot use it directly, + * so the scheduler will not use delayed workqueue if + * MAX_SCHEDULE_TIMEOUT is set. + * Currently keep it simple and silly. + */ + timeout = MAX_SCHEDULE_TIMEOUT; + } + r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, + amdgpu_sched_hw_submission, + timeout, ring->name); + if (r) { + DRM_ERROR("Failed to create scheduler on ring %s.\n", + ring->name); + return r; } return 0; @@ -552,7 +481,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) kmem_cache_destroy(amdgpu_fence_slab); - mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -570,7 +498,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) del_timer_sync(&ring->fence_drv.fallback_timer); ring->fence_drv.initialized = false; } - mutex_unlock(&adev->ring_lock); } /** @@ -585,7 +512,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) { int i, r; - mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->fence_drv.initialized) @@ -602,7 +528,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } - mutex_unlock(&adev->ring_lock); } /** @@ -621,7 +546,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) { int i; - mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->fence_drv.initialized) @@ -631,7 +555,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) amdgpu_irq_get(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } - mutex_unlock(&adev->ring_lock); } /** @@ -651,7 +574,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; - amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]); + amdgpu_fence_write(ring, ring->fence_drv.sync_seq); } } @@ -781,7 +704,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - int i, j; + int i; for (i 
= 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; @@ -794,28 +717,38 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) seq_printf(m, "Last signaled fence 0x%016llx\n", (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); seq_printf(m, "Last emitted 0x%016llx\n", - ring->fence_drv.sync_seq[i]); - - for (j = 0; j < AMDGPU_MAX_RINGS; ++j) { - struct amdgpu_ring *other = adev->rings[j]; - if (i != j && other && other->fence_drv.initialized && - ring->fence_drv.sync_seq[j]) - seq_printf(m, "Last sync to ring %d 0x%016llx\n", - j, ring->fence_drv.sync_seq[j]); - } + ring->fence_drv.sync_seq); } return 0; } +/** + * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset + * + * Manually trigger a gpu reset at the next fence wait. + */ +static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + + seq_printf(m, "gpu reset\n"); + amdgpu_gpu_reset(adev); + + return 0; +} + static struct drm_info_list amdgpu_debugfs_fence_list[] = { {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, + {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL} }; #endif int amdgpu_debugfs_fence_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1); + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2); #else return 0; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7380f782cd14..7a47c45b2131 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -83,24 +83,32 @@ retry: return r; } *obj = &robj->gem_base; - robj->pid = task_pid_nr(current); - - mutex_lock(&adev->gem.mutex); - list_add_tail(&robj->list, &adev->gem.objects); - mutex_unlock(&adev->gem.mutex); return 0; } -int amdgpu_gem_init(struct amdgpu_device *adev) +void amdgpu_gem_force_release(struct amdgpu_device *adev) { - INIT_LIST_HEAD(&adev->gem.objects); - return 0; -} + struct drm_device *ddev = adev->ddev; + struct drm_file *file; -void amdgpu_gem_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_force_delete(adev); + mutex_lock(&ddev->struct_mutex); + + list_for_each_entry(file, &ddev->filelist, lhead) { + struct drm_gem_object *gobj; + int handle; + + WARN_ONCE(1, "Still active user space clients!\n"); + spin_lock(&file->table_lock); + idr_for_each_entry(&file->object_idr, gobj, handle) { + WARN_ONCE(1, "And also active allocations!\n"); + drm_gem_object_unreference(gobj); + } + idr_destroy(&file->object_idr); + spin_unlock(&file->table_lock); + } + + mutex_unlock(&ddev->struct_mutex); } /* @@ -252,6 +260,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto handle_lockup; bo = gem_to_amdgpu_bo(gobj); + bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); if (r) goto release_object; @@ -308,7 +318,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) || + if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { drm_gem_object_unreference_unlocked(gobj); return -EPERM; @@ -596,7 +606,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, break; } 
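Editor's note: the amdgpu_gpu_reset debugfs entry added above (and the registered file count bumped from 1 to 2) follows the standard drm_info_list idiom: a seq_file show callback plus a static table handed to amdgpu_debugfs_add_files(). A skeletal extra node for illustration; example_info_show and example_info_list are hypothetical names:

#include <linux/seq_file.h>
#include <drm/drmP.h>

static int example_info_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;

	seq_printf(m, "driver %s\n", dev->driver->name);
	return 0;
}

static struct drm_info_list example_info_list[] = {
	{"example_info", example_info_show, 0, NULL},
};

/* registered with: amdgpu_debugfs_add_files(adev, example_info_list, 1); */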
ttm_eu_backoff_reservation(&ticket, &list); - if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) + if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && + !amdgpu_vm_debug) amdgpu_gem_va_update_vm(adev, bo_va, args->operation); drm_gem_object_unreference_unlocked(gobj); @@ -628,7 +639,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, info.bo_size = robj->gem_base.size; info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; - info.domains = robj->initial_domain; + info.domains = robj->prefered_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); if (copy_to_user(out, &info, sizeof(info))) @@ -636,14 +647,18 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { + if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { r = -EPERM; amdgpu_bo_unreserve(robj); break; } - robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU); + robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); + robj->allowed_domains = robj->prefered_domains; + if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) + robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; + amdgpu_bo_unreserve(robj); break; default: @@ -688,38 +703,73 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, } #if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) +{ + struct drm_gem_object *gobj = ptr; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); + struct seq_file *m = data; + + unsigned domain; + const char *placement; + unsigned pin_count; + + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + placement = "VRAM"; + break; + case AMDGPU_GEM_DOMAIN_GTT: + placement = " GTT"; + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + placement = " CPU"; + break; + } + seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx", + id, amdgpu_bo_size(bo), placement, + amdgpu_bo_gpu_offset(bo)); + + pin_count = ACCESS_ONCE(bo->pin_count); + if (pin_count) + seq_printf(m, " pin count %d", pin_count); + seq_printf(m, "\n"); + + return 0; +} + static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_bo *rbo; - unsigned i = 0; + struct drm_file *file; + int r; - mutex_lock(&adev->gem.mutex); - list_for_each_entry(rbo, &adev->gem.objects, list) { - unsigned domain; - const char *placement; + r = mutex_lock_interruptible(&dev->struct_mutex); + if (r) + return r; - domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type); - switch (domain) { - case AMDGPU_GEM_DOMAIN_VRAM: - placement = "VRAM"; - break; - case AMDGPU_GEM_DOMAIN_GTT: - placement = " GTT"; - break; - case AMDGPU_GEM_DOMAIN_CPU: - default: - placement = " CPU"; - break; - } - seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", - i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20, - placement, (unsigned long)rbo->pid); - i++; + list_for_each_entry(file, &dev->filelist, lhead) { + struct task_struct *task; + + /* + * Although we have a valid reference on file->pid, that does + * not guarantee that the task_struct who called get_pid() is + * still alive (e.g. get_pid(current) => fork() => exit()). + * Therefore, we need to protect this ->comm access using RCU. 
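+ *
+ * Editor's note, not part of the original patch: pid_task() returns a
+ * task_struct that is only guaranteed to stay valid inside an RCU read
+ * section, so rcu_read_lock() below has to span the ->comm dereference
+ * as well, not just the pid_task() call itself.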
+ */ + rcu_read_lock(); + task = pid_task(file->pid, PIDTYPE_PID); + seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), + task ? task->comm : ""); + rcu_read_unlock(); + + spin_lock(&file->table_lock); + idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m); + spin_unlock(&file->table_lock); } - mutex_unlock(&adev->gem.mutex); + + mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 9e25edafa721..db14a7bbb8f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -55,10 +55,9 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); * suballocator. * Returns 0 on success, error on failure. */ -int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, +int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib) { - struct amdgpu_device *adev = ring->adev; int r; if (size) { @@ -75,10 +74,8 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); } - amdgpu_sync_create(&ib->sync); - - ib->ring = ring; ib->vm = vm; + ib->vm_id = 0; return 0; } @@ -93,7 +90,6 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, */ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) { - amdgpu_sync_free(adev, &ib->sync, &ib->fence->base); amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base); if (ib->fence) fence_put(&ib->fence->base); @@ -106,6 +102,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) * @num_ibs: number of IBs to schedule * @ibs: IB objects to schedule * @owner: owner for creating the fences + * @f: fence created during this submission * * Schedule an IB on the associated ring (all asics). * Returns 0 on success, error on failure. @@ -120,11 +117,13 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) * a CONST_IB), it will be put on the ring prior to the DE IB. Prior * to SI there was just a DE IB. 
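 *
 * Editor's note, not part of the original patch: with the reworked
 * signature the caller names the ring explicitly and receives the
 * submission fence through @f. A hypothetical call site:
 *
 *   struct fence *f;
 *   int r = amdgpu_ib_schedule(ring, 1, &ib, owner, NULL, &f);
 *   if (!r)
 *           fence_put(f);
 *
 * The reference returned in @f belongs to the caller and must be
 * dropped with fence_put() once it is no longer needed.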
*/ -int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_ib *ibs, void *owner) +int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, + struct amdgpu_ib *ibs, void *owner, + struct fence *last_vm_update, + struct fence **f) { + struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; - struct amdgpu_ring *ring; struct amdgpu_ctx *ctx, *old_ctx; struct amdgpu_vm *vm; unsigned i; @@ -133,7 +132,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (num_ibs == 0) return -EINVAL; - ring = ibs->ring; ctx = ibs->ctx; vm = ibs->vm; @@ -141,39 +139,24 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, dev_err(adev->dev, "couldn't schedule ib\n"); return -EINVAL; } - r = amdgpu_sync_wait(&ibs->sync); - if (r) { - dev_err(adev->dev, "IB sync failed (%d).\n", r); - return r; + + if (vm && !ibs->vm_id) { + dev_err(adev->dev, "VM IB without ID\n"); + return -EINVAL; } - r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); + + r = amdgpu_ring_alloc(ring, 256 * num_ibs); if (r) { dev_err(adev->dev, "scheduling IB failed (%d).\n", r); return r; } - if (vm) { - /* grab a vm id if necessary */ - r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync); - if (r) { - amdgpu_ring_unlock_undo(ring); - return r; - } - } - - r = amdgpu_sync_rings(&ibs->sync, ring); - if (r) { - amdgpu_ring_unlock_undo(ring); - dev_err(adev->dev, "failed to sync rings (%d)\n", r); - return r; - } - if (vm) { /* do context switch */ - amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); + amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr); if (ring->funcs->emit_gds_switch) - amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, + amdgpu_ring_emit_gds_switch(ring, ib->vm_id, ib->gds_base, ib->gds_size, ib->gws_base, ib->gws_size, ib->oa_base, ib->oa_size); @@ -186,9 +169,9 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) { + if (ib->ctx != ctx || ib->vm != vm) { ring->current_ctx = old_ctx; - amdgpu_ring_unlock_undo(ring); + amdgpu_ring_undo(ring); return -EINVAL; } amdgpu_ring_emit_ib(ring, ib); @@ -199,14 +182,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); ring->current_ctx = old_ctx; - amdgpu_ring_unlock_undo(ring); + amdgpu_ring_undo(ring); return r; } - if (!amdgpu_enable_scheduler && ib->ctx) - ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, - &ib->fence->base); - /* wrap the last IB with fence */ if (ib->user) { uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); @@ -215,10 +194,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, AMDGPU_FENCE_FLAG_64BIT); } - if (ib->vm) - amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); + if (f) + *f = fence_get(&ib->fence->base); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c new file mode 100644 index 000000000000..90e52f7e17a0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -0,0 +1,166 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_trace.h" + +int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, + struct amdgpu_job **job) +{ + size_t size = sizeof(struct amdgpu_job); + + if (num_ibs == 0) + return -EINVAL; + + size += sizeof(struct amdgpu_ib) * num_ibs; + + *job = kzalloc(size, GFP_KERNEL); + if (!*job) + return -ENOMEM; + + (*job)->adev = adev; + (*job)->ibs = (void *)&(*job)[1]; + (*job)->num_ibs = num_ibs; + + amdgpu_sync_create(&(*job)->sync); + + return 0; +} + +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, + struct amdgpu_job **job) +{ + int r; + + r = amdgpu_job_alloc(adev, 1, job); + if (r) + return r; + + r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); + if (r) + kfree(*job); + + return r; +} + +void amdgpu_job_free(struct amdgpu_job *job) +{ + unsigned i; + + for (i = 0; i < job->num_ibs; ++i) + amdgpu_ib_free(job->adev, &job->ibs[i]); + + amdgpu_bo_unref(&job->uf.bo); + amdgpu_sync_free(&job->sync); + kfree(job); +} + +int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, + struct amd_sched_entity *entity, void *owner, + struct fence **f) +{ + job->ring = ring; + job->base.sched = &ring->sched; + job->base.s_entity = entity; + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); + if (!job->base.s_fence) + return -ENOMEM; + + *f = fence_get(&job->base.s_fence->base); + + job->owner = owner; + amd_sched_entity_push_job(&job->base); + + return 0; +} + +static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) +{ + struct amdgpu_job *job = to_amdgpu_job(sched_job); + struct amdgpu_vm *vm = job->ibs->vm; + + struct fence *fence = amdgpu_sync_get_fence(&job->sync); + + if (fence == NULL && vm && !job->ibs->vm_id) { + struct amdgpu_ring *ring = job->ring; + unsigned i, vm_id; + uint64_t vm_pd_addr; + int r; + + r = amdgpu_vm_grab_id(vm, ring, &job->sync, + &job->base.s_fence->base, + &vm_id, &vm_pd_addr); + if (r) + DRM_ERROR("Error getting VM ID (%d)\n", r); + else { + for (i = 0; i < job->num_ibs; ++i) { + job->ibs[i].vm_id = vm_id; + job->ibs[i].vm_pd_addr = vm_pd_addr; + } + } + + fence = amdgpu_sync_get_fence(&job->sync); + } + + return fence; +} + +static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) +{ + struct fence *fence = NULL; + struct amdgpu_job *job; + int r; + + if (!sched_job) { + DRM_ERROR("job is null\n"); + return 
NULL; + } + job = to_amdgpu_job(sched_job); + + r = amdgpu_sync_wait(&job->sync); + if (r) { + DRM_ERROR("failed to sync wait (%d)\n", r); + return NULL; + } + + trace_amdgpu_sched_run_job(job); + r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner, + job->sync.last_vm_update, &fence); + if (r) { + DRM_ERROR("Error scheduling IBs (%d)\n", r); + goto err; + } + +err: + amdgpu_job_free(job); + return fence; +} + +struct amd_sched_backend_ops amdgpu_sched_ops = { + .dependency = amdgpu_job_dependency, + .run_job = amdgpu_job_run, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e23843f4d877..7805a8706af7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -447,8 +447,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.max_memory_clock = adev->pm.default_mclk * 10; } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; - dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * - adev->gfx.config.max_shader_engines; + dev_info.num_rb_pipes = adev->gfx.config.num_rbs; dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; dev_info._pad = 0; dev_info.ids_flags = 0; @@ -727,6 +726,12 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, /* Get associated drm_crtc: */ crtc = &adev->mode_info.crtcs[pipe]->base; + if (!crtc) { + /* This can occur on driver load if some component fails to + * initialize completely and driver is unloaded */ + DRM_ERROR("Uninitialized crtc %d\n", pipe); + return -EINVAL; + } /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index b1969f2b2038..d7ec9bd6755f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -48,8 +48,7 @@ struct amdgpu_mn { /* protected by adev->mn_lock */ struct hlist_node node; - /* objects protected by lock */ - struct mutex lock; + /* objects protected by mm->mmap_sem */ struct rb_root objects; }; @@ -73,21 +72,19 @@ static void amdgpu_mn_destroy(struct work_struct *work) struct amdgpu_bo *bo, *next_bo; mutex_lock(&adev->mn_lock); - mutex_lock(&rmn->lock); + down_write(&rmn->mm->mmap_sem); hash_del(&rmn->node); rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, it.rb) { - - interval_tree_remove(&node->it, &rmn->objects); list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { bo->mn = NULL; list_del_init(&bo->mn_list); } kfree(node); } - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); mutex_unlock(&adev->mn_lock); - mmu_notifier_unregister(&rmn->mn, rmn->mm); + mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); kfree(rmn); } @@ -129,8 +126,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, /* notification is exclusive, but interval is inclusive */ end -= 1; - mutex_lock(&rmn->lock); - it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct amdgpu_mn_node *node; @@ -142,7 +137,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, list_for_each_entry(bo, &node->bos, mn_list) { - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, + end)) continue; r = amdgpu_bo_reserve(bo, true); @@ -164,8 +160,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier 
*mn, amdgpu_bo_unreserve(bo); } } - - mutex_unlock(&rmn->lock); } static const struct mmu_notifier_ops amdgpu_mn_ops = { @@ -186,8 +180,8 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) struct amdgpu_mn *rmn; int r; - down_write(&mm->mmap_sem); mutex_lock(&adev->mn_lock); + down_write(&mm->mmap_sem); hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) if (rmn->mm == mm) @@ -202,7 +196,6 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->adev = adev; rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; - mutex_init(&rmn->lock); rmn->objects = RB_ROOT; r = __mmu_notifier_register(&rmn->mn, mm); @@ -212,14 +205,14 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); release_locks: - mutex_unlock(&adev->mn_lock); up_write(&mm->mmap_sem); + mutex_unlock(&adev->mn_lock); return rmn; free_rmn: - mutex_unlock(&adev->mn_lock); up_write(&mm->mmap_sem); + mutex_unlock(&adev->mn_lock); kfree(rmn); return ERR_PTR(r); @@ -249,7 +242,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) INIT_LIST_HEAD(&bos); - mutex_lock(&rmn->lock); + down_write(&rmn->mm->mmap_sem); while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { kfree(node); @@ -263,7 +256,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) if (!node) { node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); if (!node) { - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); return -ENOMEM; } } @@ -278,7 +271,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) interval_tree_insert(&node->it, &rmn->objects); - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); return 0; } @@ -297,13 +290,15 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) struct list_head *head; mutex_lock(&adev->mn_lock); + rmn = bo->mn; if (rmn == NULL) { mutex_unlock(&adev->mn_lock); return; } - mutex_lock(&rmn->lock); + down_write(&rmn->mm->mmap_sem); + /* save the next list entry for later */ head = bo->mn_list.next; @@ -317,6 +312,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) kfree(node); } - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); mutex_unlock(&adev->mn_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index fdc1be8550da..8d432e6901af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -390,7 +390,6 @@ struct amdgpu_crtc { struct drm_display_mode native_mode; u32 pll_id; /* page flipping */ - struct workqueue_struct *pflip_queue; struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b8fbbd7699e4..9a025a77958d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -97,9 +97,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); - mutex_lock(&bo->adev->gem.mutex); - list_del_init(&bo->list); - mutex_unlock(&bo->adev->gem.mutex); drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); kfree(bo->metadata); @@ -254,12 +251,15 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->adev = adev; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); - bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - 
AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); + bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA); + bo->allowed_domains = bo->prefered_domains; + if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) + bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; bo->flags = flags; @@ -367,7 +367,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, int r, i; unsigned fpfn, lpfn; - if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return -EPERM; if (WARN_ON_ONCE(min_offset > max_offset)) @@ -470,26 +470,6 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); } -void amdgpu_bo_force_delete(struct amdgpu_device *adev) -{ - struct amdgpu_bo *bo, *n; - - if (list_empty(&adev->gem.objects)) { - return; - } - dev_err(adev->dev, "Userspace still has active objects !\n"); - list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { - dev_err(adev->dev, "%p %p %lu %lu force free\n", - &bo->gem_base, bo, (unsigned long)bo->gem_base.size, - *((unsigned long *)&bo->gem_base.refcount)); - mutex_lock(&bo->adev->gem.mutex); - list_del_init(&bo->list); - mutex_unlock(&bo->adev->gem.mutex); - /* this should unref the ttm bo */ - drm_gem_object_unreference_unlocked(&bo->gem_base); - } -} - int amdgpu_bo_init(struct amdgpu_device *adev) { /* Add an MTRR for the VRAM */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 5107fb291bdb..acc08018c6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -149,7 +149,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr); int amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_evict_vram(struct amdgpu_device *adev); -void amdgpu_bo_force_delete(struct amdgpu_device *adev); int amdgpu_bo_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7d8d84eaea4a..ff9597ce268c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -113,13 +113,19 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if ((adev->flags & AMD_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + if (adev->pp_enabled) { enum amd_dpm_forced_level level; level = amdgpu_dpm_get_performance_level(adev); return snprintf(buf, PAGE_SIZE, "%s\n", (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : - (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); + (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : + (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : + (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? 
"manual" : "unknown"); } else { enum amdgpu_dpm_forced_level level; @@ -140,12 +146,19 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, enum amdgpu_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((adev->flags & AMD_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (strncmp("low", buf, strlen("low")) == 0) { level = AMDGPU_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { level = AMDGPU_DPM_FORCED_LEVEL_HIGH; } else if (strncmp("auto", buf, strlen("auto")) == 0) { level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + } else if (strncmp("manual", buf, strlen("manual")) == 0) { + level = AMDGPU_DPM_FORCED_LEVEL_MANUAL; } else { count = -EINVAL; goto fail; @@ -157,6 +170,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, mutex_lock(&adev->pm.mutex); if (adev->pm.dpm.thermal_active) { count = -EINVAL; + mutex_unlock(&adev->pm.mutex); goto fail; } ret = amdgpu_dpm_force_performance_level(adev, level); @@ -167,8 +181,272 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, mutex_unlock(&adev->pm.mutex); } fail: - mutex_unlock(&adev->pm.mutex); + return count; +} +static ssize_t amdgpu_get_pp_num_states(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct pp_states_info data; + int i, buf_len; + + if (adev->pp_enabled) + amdgpu_dpm_get_pp_num_states(adev, &data); + + buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); + for (i = 0; i < data.nums; i++) + buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, + (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : + (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : + (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : + (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? 
"performance" : "default"); + + return buf_len; +} + +static ssize_t amdgpu_get_pp_cur_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct pp_states_info data; + enum amd_pm_state_type pm = 0; + int i = 0; + + if (adev->pp_enabled) { + + pm = amdgpu_dpm_get_current_power_state(adev); + amdgpu_dpm_get_pp_num_states(adev, &data); + + for (i = 0; i < data.nums; i++) { + if (pm == data.states[i]) + break; + } + + if (i == data.nums) + i = -EINVAL; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", i); +} + +static ssize_t amdgpu_get_pp_force_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct pp_states_info data; + enum amd_pm_state_type pm = 0; + int i; + + if (adev->pp_force_state_enabled && adev->pp_enabled) { + pm = amdgpu_dpm_get_current_power_state(adev); + amdgpu_dpm_get_pp_num_states(adev, &data); + + for (i = 0; i < data.nums; i++) { + if (pm == data.states[i]) + break; + } + + if (i == data.nums) + i = -EINVAL; + + return snprintf(buf, PAGE_SIZE, "%d\n", i); + + } else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t amdgpu_set_pp_force_state(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + enum amd_pm_state_type state = 0; + long idx; + int ret; + + if (strlen(buf) == 1) + adev->pp_force_state_enabled = false; + else { + ret = kstrtol(buf, 0, &idx); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + struct pp_states_info data; + amdgpu_dpm_get_pp_num_states(adev, &data); + state = data.states[idx]; + /* only set user selected power states */ + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && + state != POWER_STATE_TYPE_DEFAULT) { + amdgpu_dpm_dispatch_task(adev, + AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + adev->pp_force_state_enabled = true; + } + } + } +fail: + return count; +} + +static ssize_t amdgpu_get_pp_table(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + char *table = NULL; + int size, i; + + if (adev->pp_enabled) + size = amdgpu_dpm_get_pp_table(adev, &table); + else + return 0; + + if (size >= PAGE_SIZE) + size = PAGE_SIZE - 1; + + for (i = 0; i < size; i++) { + sprintf(buf + i, "%02x", table[i]); + } + sprintf(buf + i, "\n"); + + return size; +} + +static ssize_t amdgpu_set_pp_table(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + if (adev->pp_enabled) + amdgpu_dpm_set_pp_table(adev, buf, count); + + return count; +} + +static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + ssize_t size = 0; + + if (adev->pp_enabled) + size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + + return size; +} + +static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = 
ddev->dev_private; + int ret; + long level; + + ret = kstrtol(buf, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_SCLK, level); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + ssize_t size = 0; + + if (adev->pp_enabled) + size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + + return size; +} + +static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long level; + + ret = kstrtol(buf, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_MCLK, level); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + ssize_t size = 0; + + if (adev->pp_enabled) + size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + + return size; +} + +static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long level; + + ret = kstrtol(buf, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_PCIE, level); +fail: return count; } @@ -176,14 +454,37 @@ static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amd static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, amdgpu_get_dpm_forced_performance_level, amdgpu_set_dpm_forced_performance_level); +static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); +static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); +static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, + amdgpu_get_pp_force_state, + amdgpu_set_pp_force_state); +static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, + amdgpu_get_pp_table, + amdgpu_set_pp_table); +static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_sclk, + amdgpu_set_pp_dpm_sclk); +static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_mclk, + amdgpu_set_pp_dpm_mclk); +static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_pcie, + amdgpu_set_pp_dpm_pcie); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); + struct drm_device *ddev = adev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((adev->flags & AMD_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) temp = 0; else @@ -623,22 +924,15 @@ force: amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } - mutex_lock(&adev->ring_lock); - /* update whether vce is active */ ps->vce_active = adev->pm.dpm.vce_active; ret = amdgpu_dpm_pre_set_power_state(adev); if (ret) - goto done; + return; /* update display watermarks based on new power state */ amdgpu_display_bandwidth_update(adev); - 
/* update displays */ - amdgpu_dpm_display_configuration_changed(adev); - - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; /* wait for the rings to drain */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { @@ -655,6 +949,12 @@ force: amdgpu_dpm_post_set_power_state(adev); + /* update displays */ + amdgpu_dpm_display_configuration_changed(adev); + + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; + adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; + if (adev->pm.funcs->force_performance_level) { if (adev->pm.dpm.thermal_active) { enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; @@ -667,9 +967,6 @@ force: amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); } } - -done: - mutex_unlock(&adev->ring_lock); } void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) @@ -770,6 +1067,44 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file for dpm state\n"); return ret; } + + if (adev->pp_enabled) { + ret = device_create_file(adev->dev, &dev_attr_pp_num_states); + if (ret) { + DRM_ERROR("failed to create device file pp_num_states\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); + if (ret) { + DRM_ERROR("failed to create device file pp_cur_state\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_force_state); + if (ret) { + DRM_ERROR("failed to create device file pp_force_state\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_table); + if (ret) { + DRM_ERROR("failed to create device file pp_table\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); + return ret; + } + } ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); @@ -787,6 +1122,15 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) hwmon_device_unregister(adev->pm.int_hwmon_dev); device_remove_file(adev->dev, &dev_attr_power_dpm_state); device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); + if (adev->pp_enabled) { + device_remove_file(adev->dev, &dev_attr_pp_num_states); + device_remove_file(adev->dev, &dev_attr_pp_cur_state); + device_remove_file(adev->dev, &dev_attr_pp_force_state); + device_remove_file(adev->dev, &dev_attr_pp_table); + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + } } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) @@ -802,13 +1146,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) int i = 0; amdgpu_display_bandwidth_update(adev); - mutex_lock(&adev->ring_lock); - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->ready) - amdgpu_fence_wait_empty(ring); - } - mutex_unlock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->ready) + amdgpu_fence_wait_empty(ring); + } 
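The seven device_create_file()/device_remove_file() pairs added above follow the file's existing style. A common alternative, shown here only as a kernel-style fragment and explicitly not what this patch does, is to group the attributes so a single call registers and unregisters them all:

/* Sketch only: grouping the pp_* attributes declared with DEVICE_ATTR()
 * above into one attribute_group. */
static struct attribute *amdgpu_pp_attrs[] = {
	&dev_attr_pp_num_states.attr,
	&dev_attr_pp_cur_state.attr,
	&dev_attr_pp_force_state.attr,
	&dev_attr_pp_table.attr,
	&dev_attr_pp_dpm_sclk.attr,
	&dev_attr_pp_dpm_mclk.attr,
	&dev_attr_pp_dpm_pcie.attr,
	NULL,
};

static const struct attribute_group amdgpu_pp_attr_group = {
	.attrs = amdgpu_pp_attrs,
};

/* One call would replace the seven device_create_file() calls ... */
ret = sysfs_create_group(&adev->dev->kobj, &amdgpu_pp_attr_group);
/* ... and one call the seven device_remove_file() calls. */
sysfs_remove_group(&adev->dev->kobj, &amdgpu_pp_attr_group);

The per-file style used by the patch keeps the error messages specific to the file that failed, which is a reasonable trade-off.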
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); } else { @@ -847,12 +1189,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; + struct drm_device *ddev = adev->ddev; if (!adev->pm.dpm_enabled) { seq_printf(m, "dpm not enabled\n"); return 0; } - if (adev->pp_enabled) { + if ((adev->flags & AMD_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (adev->pp_enabled) { amdgpu_dpm_debugfs_print_current_performance_level(adev, m); } else { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index b9d0d55f6b47..3cb6d6c413c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle) adev->powerplay.pp_handle); #ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) + if (adev->pp_enabled) { amdgpu_pm_sysfs_init(adev); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); + } #endif return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 59f735a933a9..be6388f73ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -73,10 +73,6 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, if (ret) return ERR_PTR(ret); - mutex_lock(&adev->gem.mutex); - list_add_tail(&bo->list, &adev->gem.objects); - mutex_unlock(&adev->gem.mutex); - return &bo->gem_base; } @@ -121,7 +117,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, { struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); - if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return ERR_PTR(-EPERM); return drm_gem_prime_export(dev, gobj, flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index d1f234dd2126..56c07e3fdb33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -48,28 +48,6 @@ */ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); -/** - * amdgpu_ring_free_size - update the free size - * - * @adev: amdgpu_device pointer - * @ring: amdgpu_ring structure holding ring information - * - * Update the free dw slots in the ring buffer (all asics). 
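Both the hwmon temperature read and the debugfs dump above now refuse to touch the hardware while a PX (PowerXpress, i.e. switchable-graphics) dGPU is powered down. The guard pattern, reduced to a standalone sketch with stubbed stand-ins for the kernel types:

#include <stdio.h>

enum { DRM_SWITCH_POWER_ON, DRM_SWITCH_POWER_OFF };
#define AMD_IS_PX (1 << 0)

struct drm_device { int switch_power_state; };
struct amdgpu_device { unsigned flags; struct drm_device *ddev; };

/* Nonzero when a register access would hit a powered-off PX GPU. */
static int amdgpu_px_powered_off(struct amdgpu_device *adev)
{
	return (adev->flags & AMD_IS_PX) &&
	       (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON);
}

int main(void)
{
	struct drm_device ddev = { .switch_power_state = DRM_SWITCH_POWER_OFF };
	struct amdgpu_device adev = { .flags = AMD_IS_PX, .ddev = &ddev };

	if (amdgpu_px_powered_off(&adev))
		printf("PX asic powered off, skipping register access\n");
	return 0;
}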
- */ -void amdgpu_ring_free_size(struct amdgpu_ring *ring) -{ - uint32_t rptr = amdgpu_ring_get_rptr(ring); - - /* This works because ring_size is a power of 2 */ - ring->ring_free_dw = rptr + (ring->ring_size / 4); - ring->ring_free_dw -= ring->wptr; - ring->ring_free_dw &= ring->ptr_mask; - if (!ring->ring_free_dw) { - /* this is an empty ring */ - ring->ring_free_dw = ring->ring_size / 4; - } -} - /** * amdgpu_ring_alloc - allocate space on the ring buffer * @@ -82,53 +60,21 @@ void amdgpu_ring_free_size(struct amdgpu_ring *ring) */ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) { - int r; - - /* make sure we aren't trying to allocate more space than there is on the ring */ - if (ndw > (ring->ring_size / 4)) - return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ - amdgpu_ring_free_size(ring); ndw = (ndw + ring->align_mask) & ~ring->align_mask; - while (ndw > (ring->ring_free_dw - 1)) { - amdgpu_ring_free_size(ring); - if (ndw < ring->ring_free_dw) { - break; - } - r = amdgpu_fence_wait_next(ring); - if (r) - return r; - } + + /* Make sure we aren't trying to allocate more space + * than the maximum for one submission + */ + if (WARN_ON_ONCE(ndw > ring->max_dw)) + return -ENOMEM; + ring->count_dw = ndw; ring->wptr_old = ring->wptr; return 0; } -/** - * amdgpu_ring_lock - lock the ring and allocate space on it - * - * @adev: amdgpu_device pointer - * @ring: amdgpu_ring structure holding ring information - * @ndw: number of dwords to allocate in the ring buffer - * - * Lock the ring and allocate @ndw dwords in the ring buffer - * (all asics). - * Returns 0 on success, error on failure. - */ -int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) -{ - int r; - - mutex_lock(ring->ring_lock); - r = amdgpu_ring_alloc(ring, ndw); - if (r) { - mutex_unlock(ring->ring_lock); - return r; - } - return 0; -} - /** amdgpu_ring_insert_nop - insert NOP packets * * @ring: amdgpu_ring structure holding ring information @@ -144,6 +90,19 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) amdgpu_ring_write(ring, ring->nop); } +/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets + * + * @ring: amdgpu_ring structure holding ring information + * @ib: IB to add NOP packets to + * + * This is the generic pad_ib function for rings except SDMA + */ +void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) +{ + while (ib->length_dw & ring->align_mask) + ib->ptr[ib->length_dw++] = ring->nop; +} + /** * amdgpu_ring_commit - tell the GPU to execute the new * commands on the ring buffer @@ -167,20 +126,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) amdgpu_ring_set_wptr(ring); } -/** - * amdgpu_ring_unlock_commit - tell the GPU to execute the new - * commands on the ring buffer and unlock it - * - * @ring: amdgpu_ring structure holding ring information - * - * Call amdgpu_ring_commit() then unlock the ring (all asics). - */ -void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring) -{ - amdgpu_ring_commit(ring); - mutex_unlock(ring->ring_lock); -} - /** * amdgpu_ring_undo - reset the wptr * @@ -193,19 +138,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) ring->wptr = ring->wptr_old; } -/** - * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring - * - * @ring: amdgpu_ring structure holding ring information - * - * Call amdgpu_ring_undo() then unlock the ring (all asics). 
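amdgpu_ring_alloc() above rounds the request up with `(ndw + align_mask) & ~align_mask`, and the deleted free-size code relied on the ring size being a power of two so pointer distances reduce to a single mask. Both idioms in a standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Round ndw up to a multiple of (align_mask + 1); align_mask must be
 * one less than a power of two, e.g. 0x7 for 8-dword alignment. */
static uint32_t align_up(uint32_t ndw, uint32_t align_mask)
{
	return (ndw + align_mask) & ~align_mask;
}

/* Free dwords in a power-of-two ring, mirroring the deleted
 * amdgpu_ring_free_size(): distance from wptr forward to rptr. */
static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr,
			     uint32_t ring_dw /* power of two */)
{
	uint32_t free = (rptr + ring_dw - wptr) & (ring_dw - 1);

	return free ? free : ring_dw;	/* empty ring: all slots free */
}

int main(void)
{
	printf("align_up(13, 0x7) = %u\n", align_up(13, 0x7));	/* 16 */
	printf("free = %u\n", ring_free_dw(10, 250, 256));	/* 16 */
	return 0;
}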
- */ -void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring) -{ - amdgpu_ring_undo(ring); - mutex_unlock(ring->ring_lock); -} - /** * amdgpu_ring_backup - Back up the content of a ring * @@ -218,43 +150,32 @@ unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, { unsigned size, ptr, i; - /* just in case lock the ring */ - mutex_lock(ring->ring_lock); *data = NULL; - if (ring->ring_obj == NULL) { - mutex_unlock(ring->ring_lock); + if (ring->ring_obj == NULL) return 0; - } /* it doesn't make sense to save anything if all fences are signaled */ - if (!amdgpu_fence_count_emitted(ring)) { - mutex_unlock(ring->ring_lock); + if (!amdgpu_fence_count_emitted(ring)) return 0; - } ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); size = ring->wptr + (ring->ring_size / 4); size -= ptr; size &= ring->ptr_mask; - if (size == 0) { - mutex_unlock(ring->ring_lock); + if (size == 0) return 0; - } /* and then save the content of the ring */ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); - if (!*data) { - mutex_unlock(ring->ring_lock); + if (!*data) return 0; - } for (i = 0; i < size; ++i) { (*data)[i] = ring->ring[ptr++]; ptr &= ring->ptr_mask; } - mutex_unlock(ring->ring_lock); return size; } @@ -276,7 +197,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, return 0; /* restore the saved ring content */ - r = amdgpu_ring_lock(ring, size); + r = amdgpu_ring_alloc(ring, size); if (r) return r; @@ -284,7 +205,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, amdgpu_ring_write(ring, data[i]); } - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); kfree(data); return 0; } @@ -352,7 +273,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - ring->ring_lock = &adev->ring_lock; /* Align ring size */ rb_bufsz = order_base_2(ring_size / 8); ring_size = (1 << (rb_bufsz + 1)) * 4; @@ -389,7 +309,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } } ring->ptr_mask = (ring->ring_size / 4) - 1; - ring->ring_free_dw = ring->ring_size / 4; + ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4, + amdgpu_sched_hw_submission); if (amdgpu_debugfs_ring_init(adev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); @@ -410,15 +331,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) int r; struct amdgpu_bo *ring_obj; - if (ring->ring_lock == NULL) - return; - - mutex_lock(ring->ring_lock); ring_obj = ring->ring_obj; ring->ready = false; ring->ring = NULL; ring->ring_obj = NULL; - mutex_unlock(ring->ring_lock); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); @@ -474,29 +390,18 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); uint32_t rptr, wptr, rptr_next; - unsigned count, i, j; - - amdgpu_ring_free_size(ring); - count = (ring->ring_size / 4) - ring->ring_free_dw; + unsigned i; wptr = amdgpu_ring_get_wptr(ring); - seq_printf(m, "wptr: 0x%08x [%5d]\n", - wptr, wptr); + seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); rptr = amdgpu_ring_get_rptr(ring); - seq_printf(m, "rptr: 0x%08x [%5d]\n", - rptr, rptr); - rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); + seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); + seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); - seq_printf(m, "last semaphore signal addr : 0x%016llx\n", - ring->last_semaphore_signal_addr); - seq_printf(m, "last semaphore wait addr : 0x%016llx\n", - ring->last_semaphore_wait_addr); - 
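With the lock and free-size machinery removed, the only size limit left is max_dw: the ring space budgeted per scheduler submission, i.e. the total ring dwords divided (rounding up) by the number of in-flight hardware submissions. A quick arithmetic sketch, using an example value for amdgpu_sched_hw_submission:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned ring_size = 1024 * 1024;	/* ring size in bytes  */
	unsigned ring_dw = ring_size / 4;	/* ... in dwords       */
	unsigned hw_submission = 2;		/* example value       */
	unsigned max_dw = DIV_ROUND_UP(ring_dw, hw_submission);

	/* Each submission may use at most max_dw dwords, so
	 * hw_submission jobs can be in flight without overrun. */
	printf("max_dw = %u\n", max_dw);	/* 131072 */
	return 0;
}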
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); - seq_printf(m, "%u dwords in ring\n", count); if (!ring->ready) return 0; @@ -505,11 +410,20 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) * packet that is the root issue */ i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - for (j = 0; j <= (count + 32); j++) { + while (i != rptr) { seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (rptr == i) + if (i == rptr) seq_puts(m, " *"); - if (rptr_next == i) + if (i == rptr_next) + seq_puts(m, " #"); + seq_puts(m, "\n"); + i = (i + 1) & ring->ptr_mask; + } + while (i != wptr) { + seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); + if (i == rptr) + seq_puts(m, " *"); + if (i == rptr_next) seq_puts(m, " #"); seq_puts(m, "\n"); i = (i + 1) & ring->ptr_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8b88edb0434b..2faf03bcda21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -321,8 +321,11 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, int i, r; signed long t; - BUG_ON(align > sa_manager->align); - BUG_ON(size > sa_manager->size); + if (WARN_ON_ONCE(align > sa_manager->align)) + return -EINVAL; + + if (WARN_ON_ONCE(size > sa_manager->size)) + return -EINVAL; *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); if ((*sa_bo) == NULL) { @@ -354,12 +357,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) if (fences[i]) - fences[count++] = fences[i]; + fences[count++] = fence_get(fences[i]); if (count) { spin_unlock(&sa_manager->wq.lock); t = fence_wait_any_timeout(fences, count, false, MAX_SCHEDULE_TIMEOUT); + for (i = 0; i < count; ++i) + fence_put(fences[i]); + r = (t > 0) ? 0 : t; spin_lock(&sa_manager->wq.lock); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c deleted file mode 100644 index 438c05254695..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
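The amdgpu_sa.c hunk above fixes a lifetime race: the fences are waited on after sa_manager->wq.lock is dropped, so each one must hold its own reference across the unlocked window. The shape of that fix as a generic sketch, with a trivial counter standing in for fence_get()/fence_put():

#include <stdio.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
static void obj_put(struct obj *o)        { o->refs--; }

int main(void)
{
	struct obj fence = { .refs = 1 };
	struct obj *held;

	/* lock(): collect candidates under the lock */
	held = obj_get(&fence);	/* take a reference *before* unlocking,
				   as fences[count++] = fence_get(...) does */
	/* unlock() */

	/* wait_any(held): safe, our reference keeps the object alive
	 * even if the original owner drops theirs concurrently */

	obj_put(held);		/* matching fence_put() after the wait */
	printf("refs = %d\n", fence.refs);	/* back to 1 */
	return 0;
}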
- * - * - */ -#include -#include -#include -#include -#include "amdgpu.h" -#include "amdgpu_trace.h" - -static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) -{ - struct amdgpu_job *job = to_amdgpu_job(sched_job); - return amdgpu_sync_get_fence(&job->ibs->sync); -} - -static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) -{ - struct amdgpu_fence *fence = NULL; - struct amdgpu_job *job; - int r; - - if (!sched_job) { - DRM_ERROR("job is null\n"); - return NULL; - } - job = to_amdgpu_job(sched_job); - trace_amdgpu_sched_run_job(job); - r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); - if (r) { - DRM_ERROR("Error scheduling IBs (%d)\n", r); - goto err; - } - - fence = job->ibs[job->num_ibs - 1].fence; - fence_get(&fence->base); - -err: - if (job->free_job) - job->free_job(job); - - kfree(job); - return fence ? &fence->base : NULL; -} - -struct amd_sched_backend_ops amdgpu_sched_ops = { - .dependency = amdgpu_sched_dependency, - .run_job = amdgpu_sched_run_job, -}; - -int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_ib *ibs, - unsigned num_ibs, - int (*free_job)(struct amdgpu_job *), - void *owner, - struct fence **f) -{ - int r = 0; - if (amdgpu_enable_scheduler) { - struct amdgpu_job *job = - kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); - if (!job) - return -ENOMEM; - job->base.sched = &ring->sched; - job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; - job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); - if (!job->base.s_fence) { - kfree(job); - return -ENOMEM; - } - *f = fence_get(&job->base.s_fence->base); - - job->adev = adev; - job->ibs = ibs; - job->num_ibs = num_ibs; - job->owner = owner; - job->free_job = free_job; - amd_sched_entity_push_job(&job->base); - } else { - r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); - if (r) - return r; - *f = fence_get(&ibs[num_ibs - 1].fence->base); - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c deleted file mode 100644 index 1caaf201b708..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2011 Christian König. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
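The deleted amdgpu_sched.c shows the two-callback contract of the GPU scheduler backend: .dependency returns the next fence a job must wait for, and .run_job actually submits it. That contract, abstracted into a standalone sketch with stub types (not the real amd_sched API):

#include <stdio.h>
#include <stddef.h>

struct fence { int signaled; };
struct job   { struct fence *dep; int id; };

struct backend_ops {
	struct fence *(*dependency)(struct job *job); /* next unmet dep  */
	struct fence *(*run_job)(struct job *job);    /* submit, return
							 the HW fence    */
};

static struct fence *dep_cb(struct job *job)
{
	return (job->dep && !job->dep->signaled) ? job->dep : NULL;
}

static struct fence hw_fence;

static struct fence *run_cb(struct job *job)
{
	printf("running job %d\n", job->id);
	return &hw_fence;
}

static const struct backend_ops ops = { dep_cb, run_cb };

/* The scheduler core loop: wait out dependencies, then run. */
static void sched_one(struct job *job)
{
	struct fence *f;

	while ((f = ops.dependency(job)) != NULL)
		f->signaled = 1;	/* stand-in for waiting */
	ops.run_job(job);
}

int main(void)
{
	struct fence dep = { 0 };
	struct job j = { &dep, 42 };

	sched_one(&j);
	return 0;
}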
- * - */ -/* - * Authors: - * Christian König - */ -#include -#include "amdgpu.h" -#include "amdgpu_trace.h" - -int amdgpu_semaphore_create(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore) -{ - int r; - - *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL); - if (*semaphore == NULL) { - return -ENOMEM; - } - r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, - &(*semaphore)->sa_bo, 8, 8); - if (r) { - kfree(*semaphore); - *semaphore = NULL; - return r; - } - (*semaphore)->waiters = 0; - (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo); - - *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; - - return 0; -} - -bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore) -{ - trace_amdgpu_semaphore_signale(ring->idx, semaphore); - - if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) { - --semaphore->waiters; - - /* for debugging lockup only, used by sysfs debug files */ - ring->last_semaphore_signal_addr = semaphore->gpu_addr; - return true; - } - return false; -} - -bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore) -{ - trace_amdgpu_semaphore_wait(ring->idx, semaphore); - - if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) { - ++semaphore->waiters; - - /* for debugging lockup only, used by sysfs debug files */ - ring->last_semaphore_wait_addr = semaphore->gpu_addr; - return true; - } - return false; -} - -void amdgpu_semaphore_free(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore, - struct fence *fence) -{ - if (semaphore == NULL || *semaphore == NULL) { - return; - } - if ((*semaphore)->waiters > 0) { - dev_err(adev->dev, "semaphore %p has more waiters than signalers," - " hardware lockup imminent!\n", *semaphore); - } - amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence); - kfree(*semaphore); - *semaphore = NULL; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 181ce39ef5e5..c15be00de904 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -46,14 +46,6 @@ struct amdgpu_sync_entry { */ void amdgpu_sync_create(struct amdgpu_sync *sync) { - unsigned i; - - for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) - sync->semaphores[i] = NULL; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - sync->sync_to[i] = NULL; - hash_init(sync->fences); sync->last_vm_update = NULL; } @@ -107,7 +99,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct fence *f) { struct amdgpu_sync_entry *e; - struct amdgpu_fence *fence; if (!f) return 0; @@ -116,27 +107,20 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) amdgpu_sync_keep_later(&sync->last_vm_update, f); - fence = to_amdgpu_fence(f); - if (!fence || fence->ring->adev != adev) { - hash_for_each_possible(sync->fences, e, node, f->context) { - if (unlikely(e->fence->context != f->context)) - continue; + hash_for_each_possible(sync->fences, e, node, f->context) { + if (unlikely(e->fence->context != f->context)) + continue; - amdgpu_sync_keep_later(&e->fence, f); - return 0; - } - - e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); - if (!e) - return -ENOMEM; - - hash_add(sync->fences, &e->node, f->context); - e->fence = fence_get(f); + amdgpu_sync_keep_later(&e->fence, f); return 0; } - amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f); + e = kmalloc(sizeof(struct amdgpu_sync_entry), 
GFP_KERNEL); + if (!e) + return -ENOMEM; + hash_add(sync->fences, &e->node, f->context); + e->fence = fence_get(f); return 0; } @@ -153,13 +137,13 @@ static void *amdgpu_sync_get_owner(struct fence *f) } /** - * amdgpu_sync_resv - use the semaphores to sync to a reservation object + * amdgpu_sync_resv - sync to a reservation object * * @sync: sync object to add fences from reservation object to * @resv: reservation object with embedded fence * @shared: true if we should only sync to the exclusive fence * - * Sync to the fence using the semaphore objects + * Sync to the fence */ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, @@ -250,123 +234,17 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync) kfree(e); } - if (amdgpu_enable_semaphores) - return 0; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct fence *fence = sync->sync_to[i]; - if (!fence) - continue; - - r = fence_wait(fence, false); - if (r) - return r; - } - - return 0; -} - -/** - * amdgpu_sync_rings - sync ring to all registered fences - * - * @sync: sync object to use - * @ring: ring that needs sync - * - * Ensure that all registered fences are signaled before letting - * the ring continue. The caller must hold the ring lock. - */ -int amdgpu_sync_rings(struct amdgpu_sync *sync, - struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - unsigned count = 0; - int i, r; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *other = adev->rings[i]; - struct amdgpu_semaphore *semaphore; - struct amdgpu_fence *fence; - - if (!sync->sync_to[i]) - continue; - - fence = to_amdgpu_fence(sync->sync_to[i]); - - /* check if we really need to sync */ - if (!amdgpu_enable_scheduler && - !amdgpu_fence_need_sync(fence, ring)) - continue; - - /* prevent GPU deadlocks */ - if (!other->ready) { - dev_err(adev->dev, "Syncing to a disabled ring!"); - return -EINVAL; - } - - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { - r = fence_wait(sync->sync_to[i], true); - if (r) - return r; - continue; - } - - if (count >= AMDGPU_NUM_SYNCS) { - /* not enough room, wait manually */ - r = fence_wait(&fence->base, false); - if (r) - return r; - continue; - } - r = amdgpu_semaphore_create(adev, &semaphore); - if (r) - return r; - - sync->semaphores[count++] = semaphore; - - /* allocate enough space for sync command */ - r = amdgpu_ring_alloc(other, 16); - if (r) - return r; - - /* emit the signal semaphore */ - if (!amdgpu_semaphore_emit_signal(other, semaphore)) { - /* signaling wasn't successful wait manually */ - amdgpu_ring_undo(other); - r = fence_wait(&fence->base, false); - if (r) - return r; - continue; - } - - /* we assume caller has already allocated space on waiters ring */ - if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { - /* waiting wasn't successful wait manually */ - amdgpu_ring_undo(other); - r = fence_wait(&fence->base, false); - if (r) - return r; - continue; - } - - amdgpu_ring_commit(other); - amdgpu_fence_note_sync(fence, ring); - } - return 0; } /** * amdgpu_sync_free - free the sync object * - * @adev: amdgpu_device pointer * @sync: sync object to use - * @fence: fence to use for the free * - * Free the sync object by freeing all semaphores in it. + * Free the sync object. 
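amdgpu_sync_fence() above now keeps exactly one fence per fence context: if an entry for f->context already exists it is replaced only by a later fence (amdgpu_sync_keep_later), otherwise a new entry is added. The core idea in a standalone sketch, with a flat array standing in for the kernel hash table and a sequence number standing in for fence ordering:

#include <stdio.h>
#include <stdint.h>

struct fence { uint64_t context; uint64_t seqno; };

#define MAX_ENTRIES 8
static struct fence *entries[MAX_ENTRIES];
static unsigned num_entries;

/* Keep only the later fence of each context; "later" is simply a
 * larger seqno here. */
static void sync_fence(struct fence *f)
{
	unsigned i;

	for (i = 0; i < num_entries; i++) {
		if (entries[i]->context != f->context)
			continue;
		if (f->seqno > entries[i]->seqno)
			entries[i] = f;	/* replace with the later fence */
		return;
	}
	entries[num_entries++] = f;	/* first fence of this context */
}

int main(void)
{
	struct fence a = { .context = 1, .seqno = 10 };
	struct fence b = { .context = 1, .seqno = 12 };
	struct fence c = { .context = 2, .seqno = 3 };

	sync_fence(&a);
	sync_fence(&b);		/* replaces a: same context, later */
	sync_fence(&c);
	printf("%u contexts tracked\n", num_entries);	/* 2 */
	return 0;
}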
*/ -void amdgpu_sync_free(struct amdgpu_device *adev, - struct amdgpu_sync *sync, - struct fence *fence) +void amdgpu_sync_free(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; @@ -378,11 +256,5 @@ void amdgpu_sync_free(struct amdgpu_device *adev, kfree(e); } - for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) - amdgpu_semaphore_free(adev, &sync->semaphores[i], fence); - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - fence_put(sync->sync_to[i]); - fence_put(sync->last_vm_update); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 4865615e9c06..05a53f4fc334 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -238,144 +238,10 @@ void amdgpu_test_moves(struct amdgpu_device *adev) amdgpu_do_test_moves(adev); } -static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct fence **fence) -{ - uint32_t handle = ring->idx ^ 0xdeafbeef; - int r; - - if (ring == &adev->uvd.ring) { - r = amdgpu_uvd_get_create_msg(ring, handle, NULL); - if (r) { - DRM_ERROR("Failed to get dummy create msg\n"); - return r; - } - - r = amdgpu_uvd_get_destroy_msg(ring, handle, fence); - if (r) { - DRM_ERROR("Failed to get dummy destroy msg\n"); - return r; - } - - } else if (ring == &adev->vce.ring[0] || - ring == &adev->vce.ring[1]) { - r = amdgpu_vce_get_create_msg(ring, handle, NULL); - if (r) { - DRM_ERROR("Failed to get dummy create msg\n"); - return r; - } - - r = amdgpu_vce_get_destroy_msg(ring, handle, fence); - if (r) { - DRM_ERROR("Failed to get dummy destroy msg\n"); - return r; - } - } else { - struct amdgpu_fence *a_fence = NULL; - r = amdgpu_ring_lock(ring, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ring->idx); - return r; - } - amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence); - amdgpu_ring_unlock_commit(ring); - *fence = &a_fence->base; - } - return 0; -} - void amdgpu_test_ring_sync(struct amdgpu_device *adev, struct amdgpu_ring *ringA, struct amdgpu_ring *ringB) { - struct fence *fence1 = NULL, *fence2 = NULL; - struct amdgpu_semaphore *semaphore = NULL; - int r; - - r = amdgpu_semaphore_create(adev, &semaphore); - if (r) { - DRM_ERROR("Failed to create semaphore\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringA, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringA, semaphore); - amdgpu_ring_unlock_commit(ringA); - - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1); - if (r) - goto out_cleanup; - - r = amdgpu_ring_lock(ringA, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringA, semaphore); - amdgpu_ring_unlock_commit(ringA); - - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2); - if (r) - goto out_cleanup; - - mdelay(1000); - - if (fence_is_signaled(fence1)) { - DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringB, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringB); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringB, semaphore); - amdgpu_ring_unlock_commit(ringB); - - r = fence_wait(fence1, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence 1\n"); - goto out_cleanup; - } - - mdelay(1000); - - if (fence_is_signaled(fence2)) { - DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - - r = 
amdgpu_ring_lock(ringB, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringB); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringB, semaphore); - amdgpu_ring_unlock_commit(ringB); - - r = fence_wait(fence2, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence 1\n"); - goto out_cleanup; - } - -out_cleanup: - amdgpu_semaphore_free(adev, &semaphore, NULL); - - if (fence1) - fence_put(fence1); - - if (fence2) - fence_put(fence2); - - if (r) - printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); } static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, @@ -383,109 +249,6 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, struct amdgpu_ring *ringB, struct amdgpu_ring *ringC) { - struct fence *fenceA = NULL, *fenceB = NULL; - struct amdgpu_semaphore *semaphore = NULL; - bool sigA, sigB; - int i, r; - - r = amdgpu_semaphore_create(adev, &semaphore); - if (r) { - DRM_ERROR("Failed to create semaphore\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringA, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringA, semaphore); - amdgpu_ring_unlock_commit(ringA); - - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA); - if (r) - goto out_cleanup; - - r = amdgpu_ring_lock(ringB, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringB, semaphore); - amdgpu_ring_unlock_commit(ringB); - r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB); - if (r) - goto out_cleanup; - - mdelay(1000); - - if (fence_is_signaled(fenceA)) { - DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - if (fence_is_signaled(fenceB)) { - DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringC, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringC); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringC, semaphore); - amdgpu_ring_unlock_commit(ringC); - - for (i = 0; i < 30; ++i) { - mdelay(100); - sigA = fence_is_signaled(fenceA); - sigB = fence_is_signaled(fenceB); - if (sigA || sigB) - break; - } - - if (!sigA && !sigB) { - DRM_ERROR("Neither fence A nor B has been signaled\n"); - goto out_cleanup; - } else if (sigA && sigB) { - DRM_ERROR("Both fence A and B has been signaled\n"); - goto out_cleanup; - } - - DRM_INFO("Fence %c was first signaled\n", sigA ? 
'A' : 'B'); - - r = amdgpu_ring_lock(ringC, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringC); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringC, semaphore); - amdgpu_ring_unlock_commit(ringC); - - mdelay(1000); - - r = fence_wait(fenceA, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence A\n"); - goto out_cleanup; - } - r = fence_wait(fenceB, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence B\n"); - goto out_cleanup; - } - -out_cleanup: - amdgpu_semaphore_free(adev, &semaphore, NULL); - - if (fenceA) - fence_put(fenceA); - - if (fenceB) - fence_put(fenceB); - - if (r) - printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); } static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8f9834ab1bd5..26a5f4acf584 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -38,10 +38,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = p->ibs[i].ring->idx; - __entry->dw = p->ibs[i].length_dw; + __entry->ring = p->job->ring->idx; + __entry->dw = p->job->ibs[i].length_dw; __entry->fences = amdgpu_fence_count_emitted( - p->ibs[i].ring); + p->job->ring); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, @@ -65,7 +65,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __entry->sched_job = &job->base; __entry->ib = job->ibs; __entry->fence = &job->base.s_fence->base; - __entry->ring_name = job->ibs[0].ring->name; + __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", @@ -90,7 +90,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __entry->sched_job = &job->base; __entry->ib = job->ibs; __entry->fence = &job->base.s_fence->base; - __entry->ring_name = job->ibs[0].ring->name; + __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", @@ -100,18 +100,24 @@ TRACE_EVENT(amdgpu_sched_run_job, TRACE_EVENT(amdgpu_vm_grab_id, - TP_PROTO(unsigned vmid, int ring), - TP_ARGS(vmid, ring), + TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid, + uint64_t pd_addr), + TP_ARGS(vm, ring, vmid, pd_addr), TP_STRUCT__entry( - __field(u32, vmid) + __field(struct amdgpu_vm *, vm) __field(u32, ring) + __field(u32, vmid) + __field(u64, pd_addr) ), TP_fast_assign( - __entry->vmid = vmid; + __entry->vm = vm; __entry->ring = ring; + __entry->vmid = vmid; + __entry->pd_addr = pd_addr; ), - TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) + TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm, + __entry->ring, __entry->vmid, __entry->pd_addr) ); TRACE_EVENT(amdgpu_vm_bo_map, @@ -228,8 +234,8 @@ TRACE_EVENT(amdgpu_vm_flush, __entry->ring = ring; __entry->id = id; ), - TP_printk("pd_addr=%010Lx, ring=%u, id=%u", - __entry->pd_addr, __entry->ring, __entry->id) + TP_printk("ring=%u, id=%u, pd_addr=%010Lx", + __entry->ring, __entry->id, __entry->pd_addr) ); TRACE_EVENT(amdgpu_bo_list_set, @@ -247,42 +253,6 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) ); -DECLARE_EVENT_CLASS(amdgpu_semaphore_request, - - TP_PROTO(int ring, struct amdgpu_semaphore *sem), - - TP_ARGS(ring, sem), - - TP_STRUCT__entry( - __field(int, ring) - __field(signed, waiters) - __field(uint64_t, gpu_addr) - 
), - - TP_fast_assign( - __entry->ring = ring; - __entry->waiters = sem->waiters; - __entry->gpu_addr = sem->gpu_addr; - ), - - TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, - __entry->waiters, __entry->gpu_addr) -); - -DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale, - - TP_PROTO(int ring, struct amdgpu_semaphore *sem), - - TP_ARGS(ring, sem) -); - -DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait, - - TP_PROTO(int ring, struct amdgpu_semaphore *sem), - - TP_ARGS(ring, sem) -); - #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 55cf05e1c81c..9ccdd189d717 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -77,6 +77,8 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) static int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; int r; adev->mman.mem_global_referenced = false; @@ -106,13 +108,27 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) return r; } + ring = adev->mman.buffer_funcs_ring; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; + r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, + rq, amdgpu_sched_jobs); + if (r != 0) { + DRM_ERROR("Failed setting up TTM BO move run queue.\n"); + drm_global_item_unref(&adev->mman.mem_global_ref); + drm_global_item_unref(&adev->mman.bo_global_ref.ref); + return r; + } + adev->mman.mem_global_referenced = true; + return 0; } static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) { if (adev->mman.mem_global_referenced) { + amd_sched_entity_fini(adev->mman.entity.sched, + &adev->mman.entity); drm_global_item_unref(&adev->mman.bo_global_ref.ref); drm_global_item_unref(&adev->mman.mem_global_ref); adev->mman.mem_global_referenced = false; @@ -499,9 +515,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) enum dma_data_direction direction = write ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; - if (current->mm != gtt->usermm) - return -EPERM; - if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { /* check that we only pin down anonymous memory to prevent problems with writeback */ @@ -712,7 +725,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { - while (--i) { + while (i--) { pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); gtt->ttm.dma_address[i] = 0; @@ -773,14 +786,33 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } -bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) +struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; + if (gtt == NULL) + return NULL; + + return gtt->usermm; +} + +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned long size; + if (gtt == NULL) return false; - return !!gtt->userptr; + if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr) + return false; + + size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; + if (gtt->userptr > end || gtt->userptr + size <= start) + return false; + + return true; } bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) @@ -996,9 +1028,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, struct fence **fence) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + uint32_t max_bytes; unsigned num_loops, num_dw; - struct amdgpu_ib *ib; unsigned i; int r; @@ -1010,20 +1043,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, while (num_dw & 0x7) num_dw++; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib); - if (r) { - kfree(ib); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + if (r) return r; - } - - ib->length_dw = 0; if (resv) { - r = amdgpu_sync_resv(adev, &ib->sync, resv, + r = amdgpu_sync_resv(adev, &job->sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); @@ -1034,31 +1059,25 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, for (i = 0; i < num_loops; i++) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); - amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset, - cur_size_in_bytes); + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, + dst_offset, cur_size_in_bytes); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; byte_count -= cur_size_in_bytes; } - amdgpu_vm_pad_ib(adev, ib); - WARN_ON(ib->length_dw > num_dw); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - fence); + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; - if (!amdgpu_enable_scheduler) { - amdgpu_ib_free(adev, ib); - kfree(ib); - } return 0; + error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 53f987aeeacf..1de82bf4fc79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -91,6 +91,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { 
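The one-character change from `while (--i)` to `while (i--)` in the amdgpu_ttm_tt_populate() unwind path matters: with an unsigned index, the old form never unmapped page 0, and would have wrapped around and over-unmapped had the very first mapping failed. A standalone demonstration of the two loop shapes:

#include <stdio.h>

int main(void)
{
	unsigned i, fail_at = 3;

	/* Old form: pre-decrement stops at 0 without visiting it. */
	i = fail_at;
	while (--i)
		printf("old unmaps %u\n", i);	/* 2, 1 -- misses 0 */

	/* Fixed form: unwinds every successfully mapped page,
	 * indices fail_at-1 down to 0, and is safe for fail_at == 0. */
	i = fail_at;
	while (i--)
		printf("new unmaps %u\n", i);	/* 2, 1, 0 */

	return 0;
}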
+ struct amdgpu_ring *ring; + struct amd_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -191,6 +193,15 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->uvd.vcpu_bo); + ring = &adev->uvd.ring; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; + r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, + rq, amdgpu_sched_jobs); + if (r != 0) { + DRM_ERROR("Failed setting up UVD run queue.\n"); + return r; + } + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { atomic_set(&adev->uvd.handles[i], 0); adev->uvd.filp[i] = NULL; @@ -210,6 +221,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) if (adev->uvd.vcpu_bo == NULL) return 0; + amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); if (!r) { amdgpu_bo_kunmap(adev->uvd.vcpu_bo); @@ -241,7 +254,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); if (r) { DRM_ERROR("Error destroying UVD (%d)!\n", r); continue; @@ -295,7 +308,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, handle, + false, &fence); if (r) { DRM_ERROR("Error destroying UVD (%d)!\n", r); continue; @@ -616,7 +630,6 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo; - struct amdgpu_ib *ib; uint32_t cmd, lo, hi; uint64_t start, end; uint64_t addr; @@ -638,9 +651,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; start += addr; - ib = &ctx->parser->ibs[ctx->ib_idx]; - ib->ptr[ctx->data0] = start & 0xFFFFFFFF; - ib->ptr[ctx->data1] = start >> 32; + amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0, + lower_32_bits(start)); + amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1, + upper_32_bits(start)); cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1; if (cmd < 0x4) { @@ -702,7 +716,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) { - struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; + struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx]; int i, r; ctx->idx++; @@ -748,7 +762,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx, int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) { - struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; + struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx]; int r; for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) { @@ -790,7 +804,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) [0x00000003] = 2048, [0x00000004] = 0xFFFFFFFF, }; - struct amdgpu_ib *ib = &parser->ibs[ib_idx]; + struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; int r; if (ib->length_dw % 16) { @@ -823,22 +837,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return 0; } -static int amdgpu_uvd_free_job( - struct amdgpu_job *job) -{ - amdgpu_ib_free(job->adev, job->ibs); - kfree(job->ibs); - return 0; -} - -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, - struct 
amdgpu_bo *bo, - struct fence **fence) +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, + bool direct, struct fence **fence) { struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct list_head head; - struct amdgpu_ib *ib = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; struct fence *f = NULL; struct amdgpu_device *adev = ring->adev; uint64_t addr; @@ -862,15 +868,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (r) goto err; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) { - r = -ENOMEM; - goto err; - } - r = amdgpu_ib_get(ring, NULL, 64, ib); - if (r) - goto err1; + r = amdgpu_job_alloc_with_ib(adev, 64, &job); + if (r) + goto err; + + ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); ib->ptr[1] = addr; @@ -882,12 +885,19 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, ib->ptr[i] = PACKET2(0); ib->length_dw = 16; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_uvd_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); - if (r) - goto err2; + if (direct) { + r = amdgpu_ib_schedule(ring, 1, ib, + AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f); + if (r) + goto err_free; + + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &adev->uvd.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &f); + if (r) + goto err_free; + } ttm_eu_fence_buffer_objects(&ticket, &head, f); @@ -895,16 +905,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, *fence = fence_get(f); amdgpu_bo_unref(&bo); fence_put(f); - if (amdgpu_enable_scheduler) - return 0; - amdgpu_ib_free(ring->adev, ib); - kfree(ib); return 0; -err2: - amdgpu_ib_free(ring->adev, ib); -err1: - kfree(ib); + +err_free: + amdgpu_job_free(job); + err: ttm_eu_backoff_reservation(&ticket, &head); return r; @@ -959,11 +965,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, amdgpu_bo_kunmap(bo); amdgpu_bo_unreserve(bo); - return amdgpu_uvd_send_msg(ring, bo, fence); + return amdgpu_uvd_send_msg(ring, bo, true, fence); } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence) + bool direct, struct fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -1001,7 +1007,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, amdgpu_bo_kunmap(bo); amdgpu_bo_unreserve(bo); - return amdgpu_uvd_send_msg(ring, bo, fence); + return amdgpu_uvd_send_msg(ring, bo, direct, fence); } static void amdgpu_uvd_idle_work_handler(struct work_struct *work) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 1724c2c86151..9a3b449081a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -31,7 +31,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev); int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct fence **fence); int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence); + bool direct, struct fence **fence); void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index a745eeeb5d82..39c3aa60381a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ 
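amdgpu_uvd_send_msg() above now takes a `direct` flag: ring tests and the suspend path push the IB straight to the hardware ring, while normal handle teardown goes through the UVD scheduler entity. The branch structure, reduced to a sketch with stub functions standing in for amdgpu_ib_schedule() and amdgpu_job_submit():

#include <stdio.h>

struct job { int ndw; };

static int push_to_ring(struct job *j)   { printf("direct: %d dw\n", j->ndw); return 0; }
static int queue_to_sched(struct job *j) { printf("queued: %d dw\n", j->ndw); return 0; }
static void job_free(struct job *j)      { (void)j; }

static int send_msg(struct job *job, int direct)
{
	int r;

	if (direct) {
		/* Synchronous path (ring tests, suspend): the job
		 * wrapper is freed as soon as the IB hits the ring. */
		r = push_to_ring(job);
		if (r)
			goto err_free;
		job_free(job);
	} else {
		/* Scheduler path: the entity owns the job until it
		 * runs; free only on submission failure. */
		r = queue_to_sched(job);
		if (r)
			goto err_free;
	}
	return 0;

err_free:
	job_free(job);
	return r;
}

int main(void)
{
	struct job j = { 16 };

	send_msg(&j, 1);
	send_msg(&j, 0);
	return 0;
}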
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -74,6 +74,8 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work); */ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) { + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; const char *fw_name; const struct common_firmware_header *hdr; unsigned ucode_version, version_major, version_minor, binary_id; @@ -170,6 +172,16 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) return r; } + + ring = &adev->vce.ring[0]; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; + r = amd_sched_entity_init(&ring->sched, &adev->vce.entity, + rq, amdgpu_sched_jobs); + if (r != 0) { + DRM_ERROR("Failed setting up VCE run queue.\n"); + return r; + } + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { atomic_set(&adev->vce.handles[i], 0); adev->vce.filp[i] = NULL; @@ -190,6 +202,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; + amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); + amdgpu_bo_unref(&adev->vce.vcpu_bo); amdgpu_ring_fini(&adev->vce.ring[0]); @@ -337,7 +351,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) amdgpu_vce_note_usage(adev); - r = amdgpu_vce_get_destroy_msg(ring, handle, NULL); + r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); if (r) DRM_ERROR("Error destroying VCE handle (%d)!\n", r); @@ -346,14 +360,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } } -static int amdgpu_vce_free_job( - struct amdgpu_job *job) -{ - amdgpu_ib_free(job->adev, job->ibs); - kfree(job->ibs); - return 0; -} - /** * amdgpu_vce_get_create_msg - generate a VCE create msg * @@ -368,21 +374,17 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib *ib = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; struct fence *f = NULL; - struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - kfree(ib); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + if (r) return r; - } + + ib = &job->ibs[0]; dummy = ib->gpu_addr + 1024; @@ -423,20 +425,19 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vce_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err; + + amdgpu_job_free(job); if (fence) *fence = fence_get(f); fence_put(f); - if (amdgpu_enable_scheduler) - return 0; + return 0; + err: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } @@ -451,26 +452,20 @@ err: * Close up a stream for HW test or if userspace failed to do so */ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence) + bool direct, struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib *ib = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; struct fence *f = NULL; - struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, 
NULL, ib_size_dw * 4, ib); - if (r) { - kfree(ib); - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + if (r) return r; - } + ib = &job->ibs[0]; dummy = ib->gpu_addr + 1024; /* stitch together an VCE destroy msg */ @@ -490,20 +485,29 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vce_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); - if (r) - goto err; + + if (direct) { + r = amdgpu_ib_schedule(ring, 1, ib, + AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); + if (r) + goto err; + + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &f); + if (r) + goto err; + } + if (fence) *fence = fence_get(f); fence_put(f); - if (amdgpu_enable_scheduler) - return 0; + return 0; + err: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } @@ -521,7 +525,6 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi, unsigned size, uint32_t index) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_ib *ib = &p->ibs[ib_idx]; struct amdgpu_bo *bo; uint64_t addr; @@ -550,8 +553,8 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, addr += amdgpu_bo_gpu_offset(bo); addr -= ((uint64_t)size) * ((uint64_t)index); - ib->ptr[lo] = addr & 0xFFFFFFFF; - ib->ptr[hi] = addr >> 32; + amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr)); + amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr)); return 0; } @@ -606,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, */ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) { - struct amdgpu_ib *ib = &p->ibs[ib_idx]; + struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned fb_idx = 0, bs_idx = 0; int session_idx = -1; bool destroyed = false; @@ -742,30 +745,6 @@ out: return r; } -/** - * amdgpu_vce_ring_emit_semaphore - emit a semaphore command - * - * @ring: engine to use - * @semaphore: address of semaphore - * @emit_wait: true=emit wait, false=emit signal - * - */ -bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 
1 : 0)); - if (!emit_wait) - amdgpu_ring_write(ring, VCE_CMD_END); - - return true; -} - /** * amdgpu_vce_ring_emit_ib - execute indirect buffer * @@ -814,14 +793,14 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - r = amdgpu_ring_lock(ring, 16); + r = amdgpu_ring_alloc(ring, 16); if (r) { DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", ring->idx, r); return r; } amdgpu_ring_write(ring, VCE_CMD_END); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { if (amdgpu_ring_get_rptr(ring) != rptr) @@ -862,7 +841,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_vce_get_destroy_msg(ring, 1, &fence); + r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index ba2da8ee5906..ef99d2370182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -31,12 +31,9 @@ int amdgpu_vce_resume(struct amdgpu_device *adev); int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct fence **fence); int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence); + bool direct, struct fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); -bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait); void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9599f7559b3d..d9dc8bea5e98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -50,12 +50,15 @@ * SI supports 16. */ +/* Special value that no flush is necessary */ +#define AMDGPU_VM_NO_FLUSH (~0ll) + /** * amdgpu_vm_num_pde - return the number of page directory entries * * @adev: amdgpu_device pointer * - * Calculate the number of page directory entries (cayman+). + * Calculate the number of page directory entries. */ static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) { @@ -67,7 +70,7 @@ static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * - * Calculate the size of the page directory in bytes (cayman+). + * Calculate the size of the page directory in bytes. */ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) { @@ -89,8 +92,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct amdgpu_bo_list_entry *entry) { entry->robj = vm->page_directory; - entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; entry->priority = 0; entry->tv.bo = &vm->page_directory->tbo; entry->tv.shared = true; @@ -154,142 +155,107 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, * @vm: vm to allocate id for * @ring: ring we want to submit job to * @sync: sync object where we add dependencies + * @fence: fence protecting ID from reuse * * Allocate an id for the vm, adding fences to the sync obj as necessary. - * - * Global mutex must be locked! 
*/ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync) + struct amdgpu_sync *sync, struct fence *fence, + unsigned *vm_id, uint64_t *vm_pd_addr) { - struct fence *best[AMDGPU_MAX_RINGS] = {}; - struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; + uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_device *adev = ring->adev; + struct amdgpu_vm_id *id = &vm->ids[ring->idx]; + struct fence *updates = sync->last_vm_update; + int r; - unsigned choices[2] = {}; - unsigned i; + mutex_lock(&adev->vm_manager.lock); /* check if the id is still valid */ - if (vm_id->id) { - unsigned id = vm_id->id; + if (id->mgr_id) { + struct fence *flushed = id->flushed_updates; + bool is_later; long owner; - owner = atomic_long_read(&adev->vm_manager.ids[id].owner); - if (owner == (long)vm) { - trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); + if (!flushed) + is_later = true; + else if (!updates) + is_later = false; + else + is_later = fence_is_later(updates, flushed); + + owner = atomic_long_read(&id->mgr_id->owner); + if (!is_later && owner == (long)id && + pd_addr == id->pd_gpu_addr) { + + fence_put(id->mgr_id->active); + id->mgr_id->active = fence_get(fence); + + list_move_tail(&id->mgr_id->list, + &adev->vm_manager.ids_lru); + + *vm_id = id->mgr_id - adev->vm_manager.ids; + *vm_pd_addr = AMDGPU_VM_NO_FLUSH; + trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, + *vm_pd_addr); + + mutex_unlock(&adev->vm_manager.lock); return 0; } } - /* we definately need to flush */ - vm_id->pd_gpu_addr = ~0ll; + id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru, + struct amdgpu_vm_manager_id, + list); - /* skip over VMID 0, since it is the system VM */ - for (i = 1; i < adev->vm_manager.nvm; ++i) { - struct fence *fence = adev->vm_manager.ids[i].active; - struct amdgpu_ring *fring; + r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active); + if (!r) { + fence_put(id->mgr_id->active); + id->mgr_id->active = fence_get(fence); - if (fence == NULL) { - /* found a free one */ - vm_id->id = i; - trace_amdgpu_vm_grab_id(i, ring->idx); - return 0; - } + fence_put(id->flushed_updates); + id->flushed_updates = fence_get(updates); - fring = amdgpu_ring_from_fence(fence); - if (best[fring->idx] == NULL || - fence_is_later(best[fring->idx], fence)) { - best[fring->idx] = fence; - choices[fring == ring ? 0 : 1] = i; - } + id->pd_gpu_addr = pd_addr; + + list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru); + atomic_long_set(&id->mgr_id->owner, (long)id); + + *vm_id = id->mgr_id - adev->vm_manager.ids; + *vm_pd_addr = pd_addr; + trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); } - for (i = 0; i < 2; ++i) { - if (choices[i]) { - struct fence *fence; - - fence = adev->vm_manager.ids[choices[i]].active; - vm_id->id = choices[i]; - - trace_amdgpu_vm_grab_id(choices[i], ring->idx); - return amdgpu_sync_fence(ring->adev, sync, fence); - } - } - - /* should never happen */ - BUG(); - return -EINVAL; + mutex_unlock(&adev->vm_manager.lock); + return r; } /** * amdgpu_vm_flush - hardware flush the vm * * @ring: ring to use for flush - * @vm: vm we want to flush - * @updates: last vm update that we waited for + * @vmid: vmid number to use + * @pd_addr: address of the page directory * - * Flush the vm (cayman+). - * - * Global and local mutex must be locked! + * Emit a VM flush when it is necessary. 
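+ *
+ * "Necessary" means @pd_addr is a real page directory address;
+ * amdgpu_vm_grab_id() passes AMDGPU_VM_NO_FLUSH instead when the ID
+ * could be reused, and in that case this function does nothing.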
*/ void amdgpu_vm_flush(struct amdgpu_ring *ring, - struct amdgpu_vm *vm, - struct fence *updates) + unsigned vmid, + uint64_t pd_addr) { - uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); - struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; - struct fence *flushed_updates = vm_id->flushed_updates; - bool is_later; - - if (!flushed_updates) - is_later = true; - else if (!updates) - is_later = false; - else - is_later = fence_is_later(updates, flushed_updates); - - if (pd_addr != vm_id->pd_gpu_addr || is_later) { - trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); - if (is_later) { - vm_id->flushed_updates = fence_get(updates); - fence_put(flushed_updates); - } - vm_id->pd_gpu_addr = pd_addr; - amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); + if (pd_addr != AMDGPU_VM_NO_FLUSH) { + trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid); + amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr); } } -/** - * amdgpu_vm_fence - remember fence for vm - * - * @adev: amdgpu_device pointer - * @vm: vm we want to fence - * @fence: fence to remember - * - * Fence the vm (cayman+). - * Set the fence used to protect page table and id. - * - * Global and local mutex must be locked! - */ -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct fence *fence) -{ - struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); - unsigned vm_id = vm->ids[ring->idx].id; - - fence_put(adev->vm_manager.ids[vm_id].active); - adev->vm_manager.ids[vm_id].active = fence_get(fence); - atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); -} - /** * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo * * @vm: requested vm * @bo: requested buffer object * - * Find @bo inside the requested vm (cayman+). + * Find @bo inside the requested vm. * Search inside the @bos vm list for the requested vm * Returns the found bo_va or NULL if none is found * @@ -312,32 +278,40 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * amdgpu_vm_update_pages - helper to call the right asic function * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: GTT hw access flags * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags - * @gtt_flags: GTT hw access flags * * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. 
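+ *
+ * Which backend runs depends on the new @gtt argument (matching the
+ * checks below): PTEs are copied straight from the GART table when
+ * @gtt is the system GART and the flags match, written through
+ * gtt->pages_addr for other GART-backed updates, written directly for
+ * small batches (count < 3), and set via set_pte_pde otherwise.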
*/ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, - uint32_t flags, uint32_t gtt_flags) + uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { - uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; + if ((gtt == &adev->gart) && (flags == gtt_flags)) { + uint64_t src = gtt->table_addr + (addr >> 12) * 8; amdgpu_vm_copy_pte(adev, ib, pe, src, count); - } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) { - amdgpu_vm_write_pte(adev, ib, pe, addr, - count, incr, flags); + } else if (gtt) { + dma_addr_t *pages_addr = gtt->pages_addr; + amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, + count, incr, flags); + + } else if (count < 3) { + amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, + count, incr, flags); } else { amdgpu_vm_set_pte_pde(adev, ib, pe, addr, @@ -345,15 +319,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, } } -int amdgpu_vm_free_job(struct amdgpu_job *job) -{ - int i; - for (i = 0; i < job->num_ibs; i++) - amdgpu_ib_free(job->adev, &job->ibs[i]); - kfree(job->ibs); - return 0; -} - /** * amdgpu_vm_clear_bo - initially clear the page dir/table * @@ -363,15 +328,18 @@ int amdgpu_vm_free_job(struct amdgpu_job *job) * need to reserve bo first before calling it. */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, + struct amdgpu_vm *vm, struct amdgpu_bo *bo) { - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; + struct amdgpu_ring *ring; struct fence *fence = NULL; - struct amdgpu_ib *ib; + struct amdgpu_job *job; unsigned entries; uint64_t addr; int r; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + r = reservation_object_reserve_shared(bo->tbo.resv); if (r) return r; @@ -383,56 +351,57 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) + r = amdgpu_job_alloc_with_ib(adev, 64, &job); + if (r) goto error; - r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); + amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries, + 0, 0); + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + + WARN_ON(job->ibs[0].length_dw > 64); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); if (r) goto error_free; - ib->length_dw = 0; - - amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); - amdgpu_vm_pad_ib(adev, ib); - WARN_ON(ib->length_dw > 64); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_VM, - &fence); - if (!r) - amdgpu_bo_fence(bo, fence, true); + amdgpu_bo_fence(bo, fence, true); fence_put(fence); - if (amdgpu_enable_scheduler) - return 0; + return 0; error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); error: return r; } /** - * amdgpu_vm_map_gart - get the physical address of a gart page + * amdgpu_vm_map_gart - Resolve gart mapping of addr * - * @adev: amdgpu_device pointer + * @pages_addr: optional DMA address to use for lookup * @addr: the unmapped addr * * Look up the physical address of the page that the pte resolves - * to (cayman+). - * Returns the physical address of the page. + * to and return the pointer for the page table entry. 
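+ *
+ * With @pages_addr set, the page is resolved through the CPU-side DMA
+ * address array; without it the address is passed through unchanged.
+ * Either way the result is masked down to a 4K aligned PTE address.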
*/ -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) +uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; - /* page table offset */ - result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; + if (pages_addr) { + /* page table offset */ + result = pages_addr[addr >> PAGE_SHIFT]; - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + /* in case cpu page size != gpu page size*/ + result |= addr & (~PAGE_MASK); + + } else { + /* No mapping required */ + result = addr; + } + + result &= 0xFFFFFFFFFFFFF000ULL; return result; } @@ -446,45 +415,37 @@ uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) * @end: end of GPU address range * * Allocates new page tables if necessary - * and updates the page directory (cayman+). + * and updates the page directory. * Returns 0 for success, error for failure. - * - * Global and local mutex must be locked! */ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; + struct amdgpu_ring *ring; struct amdgpu_bo *pd = vm->page_directory; uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; + struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *fence = NULL; int r; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + /* padding, etc. */ ndw = 64; /* assume the worst case */ ndw += vm->max_pde_used * 6; - /* update too big for an IB */ - if (ndw > 0xfffff) - return -ENOMEM; - - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) { - kfree(ib); + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); + if (r) return r; - } - ib->length_dw = 0; + + ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -504,9 +465,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, ib, last_pde, - last_pt, count, incr, - AMDGPU_PTE_VALID, 0); + amdgpu_vm_update_pages(adev, NULL, 0, ib, + last_pde, last_pt, + count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -518,17 +480,16 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, - incr, AMDGPU_PTE_VALID, 0); + amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); if (ib->length_dw != 0) { - amdgpu_vm_pad_ib(adev, ib); - amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); + amdgpu_ring_pad_ib(ring, ib); + amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, + AMDGPU_FENCE_OWNER_VM); WARN_ON(ib->length_dw > ndw); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_VM, - &fence); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); if (r) goto error_free; @@ -536,18 +497,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, fence_put(vm->page_directory_fence); vm->page_directory_fence = fence_get(fence); fence_put(fence); - } - if (!amdgpu_enable_scheduler || ib->length_dw == 0) { - amdgpu_ib_free(adev, ib); - kfree(ib); + } else { + amdgpu_job_free(job); } return 0; error_free: - amdgpu_ib_free(adev, ib); - 
kfree(ib); + amdgpu_job_free(job); return r; } @@ -555,20 +513,20 @@ error_free: * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: GTT hw mapping flags * @ib: IB for the update * @pe_start: first PTE to handle * @pe_end: last PTE to handle * @addr: addr those PTEs should point to * @flags: hw mapping flags - * @gtt_flags: GTT hw mapping flags - * - * Global and local mutex must be locked! */ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, struct amdgpu_ib *ib, uint64_t pe_start, uint64_t pe_end, - uint64_t addr, uint32_t flags, - uint32_t gtt_flags) + uint64_t addr, uint32_t flags) { /** * The MC L1 TLB supports variable sized pages, based on a fragment @@ -598,36 +556,39 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, unsigned count; + /* Abort early if there isn't anything to do */ + if (pe_start == pe_end) + return; + /* system pages are non continuously */ - if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) || - (frag_start >= frag_end)) { + if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); + amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start, + addr, count, AMDGPU_GPU_PAGE_SIZE, + flags); return; } /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); + amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr, + count, AMDGPU_GPU_PAGE_SIZE, flags); addr += AMDGPU_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags, - gtt_flags); + amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count, + AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += AMDGPU_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); + amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr, + count, AMDGPU_GPU_PAGE_SIZE, flags); } } @@ -635,122 +596,105 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * amdgpu_vm_update_ptes - make sure that page tables are valid * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: GTT hw mapping flags * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range * @dst: destination address to map to * @flags: mapping flags * - * Update the page tables in the range @start - @end (cayman+). - * - * Global and local mutex must be locked! + * Update the page tables in the range @start - @end. 
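+ *
+ * The walk below merges runs of physically contiguous PTE windows and
+ * hands each merged run to amdgpu_vm_frag_ptes(), so fragment flags
+ * can be applied across page table boundaries where possible.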
*/ -static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_ib *ib, - uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags, - uint32_t gtt_flags) +static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, + struct amdgpu_vm *vm, + struct amdgpu_ib *ib, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) { - uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t last_pte = ~0, last_dst = ~0; - void *owner = AMDGPU_FENCE_OWNER_VM; - unsigned count = 0; - uint64_t addr; + const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - /* sync to everything on unmapping */ - if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_UNDEFINED; + uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; + uint64_t addr; /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> amdgpu_vm_block_size; struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; unsigned nptes; - uint64_t pte; - int r; - - amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); - r = reservation_object_reserve_shared(pt->tbo.resv); - if (r) - return r; + uint64_t pe_start; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - pte = amdgpu_bo_gpu_offset(pt); - pte += (addr & mask) * 8; + pe_start = amdgpu_bo_gpu_offset(pt); + pe_start += (addr & mask) * 8; - if ((last_pte + 8 * count) != pte) { + if (last_pe_end != pe_start) { - if (count) { - amdgpu_vm_frag_ptes(adev, ib, last_pte, - last_pte + 8 * count, - last_dst, flags, - gtt_flags); - } + amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, + last_pe_start, last_pe_end, + last_dst, flags); - count = nptes; - last_pte = pte; + last_pe_start = pe_start; + last_pe_end = pe_start + 8 * nptes; last_dst = dst; } else { - count += nptes; + last_pe_end += 8 * nptes; } addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - if (count) { - amdgpu_vm_frag_ptes(adev, ib, last_pte, - last_pte + 8 * count, - last_dst, flags, gtt_flags); - } - - return 0; + amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, + last_pe_start, last_pe_end, + last_dst, flags); } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer - * @vm: requested vm - * @mapping: mapped range and flags to use for the update - * @addr: addr to set the area to + * @gtt: GART instance to use for mapping * @gtt_flags: flags as they are used for GTT + * @vm: requested vm + * @start: start of mapped range + * @last: last mapped entry + * @flags: flags for the entries + * @addr: addr to set the area to * @fence: optional resulting fence * - * Fill in the page table entries for @mapping. + * Fill in the page table entries between @start and @last. * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved and mutex must be locked! 
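+ *
+ * Note the owner selection below: unmapping operations (no
+ * AMDGPU_PTE_VALID) sync to every fence on the page directory, while
+ * mapping operations only sync to other VM updates.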
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, struct amdgpu_vm *vm, - struct amdgpu_bo_va_mapping *mapping, - uint64_t addr, uint32_t gtt_flags, + uint64_t start, uint64_t last, + uint32_t flags, uint64_t addr, struct fence **fence) { - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; + struct amdgpu_ring *ring; + void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; - uint32_t flags = gtt_flags; + struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *f = NULL; int r; - /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here - * but in case of something, we filter the flags in first place - */ - if (!(mapping->flags & AMDGPU_PTE_READABLE)) - flags &= ~AMDGPU_PTE_READABLE; - if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) - flags &= ~AMDGPU_PTE_WRITEABLE; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - trace_amdgpu_vm_bo_update(mapping); + /* sync to everything on unmapping */ + if (!(flags & AMDGPU_PTE_VALID)) + owner = AMDGPU_FENCE_OWNER_UNDEFINED; - nptes = mapping->it.last - mapping->it.start + 1; + nptes = last - start + 1; /* * reserve space for one command every (1 << BLOCK_SIZE) @@ -761,11 +705,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { + if ((gtt == &adev->gart) && (flags == gtt_flags)) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (flags & AMDGPU_PTE_SYSTEM) { + } else if (gtt) { /* header for write data commands */ ndw += ncmds * 4; @@ -780,38 +724,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ndw += 2 * 10; } - /* update too big for an IB */ - if (ndw > 0xfffff) - return -ENOMEM; - - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) { - kfree(ib); + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); + if (r) return r; - } - ib->length_dw = 0; + ib = &job->ibs[0]; - r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, - mapping->it.last + 1, addr + mapping->offset, - flags, gtt_flags); + r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, + owner); + if (r) + goto error_free; - if (r) { - amdgpu_ib_free(adev, ib); - kfree(ib); - return r; - } + r = reservation_object_reserve_shared(vm->page_directory->tbo.resv); + if (r) + goto error_free; - amdgpu_vm_pad_ib(adev, ib); + amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1, + addr, flags); + + amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > ndw); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_VM, - &f); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error_free; @@ -821,18 +755,75 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, *fence = fence_get(f); } fence_put(f); - if (!amdgpu_enable_scheduler) { - amdgpu_ib_free(adev, ib); - kfree(ib); - } return 0; error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } +/** + * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks + * + * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @vm: requested vm + * @mapping: mapped range and flags to use for the update + * @addr: addr to set the area to + * @gtt_flags: flags as they are used for GTT + * @fence: optional 
resulting fence
+ *
+ * Split the mapping into smaller chunks so that each update fits
+ * into an SDMA IB.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+				      struct amdgpu_gart *gtt,
+				      uint32_t gtt_flags,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo_va_mapping *mapping,
+				      uint64_t addr, struct fence **fence)
+{
+	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
+
+	uint64_t start = mapping->it.start;
+	uint32_t flags = gtt_flags;
+	int r;
+
+	/* Normally bo_va->flags contains only the READABLE and WRITEABLE
+	 * bits here, but just in case we filter the flags first.
+	 */
+	if (!(mapping->flags & AMDGPU_PTE_READABLE))
+		flags &= ~AMDGPU_PTE_READABLE;
+	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+		flags &= ~AMDGPU_PTE_WRITEABLE;
+
+	trace_amdgpu_vm_bo_update(mapping);
+
+	addr += mapping->offset;
+
+	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
+		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+						   start, mapping->it.last,
+						   flags, addr, fence);
+
+	while (start != mapping->it.last + 1) {
+		uint64_t last;
+
+		last = min((uint64_t)mapping->it.last, start + max_size);
+		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+						start, last, flags, addr,
+						fence);
+		if (r)
+			return r;
+
+		start = last + 1;
+		addr += max_size;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
  *
@@ -851,14 +842,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 {
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_gart *gtt = NULL;
 	uint32_t flags;
 	uint64_t addr;
 	int r;
 
 	if (mem) {
 		addr = (u64)mem->start << PAGE_SHIFT;
-		if (mem->mem_type != TTM_PL_TT)
+		switch (mem->mem_type) {
+		case TTM_PL_TT:
+			gtt = &bo_va->bo->adev->gart;
+			break;
+
+		case TTM_PL_VRAM:
 			addr += adev->vm_manager.vram_base_offset;
+			break;
+
+		default:
+			break;
+		}
 	} else {
 		addr = 0;
 	}
@@ -871,8 +873,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
-						flags, &bo_va->last_pt_update);
+		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
+					       &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -918,7 +920,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
 		spin_unlock(&vm->freed_lock);
-		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
+		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
+					       0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
@@ -976,7 +979,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
  * @vm: requested vm
  * @bo: amdgpu buffer object
  *
- * Add @bo into the requested vm (cayman+).
+ * Add @bo into the requested vm.
* Add @bo to the list of bos associated with the vm * Returns newly added bo_va or NULL for failure * @@ -1117,15 +1120,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, */ pt->parent = amdgpu_bo_ref(vm->page_directory); - r = amdgpu_vm_clear_bo(adev, pt); + r = amdgpu_vm_clear_bo(adev, vm, pt); if (r) { amdgpu_bo_unref(&pt); goto error_free; } entry->robj = pt; - entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; entry->priority = 0; entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; @@ -1210,7 +1211,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, * @adev: amdgpu_device pointer * @bo_va: requested bo_va * - * Remove @bo_va->bo from the requested vm (cayman+). + * Remove @bo_va->bo from the requested vm. * * Object have to be reserved! */ @@ -1255,7 +1256,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, * @vm: requested vm * @bo: amdgpu buffer object * - * Mark @bo as invalid (cayman+). + * Mark @bo as invalid. */ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo) @@ -1276,17 +1277,20 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, * @adev: amdgpu_device pointer * @vm: requested vm * - * Init @vm fields (cayman+). + * Init @vm fields. */ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT * 8); unsigned pd_size, pd_entries; + unsigned ring_instance; + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; int i, r; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - vm->ids[i].id = 0; + vm->ids[i].mgr_id = NULL; vm->ids[i].flushed_updates = NULL; } vm->va = RB_ROOT; @@ -1306,6 +1310,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) return -ENOMEM; } + /* create scheduler entity for page table updates */ + + ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); + ring_instance %= adev->vm_manager.vm_pte_num_rings; + ring = adev->vm_manager.vm_pte_rings[ring_instance]; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; + r = amd_sched_entity_init(&ring->sched, &vm->entity, + rq, amdgpu_sched_jobs); + if (r) + return r; + vm->page_directory_fence = NULL; r = amdgpu_bo_create(adev, pd_size, align, true, @@ -1313,22 +1328,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) AMDGPU_GEM_CREATE_NO_CPU_ACCESS, NULL, NULL, &vm->page_directory); if (r) - return r; + goto error_free_sched_entity; + r = amdgpu_bo_reserve(vm->page_directory, false); - if (r) { - amdgpu_bo_unref(&vm->page_directory); - vm->page_directory = NULL; - return r; - } - r = amdgpu_vm_clear_bo(adev, vm->page_directory); + if (r) + goto error_free_page_directory; + + r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory); amdgpu_bo_unreserve(vm->page_directory); - if (r) { - amdgpu_bo_unref(&vm->page_directory); - vm->page_directory = NULL; - return r; - } + if (r) + goto error_free_page_directory; return 0; + +error_free_page_directory: + amdgpu_bo_unref(&vm->page_directory); + vm->page_directory = NULL; + +error_free_sched_entity: + amd_sched_entity_fini(&ring->sched, &vm->entity); + + return r; } /** @@ -1337,7 +1357,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) * @adev: amdgpu_device pointer * @vm: requested vm * - * Tear down @vm (cayman+). + * Tear down @vm. 
* Unbind the VM and remove all bos from the vm bo list */ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) @@ -1345,6 +1365,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amdgpu_bo_va_mapping *mapping, *tmp; int i; + amd_sched_entity_fini(vm->entity.sched, &vm->entity); + if (!RB_EMPTY_ROOT(&vm->va)) { dev_err(adev->dev, "still active bo inside vm\n"); } @@ -1365,13 +1387,34 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - unsigned id = vm->ids[i].id; + struct amdgpu_vm_id *id = &vm->ids[i]; - atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, - (long)vm, 0); - fence_put(vm->ids[i].flushed_updates); + if (id->mgr_id) + atomic_long_cmpxchg(&id->mgr_id->owner, + (long)id, 0); + fence_put(id->flushed_updates); } +} +/** + * amdgpu_vm_manager_init - init the VM manager + * + * @adev: amdgpu_device pointer + * + * Initialize the VM manager structures + */ +void amdgpu_vm_manager_init(struct amdgpu_device *adev) +{ + unsigned i; + + INIT_LIST_HEAD(&adev->vm_manager.ids_lru); + + /* skip over VMID 0, since it is the system VM */ + for (i = 1; i < adev->vm_manager.num_ids; ++i) + list_add_tail(&adev->vm_manager.ids[i].list, + &adev->vm_manager.ids_lru); + + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 21aacc1f45c1..bf731e9f643e 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector unsigned max_lane_num = drm_dp_max_lane_count(dpcd); unsigned lane_num, i, max_pix_clock; - for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { - for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { - max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_NUTMEG) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + max_pix_clock = (lane_num * 270000 * 8) / bpp; if (max_pix_clock >= pix_clock) { *dp_lanes = lane_num; - *dp_rate = link_rates[i]; + *dp_rate = 270000; return 0; } } + } else { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (max_pix_clock >= pix_clock) { + *dp_lanes = lane_num; + *dp_rate = link_rates[i]; + return 0; + } + } + } } return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 8b4731d4e10e..474ca02b0949 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -31,6 +31,7 @@ #include "ci_dpm.h" #include "gfx_v7_0.h" #include "atom.h" +#include "amd_pcie.h" #include #include "smu/smu_7_0_1_d.h" @@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev) u8 frev, crev; struct ci_power_info *pi; int ret; - u32 mask; pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); if (pi == NULL) return -ENOMEM; adev->pm.dpm.priv = pi; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret) - pi->sys_pcie_mask = 0; - else - pi->sys_pcie_mask = mask; + pi->sys_pcie_mask = + (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> + 
CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; + pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index fd9c9588ef46..192ab13e9f05 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1059,257 +1059,6 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void cik_print_gpu_status_regs(struct amdgpu_device *adev) -{ - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(mmGRBM_STATUS)); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(mmGRBM_STATUS2)); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(mmGRBM_STATUS_SE0)); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(mmGRBM_STATUS_SE1)); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(mmGRBM_STATUS_SE2)); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(mmGRBM_STATUS_SE3)); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); - dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT1)); - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT2)); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT3)); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(mmCP_CPF_BUSY_STAT)); - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPF_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPC_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); -} - -/** - * cik_gpu_check_soft_reset - check which blocks are busy - * - * @adev: amdgpu_device pointer - * - * Check which blocks are busy and return the relevant reset - * mask to be used by cik_gpu_soft_reset(). - * Returns a mask of the blocks to be reset. 
- */ -u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev) -{ - u32 reset_mask = 0; - u32 tmp; - - /* GRBM_STATUS */ - tmp = RREG32(mmGRBM_STATUS); - if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | - GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | - GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | - GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | - GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) - reset_mask |= AMDGPU_RESET_GFX; - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_CP; - - /* GRBM_STATUS2 */ - tmp = RREG32(mmGRBM_STATUS2); - if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_RLC; - - /* SDMA0_STATUS_REG */ - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA; - - /* SDMA1_STATUS_REG */ - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA1; - - /* SRBM_STATUS2 */ - tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA; - - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA1; - - /* SRBM_STATUS */ - tmp = RREG32(mmSRBM_STATUS); - - if (tmp & SRBM_STATUS__IH_BUSY_MASK) - reset_mask |= AMDGPU_RESET_IH; - - if (tmp & SRBM_STATUS__SEM_BUSY_MASK) - reset_mask |= AMDGPU_RESET_SEM; - - if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) - reset_mask |= AMDGPU_RESET_GRBM; - - if (tmp & SRBM_STATUS__VMC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VMC; - - if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_MC; - - if (amdgpu_display_is_display_hung(adev)) - reset_mask |= AMDGPU_RESET_DISPLAY; - - /* Skip MC reset as it's mostly likely not hung, just busy */ - if (reset_mask & AMDGPU_RESET_MC) { - DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); - reset_mask &= ~AMDGPU_RESET_MC; - } - - return reset_mask; -} - -/** - * cik_gpu_soft_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * @reset_mask: mask of which blocks to reset - * - * Soft reset the blocks specified in @reset_mask. 
- */ -static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) -{ - struct amdgpu_mode_mc_save save; - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - u32 tmp; - - if (reset_mask == 0) - return; - - dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); - - cik_print_gpu_status_regs(adev); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); - - /* disable CG/PG */ - - /* stop the rlc */ - gfx_v7_0_rlc_stop(adev); - - /* Disable GFX parsing/prefetching */ - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); - - /* Disable MEC parsing/prefetching */ - WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); - - if (reset_mask & AMDGPU_RESET_DMA) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - } - if (reset_mask & AMDGPU_RESET_DMA1) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - } - - gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } - - if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) - grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | - GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; - - if (reset_mask & AMDGPU_RESET_CP) { - grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; - - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; - } - - if (reset_mask & AMDGPU_RESET_DMA) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - - if (reset_mask & AMDGPU_RESET_DMA1) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; - - if (reset_mask & AMDGPU_RESET_DISPLAY) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; - - if (reset_mask & AMDGPU_RESET_RLC) - grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; - - if (reset_mask & AMDGPU_RESET_SEM) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK; - - if (reset_mask & AMDGPU_RESET_IH) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; - - if (reset_mask & AMDGPU_RESET_GRBM) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; - - if (reset_mask & AMDGPU_RESET_VMC) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; - - if (!(adev->flags & AMD_IS_APU)) { - if (reset_mask & AMDGPU_RESET_MC) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; - } - - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } - - /* Wait a little for things to settle down */ - udelay(50); - - gmc_v7_0_mc_resume(adev, &save); - udelay(50); - - cik_print_gpu_status_regs(adev); -} - 
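+/* CIK now relies on a full PCI config reset instead of per-block soft
+ * resets. A sketch of the resulting cik_asic_reset() flow (see the
+ * updated function below):
+ *
+ *	cik_set_bios_scratch_engine_hung(adev, true);
+ *	cik_gpu_pci_config_reset(adev);
+ *	cik_set_bios_scratch_engine_hung(adev, false);
+ */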
struct kv_reset_save_regs { u32 gmcon_reng_execute; u32 gmcon_misc; @@ -1405,45 +1154,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; struct kv_reset_save_regs kv_save = { 0 }; - u32 tmp, i; + u32 i; dev_info(adev->dev, "GPU pci config reset\n"); - /* disable dpm? */ - - /* disable cg/pg */ - - /* Disable GFX parsing/prefetching */ - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | - CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); - - /* Disable MEC parsing/prefetching */ - WREG32(mmCP_MEC_CNTL, - CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); - - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - /* XXX other engines? */ - - /* halt the rlc, disable cp internal ints */ - gfx_v7_0_rlc_stop(adev); - - udelay(50); - - /* disable mem access */ - gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timed out !\n"); - } - if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); @@ -1489,26 +1204,11 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu */ static int cik_asic_reset(struct amdgpu_device *adev) { - u32 reset_mask; + cik_set_bios_scratch_engine_hung(adev, true); - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); + cik_gpu_pci_config_reset(adev); - if (reset_mask) - cik_set_bios_scratch_engine_hung(adev, true); - - /* try soft reset */ - cik_gpu_soft_reset(adev, reset_mask); - - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); - - /* try pci config reset */ - if (reset_mask && amdgpu_hard_reset) - cik_gpu_pci_config_reset(adev); - - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); - - if (!reset_mask) - cik_set_bios_scratch_engine_hung(adev, false); + cik_set_bios_scratch_engine_hung(adev, false); return 0; } @@ -1762,6 +1462,9 @@ static void cik_program_aspm(struct amdgpu_device *adev) if (amdgpu_aspm == 0) return; + if (pci_is_root_bus(adev->pdev->bus)) + return; + /* XXX double check APUs */ if (adev->flags & AMD_IS_APU) return; @@ -2332,72 +2035,72 @@ static int cik_common_early_init(void *handle) switch (adev->asic_type) { case CHIP_BONAIRE: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CGTS_LS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_MC_LS | - AMDGPU_CG_SUPPORT_MC_MGCG | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; 
break; case CHIP_HAWAII: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_MC_LS | - AMDGPU_CG_SUPPORT_MC_MGCG | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; adev->external_rev_id = 0x28; break; case CHIP_KAVERI: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CGTS_LS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = - /*AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG |*/ - AMDGPU_PG_SUPPORT_UVD | - /*AMDGPU_PG_SUPPORT_VCE | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS | - AMDGPU_PG_SUPPORT_ACP | - AMDGPU_PG_SUPPORT_SAMU |*/ + /*AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG |*/ + AMD_PG_SUPPORT_UVD | + /*AMD_PG_SUPPORT_VCE | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_ACP | + AMD_PG_SUPPORT_SAMU |*/ 0; if (adev->pdev->device == 0x1312 || adev->pdev->device == 0x1316 || @@ -2409,29 +2112,29 @@ static int cik_common_early_init(void *handle) case CHIP_KABINI: case CHIP_MULLINS: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CGTS_LS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = - /*AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | */ - AMDGPU_PG_SUPPORT_UVD | - 
/*AMDGPU_PG_SUPPORT_VCE | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS | - AMDGPU_PG_SUPPORT_SAMU |*/ + /*AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | */ + AMD_PG_SUPPORT_UVD | + /*AMD_PG_SUPPORT_VCE | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_SAMU |*/ 0; if (adev->asic_type == CHIP_KABINI) { if (adev->rev_id == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 5f712ceddf08..266db15daf2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; + u32 extra_bits = ib->vm_id & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 4) @@ -294,30 +294,6 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); } -/** - * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring - * - * @ring: amdgpu_ring structure holding ring information - * @semaphore: amdgpu semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (CIK). - */ -static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S; - - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); - amdgpu_ring_write(ring, addr & 0xfffffff8); - amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); - - return true; -} - /** * cik_sdma_gfx_stop - stop the gfx async dma engines * @@ -417,6 +393,9 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], + adev->gfx.config.gb_addr_config & 0x70); + WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); @@ -584,7 +563,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ring_lock(ring, 5); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); @@ -595,7 +574,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, 1); /* number of DWs to follow */ amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -645,7 +624,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err0; @@ -657,9 +636,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - 
&f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err1; @@ -738,7 +716,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, * Update PTEs by writing them manually using sDMA (CIK). */ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { @@ -757,14 +735,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & AMDGPU_PTE_SYSTEM) { - value = amdgpu_vm_map_gart(ib->ring->adev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & AMDGPU_PTE_VALID) { - value = addr; - } else { - value = 0; - } + value = amdgpu_vm_map_gart(pages_addr, addr); addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; @@ -827,9 +798,9 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, * @ib: indirect buffer to fill with padding * */ -static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) +static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); u32 pad_count; int i; @@ -885,7 +856,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, { u32 orig, data; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); } else { @@ -906,7 +877,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, { u32 orig, data; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); data |= 0x100; if (orig != data) @@ -1097,6 +1068,8 @@ static void cik_sdma_print_status(void *handle) i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { cik_srbm_select(adev, 0, 0, 0, j); @@ -1297,12 +1270,12 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { .parse_cs = NULL, .emit_ib = cik_sdma_ring_emit_ib, .emit_fence = cik_sdma_ring_emit_fence, - .emit_semaphore = cik_sdma_ring_emit_semaphore, .emit_vm_flush = cik_sdma_ring_emit_vm_flush, .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, .test_ring = cik_sdma_ring_test_ring, .test_ib = cik_sdma_ring_test_ib, .insert_nop = cik_sdma_ring_insert_nop, + .pad_ib = cik_sdma_ring_pad_ib, }; static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) @@ -1399,14 +1372,18 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { .copy_pte = cik_sdma_vm_copy_pte, .write_pte = cik_sdma_vm_write_pte, .set_pte_pde = cik_sdma_vm_set_pte_pde, - .pad_ib = cik_sdma_vm_pad_ib, }; static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) { + unsigned i; + if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; - 
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 7f6d457f250a..60d4493206dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -46,9 +46,6 @@ #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 -#define CIK_RB_BITMAP_WIDTH_PER_SH 2 -#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4 - #define AMDGPU_NUM_OF_VMIDS 8 #define PIPEID(x) ((x) << 0) diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 4dd17f2dd905..e7ef2261ff4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->gfx_pg_threshold = 500; pi->caps_fps = true; /* uvd */ - pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; pi->caps_uvd_dpm = true; /* vce */ - pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; + pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; pi->caps_vce_dpm = true; /* acp */ - pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; + pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; pi->caps_acp_dpm = true; pi->caps_stable_power_state = false; @@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) AMD_PG_STATE_GATE); cz_enable_vce_dpm(adev, false); - /* TODO: to figure out why vce can't be poweroff. 
*/ - /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */ + cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); pi->vce_power_gated = true; } else { cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); @@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) } } else { /*pi->caps_vce_pg*/ cz_update_vce_dpm(adev); - cz_enable_vce_dpm(adev, true); + cz_enable_vce_dpm(adev, !gate); } - - return; } const struct amd_ip_funcs cz_dpm_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 093599aba64b..e3ff809a0cae 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2670,7 +2670,6 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_crtc_cleanup(crtc); - destroy_workqueue(amdgpu_crtc->pflip_queue); kfree(amdgpu_crtc); } @@ -2890,7 +2889,6 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); adev->mode_info.crtcs[index] = amdgpu_crtc; amdgpu_crtc->max_cursor_width = 128; @@ -3366,7 +3364,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + schedule_work(&works->unpin_work); return 0; } @@ -3624,16 +3622,8 @@ dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = { .dpms = dce_v10_0_ext_dpms, - .mode_fixup = dce_v10_0_ext_mode_fixup, .prepare = dce_v10_0_ext_prepare, .mode_set = dce_v10_0_ext_mode_set, .commit = dce_v10_0_ext_commit, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 8e67249d4367..6b6c9b6879ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2661,7 +2661,6 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_crtc_cleanup(crtc); - destroy_workqueue(amdgpu_crtc->pflip_queue); kfree(amdgpu_crtc); } @@ -2881,7 +2880,6 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); adev->mode_info.crtcs[index] = amdgpu_crtc; amdgpu_crtc->max_cursor_width = 128; @@ -3361,7 +3359,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + schedule_work(&works->unpin_work); return 0; } @@ -3619,16 +3617,8 @@ dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { .dpms = dce_v11_0_ext_dpms, - .mode_fixup = 
dce_v11_0_ext_mode_fixup, .prepare = dce_v11_0_ext_prepare, .mode_set = dce_v11_0_ext_mode_set, .commit = dce_v11_0_ext_commit, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index d0e128c24813..56bea36a6b18 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2582,7 +2582,6 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_crtc_cleanup(crtc); - destroy_workqueue(amdgpu_crtc->pflip_queue); kfree(amdgpu_crtc); } @@ -2809,7 +2808,6 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); adev->mode_info.crtcs[index] = amdgpu_crtc; amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; @@ -3375,7 +3373,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + schedule_work(&works->unpin_work); return 0; } @@ -3554,16 +3552,8 @@ dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = { .dpms = dce_v8_0_ext_dpms, - .mode_fixup = dce_v8_0_ext_mode_fixup, .prepare = dce_v8_0_ext_prepare, .mode_set = dce_v8_0_ext_mode_set, .commit = dce_v8_0_ext_commit, diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index e35340afd3db..b336c918d6a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -272,6 +272,12 @@ static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) if (!adev->pm.fw) return -EINVAL; + /* Skip SMC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. 
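+ * The SMC ucode is thus already loaded and running by the time we get here.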
+ */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; amdgpu_ucode_print_smc_hdr(&hdr->header); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 6c76139de1c9..4411b94775db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -31,8 +31,6 @@ #include "amdgpu_ucode.h" #include "clearstate_ci.h" -#include "uvd/uvd_4_2_d.h" - #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" @@ -1006,9 +1004,15 @@ out: */ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) { - const u32 num_tile_mode_states = 32; - const u32 num_secondary_tile_mode_states = 16; - u32 reg_offset, gb_tile_moden, split_equal_to_row_size; + const u32 num_tile_mode_states = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + const u32 num_secondary_tile_mode_states = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + u32 reg_offset, split_equal_to_row_size; + uint32_t *tile, *macrotile; + + tile = adev->gfx.config.tile_mode_array; + macrotile = adev->gfx.config.macrotile_mode_array; switch (adev->gfx.config.mem_row_size_in_kb) { case 1: @@ -1023,832 +1027,531 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) break; } + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + tile[reg_offset] = 0; + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + macrotile[reg_offset] = 0; + switch (adev->asic_type) { case CHIP_BONAIRE: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 7: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[7] = (TILE_SPLIT(split_equal_to_row_size)); + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_16x16)); + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[12] = (TILE_SPLIT(split_equal_to_row_size)); + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[17] = (TILE_SPLIT(split_equal_to_row_size)); + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[23] = (TILE_SPLIT(split_equal_to_row_size)); + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[26] = 
(ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[30] = (TILE_SPLIT(split_equal_to_row_size)); - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P4_16x16)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + 
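/* the macrotile table has no entry at index 7, so leave that register untouched */ +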
if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); break; case CHIP_HAWAII: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 7: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + 
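/* tiles 29-30: PRT thin1 with rotated micro-tiling (16-pipe and 4-pipe variants) */ +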
tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - 
break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); break; case CHIP_KABINI: case CHIP_KAVERI: case CHIP_MULLINS: default: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 7: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[7] = (TILE_SPLIT(split_equal_to_row_size)); + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + 
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[12] = (TILE_SPLIT(split_equal_to_row_size)); + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[17] = (TILE_SPLIT(split_equal_to_row_size)); + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[23] = (TILE_SPLIT(split_equal_to_row_size)); + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[30] = (TILE_SPLIT(split_equal_to_row_size)); - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P2)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 
{ - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + 
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); break; } } @@ -1893,45 +1596,31 @@ void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) */ static u32 gfx_v7_0_create_bitmask(u32 bit_width) { - u32 i, mask = 0; - - for (i = 0; i < bit_width; i++) { - mask <<= 1; - mask |= 1; - } - return mask; + return (u32)((1ULL << bit_width) - 1); } /** - * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs + * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs * * @adev: amdgpu_device pointer - * @max_rb_num: max RBs (render backends) for the asic - * @se_num: number of SEs (shader engines) for the asic - * @sh_per_se: number of SH blocks per SE for the asic * - * Calculates the bitmask of disabled RBs (CIK). - * Returns the disabled RB bitmask. + * Calculates the bitmask of enabled RBs (CIK). + * Returns the enabled RB bitmask. 
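+ * For example, with two render backends per SH and neither fused off, the returned bitmap is 0x3.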
*/ -static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev, - u32 max_rb_num_per_se, - u32 sh_per_se) +static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; data = RREG32(mmCC_RB_BACKEND_DISABLE); - if (data & 1) - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - else - data = 0; - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; - mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se); + mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); - return data & mask; + return (~data) & mask; } /** @@ -1940,73 +1629,29 @@ static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev, * @adev: amdgpu_device pointer - * @se_num: number of SEs (shader engines) for the asic - * @sh_per_se: number of SH blocks per SE for the asic - * @max_rb_num: max RBs (render backends) for the asic * * Configures per-SE/SH RB registers (CIK). */ -static void gfx_v7_0_setup_rb(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 max_rb_num_per_se) +static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) { int i, j; - u32 data, mask; - u32 disabled_rbs = 0; - u32 enabled_rbs = 0; + u32 data; + u32 active_rbs = 0; + u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v7_0_select_se_sh(adev, i, j); - data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); - if (adev->asic_type == CHIP_HAWAII) - disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); - else - disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); + data = gfx_v7_0_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + rb_bitmap_width_per_sh); } } gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); - mask = 1; - for (i = 0; i < max_rb_num_per_se * se_num; i++) { - if (!(disabled_rbs & mask)) - enabled_rbs |= mask; - mask <<= 1; - } - - adev->gfx.config.backend_enable_mask = enabled_rbs; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - gfx_v7_0_select_se_sh(adev, i, 0xffffffff); - data = 0; - for (j = 0; j < sh_per_se; j++) { - switch (enabled_rbs & 3) { - case 0: - if (j == 0) - data |= (RASTER_CONFIG_RB_MAP_3 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - else - data |= (RASTER_CONFIG_RB_MAP_0 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - break; - case 1: - data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); - break; - case 2: - data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); - break; - case 3: - default: - data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); - break; - } - enabled_rbs >>= 2; - } - WREG32(mmPA_SC_RASTER_CONFIG, data); - } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); + adev->gfx.config.backend_enable_mask = active_rbs; + adev->gfx.config.num_rbs = hweight32(active_rbs); } /** @@ -2059,192 +1706,23 @@ static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) */ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) { - u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; - u32 
dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; - u32 sh_mem_cfg; - u32 tmp; + u32 tmp, sh_mem_cfg; int i; - switch (adev->asic_type) { - case CHIP_BONAIRE: - adev->gfx.config.max_shader_engines = 2; - adev->gfx.config.max_tile_pipes = 4; - adev->gfx.config.max_cu_per_sh = 7; - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_backends_per_se = 2; - adev->gfx.config.max_texture_channel_caches = 4; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 32; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; - break; - case CHIP_HAWAII: - adev->gfx.config.max_shader_engines = 4; - adev->gfx.config.max_tile_pipes = 16; - adev->gfx.config.max_cu_per_sh = 11; - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_backends_per_se = 4; - adev->gfx.config.max_texture_channel_caches = 16; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 32; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; - break; - case CHIP_KAVERI: - adev->gfx.config.max_shader_engines = 1; - adev->gfx.config.max_tile_pipes = 4; - if ((adev->pdev->device == 0x1304) || - (adev->pdev->device == 0x1305) || - (adev->pdev->device == 0x130C) || - (adev->pdev->device == 0x130F) || - (adev->pdev->device == 0x1310) || - (adev->pdev->device == 0x1311) || - (adev->pdev->device == 0x131C)) { - adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1309) || - (adev->pdev->device == 0x130A) || - (adev->pdev->device == 0x130D) || - (adev->pdev->device == 0x1313) || - (adev->pdev->device == 0x131D)) { - adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1306) || - (adev->pdev->device == 0x1307) || - (adev->pdev->device == 0x130B) || - (adev->pdev->device == 0x130E) || - (adev->pdev->device == 0x1315) || - (adev->pdev->device == 0x131B)) { - adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; - } else { - adev->gfx.config.max_cu_per_sh = 3; - adev->gfx.config.max_backends_per_se = 1; - } - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_texture_channel_caches = 4; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 16; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; - break; - case CHIP_KABINI: - case CHIP_MULLINS: - default: - adev->gfx.config.max_shader_engines = 1; - adev->gfx.config.max_tile_pipes = 2; - adev->gfx.config.max_cu_per_sh = 2; - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_backends_per_se = 1; - adev->gfx.config.max_texture_channel_caches = 2; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 16; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - 
adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; - break; - } - WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT)); - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); - adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); - mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; - - adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; - adev->gfx.config.mem_max_burst_length_bytes = 256; - if (adev->flags & AMD_IS_APU) { - /* Get memory bank mapping mode. */ - tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); - dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); - dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); - - tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); - dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); - dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); - - /* Validate settings in case only one DIMM installed. */ - if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) - dimm00_addr_map = 0; - if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) - dimm01_addr_map = 0; - if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) - dimm10_addr_map = 0; - if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) - dimm11_addr_map = 0; - - /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ - /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */ - if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) - adev->gfx.config.mem_row_size_in_kb = 2; - else - adev->gfx.config.mem_row_size_in_kb = 1; - } else { - tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; - adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; - if (adev->gfx.config.mem_row_size_in_kb > 4) - adev->gfx.config.mem_row_size_in_kb = 4; - } - /* XXX use MC settings? 
*/ - adev->gfx.config.shader_engine_tile_size = 32; - adev->gfx.config.num_gpus = 1; - adev->gfx.config.multi_gpu_tile_size = 64; - - /* fix up row size */ - gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; - switch (adev->gfx.config.mem_row_size_in_kb) { - case 1: - default: - gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); - break; - case 2: - gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); - break; - case 4: - gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); - break; - } - adev->gfx.config.gb_addr_config = gb_addr_config; - - WREG32(mmGB_ADDR_CONFIG, gb_addr_config); - WREG32(mmHDP_ADDR_CONFIG, gb_addr_config); - WREG32(mmDMIF_ADDR_CALC, gb_addr_config); - WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70); - WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70); - WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); - WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); - WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); gfx_v7_0_tiling_mode_table_init(adev); - gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_backends_per_se); + gfx_v7_0_setup_rb(adev); /* set HW defaults for 3D engine */ WREG32(mmCP_MEQ_THRESHOLDS, - (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | - (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); + (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | + (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); mutex_lock(&adev->grbm_idx_mutex); /* @@ -2255,7 +1733,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ - sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); mutex_lock(&adev->srbm_mutex); @@ -2379,7 +1857,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_gfx_scratch_free(adev, scratch); @@ -2388,7 +1866,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -2516,36 +1994,6 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, upper_32_bits(seq)); } -/** - * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring - * - * @ring: amdgpu ring buffer object - * @semaphore: amdgpu semaphore object - * @emit_wait: Is this a sempahore wait? - * - * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP - * from running ahead of semaphore waits. - */ -static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; - - amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); - amdgpu_ring_write(ring, addr & 0xffffffff); - amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); - - if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { - /* Prevent the PFP from running ahead of the semaphore wait */ - amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); - amdgpu_ring_write(ring, 0x0); - } - - return true; -} - /* * IB stuff */ @@ -2593,8 +2041,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | - (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); + control |= ib->length_dw | (ib->vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -2622,8 +2069,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | - (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); + control |= ib->length_dw | (ib->vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -2661,7 +2107,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err1; @@ -2671,9 +2117,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err2; @@ -2842,7 +2287,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v7_0_cp_gfx_enable(adev, true); - r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8); + r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; @@ -2911,7 +2356,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); return 0; } @@ -2989,21 +2434,14 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 wptr; - wptr = RREG32(mmCP_RB0_WPTR); - - return wptr; + return RREG32(mmCP_RB0_WPTR); } static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) @@ -3016,21 +2454,13 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) { - u32 wptr; - /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs]; - - return wptr; + return ring->adev->wb.wb[ring->wptr_offs]; } static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) @@ -3125,21 +2555,6 @@ static int 
gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) return 0; } -/** - * gfx_v7_0_cp_compute_start - start the compute queues - * - * @adev: amdgpu_device pointer - * - * Enable the compute queues. - * Returns 0 for success, error for failure. - */ -static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev) -{ - gfx_v7_0_cp_compute_enable(adev, true); - - return 0; -} - /** * gfx_v7_0_cp_compute_fini - stop the compute queues * @@ -3330,9 +2745,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) u32 *buf; struct bonaire_mqd *mqd; - r = gfx_v7_0_cp_compute_start(adev); - if (r) - return r; + gfx_v7_0_cp_compute_enable(adev, true); /* fix up chicken bits */ tmp = RREG32(mmCP_CPF_DEBUG); @@ -3628,6 +3041,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, 0xffffffff); + amdgpu_ring_write(ring, 4); /* poll interval */ + if (usepfp) { /* synce CE with ME to prevent CE fetch CEIB before context switch done */ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); @@ -4109,7 +3535,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { gfx_v7_0_enable_gui_idle_interrupt(adev, true); tmp = gfx_v7_0_halt_rlc(adev); @@ -4147,9 +3573,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) { u32 data, orig, tmp = 0; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { - if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { - if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { orig = data = RREG32(mmCP_MEM_SLP_CNTL); data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; if (orig != data) @@ -4176,14 +3602,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) gfx_v7_0_update_rlc(adev, tmp); - if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) { orig = data = RREG32(mmCGTS_SM_CTRL_REG); data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; - if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && - (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) + if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) && + (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS)) data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; @@ -4249,7 +3675,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) + if (enable && (adev->pg_flags 
& AMD_PG_SUPPORT_RLC_SMU_HS)) data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; else data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; @@ -4263,7 +3689,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; else data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; @@ -4276,7 +3702,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP)) data &= ~0x8000; else data |= 0x8000; @@ -4289,7 +3715,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS)) data &= ~0x2000; else data |= 0x2000; @@ -4370,7 +3796,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, { u32 data, orig; - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { orig = data = RREG32(mmRLC_PG_CNTL); data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; if (orig != data) @@ -4395,28 +3821,19 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } -static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev, - u32 se, u32 sh) +static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) { - u32 mask = 0, tmp, tmp1; - int i; + u32 data, mask; - gfx_v7_0_select_se_sh(adev, se, sh); - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - tmp &= 0xffff0000; + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; - tmp |= tmp1; - tmp >>= 16; + mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh); - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { - mask <<= 1; - mask |= 1; - } - - return (~tmp) & mask; + return (~data) & mask; } static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) @@ -4442,7 +3859,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)) data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; else data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; @@ -4456,7 +3873,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)) data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; else data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; @@ -4623,15 +4040,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, static void gfx_v7_0_init_pg(struct amdgpu_device *adev) { - if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG | - AMDGPU_PG_SUPPORT_CP | - 
AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); - if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { gfx_v7_0_init_gfx_cgpg(adev); gfx_v7_0_enable_cp_pg(adev, true); gfx_v7_0_enable_gds_pg(adev, true); @@ -4643,14 +4060,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev) static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) { - if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_update_gfx_pg(adev, false); - if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { gfx_v7_0_enable_cp_pg(adev, false); gfx_v7_0_enable_gds_pg(adev, false); } @@ -4754,6 +4171,172 @@ static int gfx_v7_0_late_init(void *handle) return 0; } +static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) +{ + u32 gb_addr_config; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; + u32 tmp; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->gfx.config.max_shader_engines = 2; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 7; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_HAWAII: + adev->gfx.config.max_shader_engines = 4; + adev->gfx.config.max_tile_pipes = 16; + adev->gfx.config.max_cu_per_sh = 11; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 16; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_KAVERI: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; + if ((adev->pdev->device == 0x1304) || + (adev->pdev->device == 0x1305) || + (adev->pdev->device == 0x130C) || + (adev->pdev->device == 0x130F) || + (adev->pdev->device == 0x1310) || + (adev->pdev->device == 0x1311) || + (adev->pdev->device == 0x131C)) { + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; + } else if ((adev->pdev->device == 0x1309) || + (adev->pdev->device == 0x130A) || + (adev->pdev->device == 0x130D) || + (adev->pdev->device == 0x1313) || + (adev->pdev->device == 0x131D)) { + 
adev->gfx.config.max_cu_per_sh = 6;
+			adev->gfx.config.max_backends_per_se = 2;
+		} else if ((adev->pdev->device == 0x1306) ||
+			   (adev->pdev->device == 0x1307) ||
+			   (adev->pdev->device == 0x130B) ||
+			   (adev->pdev->device == 0x130E) ||
+			   (adev->pdev->device == 0x1315) ||
+			   (adev->pdev->device == 0x131B)) {
+			adev->gfx.config.max_cu_per_sh = 4;
+			adev->gfx.config.max_backends_per_se = 1;
+		} else {
+			adev->gfx.config.max_cu_per_sh = 3;
+			adev->gfx.config.max_backends_per_se = 1;
+		}
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+	default:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 2;
+		adev->gfx.config.max_cu_per_sh = 2;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 1;
+		adev->gfx.config.max_texture_channel_caches = 2;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+	adev->gfx.config.mem_max_burst_length_bytes = 256;
+	if (adev->flags & AMD_IS_APU) {
+		/* Get memory bank mapping mode. */
+		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+		/* Validate settings in case only one DIMM is installed. */
+		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+			dimm00_addr_map = 0;
+		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+			dimm01_addr_map = 0;
+		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+			dimm10_addr_map = 0;
+		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+			dimm11_addr_map = 0;
+
+		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
+		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one.
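
For reference, the DIMM address-map rule coded above can be modeled compactly; a small sketch with hypothetical names, values taken from the comments above (not driver API):

#include <stdio.h>

/* Address-map codes 0, 3, 4 and anything above 12 mean "no usable
 * DIMM" for this purpose, so they collapse to 0, as above. */
static unsigned int valid_map(unsigned int map)
{
	return (map == 0 || map == 3 || map == 4 || map > 12) ? 0 : map;
}

/* Code 11 is the 8GB map and selects 2KB rows on any DIMM;
 * everything else keeps 1KB rows, i.e. the larger row size wins. */
static unsigned int row_size_kb(unsigned int d00, unsigned int d01,
				unsigned int d10, unsigned int d11)
{
	if (valid_map(d00) == 11 || valid_map(d01) == 11 ||
	    valid_map(d10) == 11 || valid_map(d11) == 11)
		return 2;
	return 1;
}

int main(void)
{
	printf("%u %u\n", row_size_kb(11, 0, 0, 0), row_size_kb(5, 0, 0, 0));
	return 0;	/* prints "2 1" */
}
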
*/ + if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) + adev->gfx.config.mem_row_size_in_kb = 2; + else + adev->gfx.config.mem_row_size_in_kb = 1; + } else { + tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; + adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; + if (adev->gfx.config.mem_row_size_in_kb > 4) + adev->gfx.config.mem_row_size_in_kb = 4; + } + /* XXX use MC settings? */ + adev->gfx.config.shader_engine_tile_size = 32; + adev->gfx.config.num_gpus = 1; + adev->gfx.config.multi_gpu_tile_size = 64; + + /* fix up row size */ + gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + default: + gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + case 2: + gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + case 4: + gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + } + adev->gfx.config.gb_addr_config = gb_addr_config; +} + static int gfx_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; @@ -4857,6 +4440,10 @@ static int gfx_v7_0_sw_init(void *handle) if (r) return r; + adev->gfx.ce_ram_size = 0x8000; + + gfx_v7_0_gpu_early_init(adev); + return r; } @@ -4897,8 +4484,6 @@ static int gfx_v7_0_hw_init(void *handle) if (r) return r; - adev->gfx.ce_ram_size = 0x8000; - return r; } @@ -5015,16 +4600,6 @@ static void gfx_v7_0_print_status(void *handle) RREG32(mmHDP_ADDR_CONFIG)); dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", RREG32(mmDMIF_ADDR_CALC)); - dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); - dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", RREG32(mmCP_MEQ_THRESHOLDS)); @@ -5527,14 +5102,14 @@ static int gfx_v7_0_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) gate = true; - if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_update_gfx_pg(adev, gate); - if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { gfx_v7_0_enable_cp_pg(adev, gate); gfx_v7_0_enable_gds_pg(adev, gate); } @@ -5567,13 +5142,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .parse_cs = NULL, .emit_ib = gfx_v7_0_ring_emit_ib_gfx, .emit_fence = gfx_v7_0_ring_emit_fence_gfx, - .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, .test_ring = gfx_v7_0_ring_test_ring, .test_ib = gfx_v7_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { @@ -5583,13 +5158,13 @@ static const 
struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .parse_cs = NULL, .emit_ib = gfx_v7_0_ring_emit_ib_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute, - .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, .test_ring = gfx_v7_0_ring_test_ring, .test_ib = gfx_v7_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5659,7 +5234,7 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev) int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, - struct amdgpu_cu_info *cu_info) + struct amdgpu_cu_info *cu_info) { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; @@ -5667,16 +5242,19 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, if (!adev || !cu_info) return -EINVAL; + memset(cu_info, 0, sizeof(*cu_info)); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j); + bitmap = gfx_v7_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + for (k = 0; k < 16; k ++) { if (bitmap & mask) { if (counter < 2) ao_bitmap |= mask; @@ -5688,9 +5266,11 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; - mutex_unlock(&adev->grbm_idx_mutex); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 8f8ec37ecd88..1b85c001f860 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -43,9 +43,6 @@ #include "gca/gfx_8_0_sh_mask.h" #include "gca/gfx_8_0_enum.h" -#include "uvd/uvd_5_0_d.h" -#include "uvd/uvd_5_0_sh_mask.h" - #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" @@ -652,7 +649,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -662,7 +659,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -699,7 +696,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err1; @@ -709,9 +706,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); 
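
The ring and IB tests touched in these hunks all follow one handshake: seed a scratch location with 0xCAFEDEAD, submit a packet that writes 0xDEADBEEF, then poll until the value lands or the timeout expires. A userspace sketch of that pattern (fake engine; nothing here is driver API):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t scratch;

/* Stand-in for the GPU executing the queued write packet. */
static void fake_engine_flush(void)
{
	scratch = 0xDEADBEEF;
}

static int ring_test(unsigned int timeout)
{
	unsigned int i;

	scratch = 0xCAFEDEAD;		/* sentinel, as WREG32(scratch, ...) */
	fake_engine_flush();		/* ring alloc + write + commit */
	for (i = 0; i < timeout; i++)	/* the usec_timeout poll loop */
		if (scratch == 0xDEADBEEF)
			return 0;
	return -1;			/* the driver reports -EINVAL here */
}

int main(void)
{
	printf("ring test %s\n", ring_test(1000) == 0 ? "passed" : "failed");
	return 0;
}
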
if (r) goto err2; @@ -1171,7 +1167,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* allocate an indirect buffer to put the commands in */ memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, total_size, &ib); + r = amdgpu_ib_get(adev, NULL, total_size, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; @@ -1266,9 +1262,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); /* shedule the ib on the ring */ - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) { DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); goto fail; @@ -2574,11 +2569,6 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } } -static u32 gfx_v8_0_create_bitmask(u32 bit_width) -{ - return (u32)((1ULL << bit_width) - 1); -} - void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) { u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -2599,89 +2589,49 @@ void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) WREG32(mmGRBM_GFX_INDEX, data); } -static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, - u32 max_rb_num_per_se, - u32 sh_per_se) +static u32 gfx_v8_0_create_bitmask(u32 bit_width) +{ + return (u32)((1ULL << bit_width) - 1); +} + +static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; data = RREG32(mmCC_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; - mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se); + mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); - return data & mask; + return (~data) & mask; } -static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 max_rb_num_per_se) +static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) { int i, j; - u32 data, mask; - u32 disabled_rbs = 0; - u32 enabled_rbs = 0; + u32 data; + u32 active_rbs = 0; + u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v8_0_select_se_sh(adev, i, j); - data = gfx_v8_0_get_rb_disabled(adev, - max_rb_num_per_se, sh_per_se); - disabled_rbs |= data << ((i * sh_per_se + j) * - RB_BITMAP_WIDTH_PER_SH); + data = gfx_v8_0_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + rb_bitmap_width_per_sh); } } gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); - mask = 1; - for (i = 0; i < max_rb_num_per_se * se_num; i++) { - if (!(disabled_rbs & mask)) - enabled_rbs |= mask; - mask <<= 1; - } - - adev->gfx.config.backend_enable_mask = enabled_rbs; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - gfx_v8_0_select_se_sh(adev, i, 0xffffffff); - data = RREG32(mmPA_SC_RASTER_CONFIG); - for (j = 0; j < sh_per_se; j++) { - switch (enabled_rbs & 3) { - case 0: - if (j == 0) - data |= 
(RASTER_CONFIG_RB_MAP_3 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - else - data |= (RASTER_CONFIG_RB_MAP_0 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - break; - case 1: - data |= (RASTER_CONFIG_RB_MAP_0 << - (i * sh_per_se + j) * 2); - break; - case 2: - data |= (RASTER_CONFIG_RB_MAP_3 << - (i * sh_per_se + j) * 2); - break; - case 3: - default: - data |= (RASTER_CONFIG_RB_MAP_2 << - (i * sh_per_se + j) * 2); - break; - } - enabled_rbs >>= 2; - } - WREG32(mmPA_SC_RASTER_CONFIG, data); - } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); + adev->gfx.config.backend_enable_mask = active_rbs; + adev->gfx.config.num_rbs = hweight32(active_rbs); } /** @@ -2741,19 +2691,10 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); - WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, - adev->gfx.config.gb_addr_config & 0x70); - WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, - adev->gfx.config.gb_addr_config & 0x70); - WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); gfx_v8_0_tiling_mode_table_init(adev); - gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_backends_per_se); + gfx_v8_0_setup_rb(adev); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -3062,7 +3003,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v8_0_cp_gfx_enable(adev, true); - r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4); + r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; @@ -3126,7 +3067,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x8000); amdgpu_ring_write(ring, 0x8000); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); return 0; } @@ -3226,13 +3167,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) udelay(50); } -static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev) -{ - gfx_v8_0_cp_compute_enable(adev, true); - - return 0; -} - static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) { const struct gfx_firmware_header_v1_0 *mec_hdr; @@ -3802,9 +3736,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_PQ_STATUS, tmp); } - r = gfx_v8_0_cp_compute_start(adev); - if (r) - return r; + gfx_v8_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; @@ -4016,16 +3948,6 @@ static void gfx_v8_0_print_status(void *handle) RREG32(mmHDP_ADDR_CONFIG)); dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", RREG32(mmDMIF_ADDR_CALC)); - dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); - dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - 
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", RREG32(mmCP_MEQ_THRESHOLDS)); @@ -4699,8 +4621,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | - (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); + control |= ib->length_dw | (ib->vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -4729,8 +4650,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | - (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); + control |= ib->length_dw | (ib->vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -4762,54 +4682,17 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, } -/** - * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring - * - * @ring: amdgpu ring buffer object - * @semaphore: amdgpu semaphore object - * @emit_wait: Is this a sempahore wait? - * - * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP - * from running ahead of semaphore waits. - */ -static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; - - if (ring->adev->asic_type == CHIP_TOPAZ || - ring->adev->asic_type == CHIP_TONGA || - ring->adev->asic_type == CHIP_FIJI) - /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */ - return false; - else { - amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - amdgpu_ring_write(ring, sel); - } - - if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { - /* Prevent the PFP from running ahead of the semaphore wait */ - amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); - amdgpu_ring_write(ring, 0x0); - } - - return true; -} - static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); - uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint32_t seq = ring->fence_drv.sync_seq; uint64_t addr = ring->fence_drv.gpu_addr; amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3))); /* equal */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); @@ -4995,7 +4878,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, case AMDGPU_IRQ_STATE_ENABLE: cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 0); + PRIV_REG_INT_ENABLE, 1); WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); break; default: @@ -5145,13 +5028,13 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .parse_cs = NULL, .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, - .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, .test_ring = 
gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { @@ -5161,13 +5044,13 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .parse_cs = NULL, .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, - .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, .test_ring = gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5236,32 +5119,23 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) } } -static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev, - u32 se, u32 sh) +static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { - u32 mask = 0, tmp, tmp1; - int i; + u32 data, mask; - gfx_v8_0_select_se_sh(adev, se, sh); - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - tmp &= 0xffff0000; + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; - tmp |= tmp1; - tmp >>= 16; + mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh); - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { - mask <<= 1; - mask |= 1; - } - - return (~tmp) & mask; + return (~data) & mask; } int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, - struct amdgpu_cu_info *cu_info) + struct amdgpu_cu_info *cu_info) { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; @@ -5269,16 +5143,19 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, if (!adev || !cu_info) return -EINVAL; + memset(cu_info, 0, sizeof(*cu_info)); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j); + bitmap = gfx_v8_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + for (k = 0; k < 16; k ++) { if (bitmap & mask) { if (counter < 2) ao_bitmap |= mask; @@ -5290,9 +5167,11 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; - mutex_unlock(&adev->grbm_idx_mutex); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 8aa2991ab379..711840a23bd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -694,7 +694,8 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; + amdgpu_vm_manager_init(adev); /* base offset of vram pages */ if (adev->flags & 
AMD_IS_APU) { @@ -792,7 +793,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) data |= mc_cg_ls_en[i]; else data &= ~mc_cg_ls_en[i]; @@ -809,7 +810,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) data |= mc_cg_en[i]; else data &= ~mc_cg_en[i]; @@ -825,7 +826,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, orig = data = RREG32_PCIE(ixPCIE_CNTL2); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); @@ -848,7 +849,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, orig = data = RREG32(mmHDP_HOST_PATH_CNTL); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); else data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); @@ -864,7 +865,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, orig = data = RREG32(mmHDP_MEM_POWER_LS); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); else data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); @@ -926,10 +927,6 @@ static int gmc_v7_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_gem_init(adev); - if (r) - return r; - r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; @@ -1010,7 +1007,7 @@ static int gmc_v7_0_sw_fini(void *handle) adev->vm_manager.enabled = false; } gmc_v7_0_gart_fini(adev); - amdgpu_gem_fini(adev); + amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 3efd45546241..757803ae7c4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -252,6 +252,12 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) if (!adev->mc.fw) return -EINVAL; + /* Skip MC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. 
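
This guard repeats verbatim in the SMC loaders below; distilled, it is an early success return when the work has already been done elsewhere. A sketch with hypothetical types (only supports_sr_iov mirrors a field named in the patch):

#include <stdbool.h>
#include <stdio.h>

struct dev_ctx {
	bool supports_sr_iov;	/* probed from PCI capabilities */
	const void *fw;		/* firmware blob, may be NULL */
};

static int load_microcode(const struct dev_ctx *dev)
{
	if (!dev->fw)
		return -22;		/* -EINVAL */
	if (dev->supports_sr_iov)	/* vbios already loaded it */
		return 0;
	/* ... parse the header and upload the image ... */
	return 0;
}

int main(void)
{
	struct dev_ctx dev = { .supports_sr_iov = true, .fw = "blob" };
	printf("load: %d\n", load_microcode(&dev));	/* 0: skipped */
	return 0;
}
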
+ */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; amdgpu_ucode_print_mc_hdr(&hdr->header); @@ -774,7 +780,8 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; + amdgpu_vm_manager_init(adev); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) { @@ -880,10 +887,6 @@ static int gmc_v8_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_gem_init(adev); - if (r) - return r; - r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; @@ -964,7 +967,7 @@ static int gmc_v8_0_sw_fini(void *handle) adev->vm_manager.enabled = false; } gmc_v8_0_gart_fini(adev); - amdgpu_gem_fini(adev); + amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 090486c18249..52ee08193295 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -279,6 +279,12 @@ static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) if (!adev->pm.fw) return -EINVAL; + /* Skip SMC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. + */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; amdgpu_ucode_print_smc_hdr(&hdr->header); diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 7e9154c7f1db..654d76723bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? */ - pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; pi->caps_uvd_dpm = true; - pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; - pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; - pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; + pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; + pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; + pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; pi->caps_stable_p_state = false; ret = kv_parse_sys_info_table(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 2cf50180cc51..dddb8d6a81f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -244,7 +244,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - u32 vmid = (ib->vm ? 
ib->vm->ids[ring->idx].id : 0) & 0xf; + u32 vmid = ib->vm_id & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 2) @@ -334,31 +334,6 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } -/** - * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring - * - * @ring: amdgpu_ring structure holding ring information - * @semaphore: amdgpu semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (VI). - */ -static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 sig = emit_wait ? 0 : 1; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | - SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); - amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - return true; -} - /** * sdma_v2_4_gfx_stop - stop the gfx async dma engines * @@ -459,6 +434,9 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], + adev->gfx.config.gb_addr_config & 0x70); + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); /* Set ring buffer size in dwords */ @@ -636,7 +614,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ring_lock(ring, 5); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); @@ -649,7 +627,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -699,7 +677,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err0; @@ -716,9 +694,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err1; @@ -797,7 +774,7 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, * Update PTEs by writing them manually using sDMA (CIK). 
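
With the pages_addr parameter added here, each PTE value comes from a page-address table lookup plus the offset within the page, rather than a per-flag branch. A toy model of the write loop (4KB pages, hypothetical names; map_gart approximates the lookup the driver performs):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

/* Look up the page's bus address and keep the in-page offset. */
static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
	return pages_addr[addr >> PAGE_SHIFT] | (addr & ~PAGE_MASK);
}

/* Fill count PTEs starting at addr, stepping by incr and OR-ing in
 * the hardware flags, as the SDMA packet body above does. */
static void write_ptes(uint64_t *ptes, const uint64_t *pages_addr,
		       uint64_t addr, unsigned int count,
		       uint32_t incr, uint32_t flags)
{
	unsigned int i;

	for (i = 0; i < count; i++, addr += incr)
		ptes[i] = map_gart(pages_addr, addr) | flags;
}

int main(void)
{
	uint64_t pages[2] = { 0x100000, 0x200000 };
	uint64_t ptes[2];

	write_ptes(ptes, pages, 0, 2, 1 << PAGE_SHIFT, 0x1 /* valid */);
	printf("%llx %llx\n", (unsigned long long)ptes[0],
	       (unsigned long long)ptes[1]);	/* 100001 200001 */
	return 0;
}
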
*/ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { @@ -816,14 +793,7 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & AMDGPU_PTE_SYSTEM) { - value = amdgpu_vm_map_gart(ib->ring->adev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & AMDGPU_PTE_VALID) { - value = addr; - } else { - value = 0; - } + value = amdgpu_vm_map_gart(pages_addr, addr); addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; @@ -881,14 +851,14 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw + * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ -static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) +static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); u32 pad_count; int i; @@ -1111,6 +1081,8 @@ static void sdma_v2_4_print_status(void *handle) i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { vi_srbm_select(adev, 0, 0, 0, j); @@ -1302,12 +1274,12 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { .parse_cs = NULL, .emit_ib = sdma_v2_4_ring_emit_ib, .emit_fence = sdma_v2_4_ring_emit_fence, - .emit_semaphore = sdma_v2_4_ring_emit_semaphore, .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, .test_ring = sdma_v2_4_ring_test_ring, .test_ib = sdma_v2_4_ring_test_ib, .insert_nop = sdma_v2_4_ring_insert_nop, + .pad_ib = sdma_v2_4_ring_pad_ib, }; static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) @@ -1405,14 +1377,18 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { .copy_pte = sdma_v2_4_vm_copy_pte, .write_pte = sdma_v2_4_vm_write_pte, .set_pte_pde = sdma_v2_4_vm_set_pte_pde, - .pad_ib = sdma_v2_4_vm_pad_ib, }; static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) { + unsigned i; + if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index ad54c46751b0..19e02f7a06f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -355,7 +355,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - u32 vmid = (ib->vm ? 
ib->vm->ids[ring->idx].id : 0) & 0xf; + u32 vmid = ib->vm_id & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 2) @@ -444,32 +444,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } - -/** - * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring - * - * @ring: amdgpu_ring structure holding ring information - * @semaphore: amdgpu semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (VI). - */ -static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 sig = emit_wait ? 0 : 1; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | - SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); - amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - return true; -} - /** * sdma_v3_0_gfx_stop - stop the gfx async dma engines * @@ -596,6 +570,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], + adev->gfx.config.gb_addr_config & 0x70); + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); /* Set ring buffer size in dwords */ @@ -788,7 +765,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ring_lock(ring, 5); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); @@ -801,7 +778,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -851,7 +828,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err0; @@ -868,9 +845,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err1; @@ -948,7 +924,7 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, * Update PTEs by writing them manually using sDMA (CIK). 
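In both SDMA generations vm_write_pte now takes the GART page array (pages_addr) directly, and the per-entry AMDGPU_PTE_SYSTEM / AMDGPU_PTE_VALID branching collapses into amdgpu_vm_map_gart(). Annotated, the new inner loop looks like this (the low/high dword split of the 64-bit entry is inferred from the surrounding function, which is cut off in the hunk):

	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
		/* map_gart resolves system pages through pages_addr and
		 * passes VRAM addresses through, so no flag test remains */
		value = amdgpu_vm_map_gart(pages_addr, addr);
		addr += incr;
		value |= flags;				/* OR in the PTE flags */
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
	}
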
*/ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { @@ -967,14 +943,7 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & AMDGPU_PTE_SYSTEM) { - value = amdgpu_vm_map_gart(ib->ring->adev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & AMDGPU_PTE_VALID) { - value = addr; - } else { - value = 0; - } + value = amdgpu_vm_map_gart(pages_addr, addr); addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; @@ -1032,14 +1001,14 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw + * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ -static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) +static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); u32 pad_count; int i; @@ -1275,6 +1244,8 @@ static void sdma_v3_0_print_status(void *handle) i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n", i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { vi_srbm_select(adev, 0, 0, 0, j); @@ -1570,12 +1541,12 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { .parse_cs = NULL, .emit_ib = sdma_v3_0_ring_emit_ib, .emit_fence = sdma_v3_0_ring_emit_fence, - .emit_semaphore = sdma_v3_0_ring_emit_semaphore, .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, .test_ring = sdma_v3_0_ring_test_ring, .test_ib = sdma_v3_0_ring_test_ib, .insert_nop = sdma_v3_0_ring_insert_nop, + .pad_ib = sdma_v3_0_ring_pad_ib, }; static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) @@ -1673,14 +1644,18 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { .copy_pte = sdma_v3_0_vm_copy_pte, .write_pte = sdma_v3_0_vm_write_pte, .set_pte_pde = sdma_v3_0_vm_set_pte_pde, - .pad_ib = sdma_v3_0_vm_pad_ib, }; static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) { + unsigned i; + if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 361c49a82323..083893dd68c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -272,6 +272,12 @@ static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev) if (!adev->pm.fw) return -EINVAL; + /* Skip SMC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. 
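The tonga_smc hunk here short-circuits SMC ucode upload under SR-IOV: the host VBIOS has already loaded the SMC during asic_init, so the function reports success rather than an error. The guard pattern, in isolation:

	static int example_upload_smc(struct amdgpu_device *adev)
	{
		if (!adev->pm.fw)
			return -EINVAL;		/* no firmware image at all */
		if (adev->virtualization.supports_sr_iov)
			return 0;		/* host VBIOS already uploaded it */
		/* ... normal header parse and upload path ... */
		return 0;
	}

Returning 0 rather than an error matters: callers treat the skip as a completed upload and continue SMU bring-up.
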
+ */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; amdgpu_ucode_print_smc_hdr(&hdr->header); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5e9f73af83a8..c606ccb38d8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -164,7 +164,7 @@ static int uvd_v4_2_hw_init(void *handle) goto done; } - r = amdgpu_ring_lock(ring, 10); + r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; @@ -189,7 +189,7 @@ static int uvd_v4_2_hw_init(void *handle) amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); done: /* lower clocks again */ @@ -438,33 +438,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, 2); } -/** - * uvd_v4_2_ring_emit_semaphore - emit semaphore command - * - * @ring: amdgpu_ring pointer - * @semaphore: semaphore to emit commands for - * @emit_wait: true if we should emit a wait command - * - * Emit a semaphore command (either wait or signal) to the UVD ring. - */ -static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); - - return true; -} - /** * uvd_v4_2_ring_test_ring - register write test * @@ -480,7 +453,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -488,7 +461,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) @@ -549,7 +522,7 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; @@ -603,6 +576,10 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) addr = (adev->uvd.gpu_addr >> 32) & 0xFF; WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + uvd_v4_2_init_cg(adev); } @@ -611,7 +588,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, { u32 orig, data; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) { data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); data = 0xfff; WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); @@ -804,6 +781,13 @@ static void uvd_v4_2_print_status(void *handle) 
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", RREG32(mmUVD_CONTEXT_ID)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + } static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev, @@ -830,6 +814,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle, bool gate = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) + return 0; + if (state == AMD_CG_STATE_GATE) gate = true; @@ -848,7 +835,10 @@ static int uvd_v4_2_set_powergating_state(void *handle, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) + return 0; if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); @@ -882,10 +872,10 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v4_2_ring_emit_ib, .emit_fence = uvd_v4_2_ring_emit_fence, - .emit_semaphore = uvd_v4_2_ring_emit_semaphore, .test_ring = uvd_v4_2_ring_test_ring, .test_ib = uvd_v4_2_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 38864f562981..e3c852d9d79a 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -160,7 +160,7 @@ static int uvd_v5_0_hw_init(void *handle) goto done; } - r = amdgpu_ring_lock(ring, 10); + r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; @@ -185,7 +185,7 @@ static int uvd_v5_0_hw_init(void *handle) amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); done: /* lower clocks again */ @@ -279,6 +279,10 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); + + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } /** @@ -482,33 +486,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, 2); } -/** - * uvd_v5_0_ring_emit_semaphore - emit semaphore command - * - * @ring: amdgpu_ring pointer - * @semaphore: semaphore to emit commands for - * @emit_wait: true if we should emit a wait command - * - * Emit a semaphore command (either wait or signal) to the UVD ring. 
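uvd_v4_2, uvd_v5_0 and uvd_v6_0 all drop their emit_semaphore hooks in these hunks and pick up .pad_ib = amdgpu_ring_generic_pad_ib instead. The generic helper's body is not part of this diff; a hedged sketch of the shape such a pad can take, assuming single-dword NOPs and align_mask/nop fields on struct amdgpu_ring:

	/* sketch only: amdgpu_ring_generic_pad_ib is defined elsewhere,
	 * and ring->align_mask / ring->nop are assumed fields */
	static void example_generic_pad_ib(struct amdgpu_ring *ring,
					   struct amdgpu_ib *ib)
	{
		while (ib->length_dw & ring->align_mask)
			ib->ptr[ib->length_dw++] = ring->nop;
	}

Compare the SDMA variants earlier in the patch, which must pad with proper SDMA NOP packets and therefore keep ring-specific pad_ib callbacks.
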
- */ -static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); - - return true; -} - /** * uvd_v5_0_ring_test_ring - register write test * @@ -524,7 +501,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -532,7 +509,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) @@ -595,7 +572,7 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; @@ -751,6 +728,12 @@ static void uvd_v5_0_print_status(void *handle) RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", RREG32(mmUVD_CONTEXT_ID)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); } static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, @@ -774,6 +757,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) + return 0; + return 0; } @@ -789,6 +777,9 @@ static int uvd_v5_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) + return 0; + if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); return 0; @@ -821,10 +812,10 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, - .emit_semaphore = uvd_v5_0_ring_emit_semaphore, .test_ring = uvd_v5_0_ring_test_ring, .test_ib = uvd_v5_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3d5913926436..3375e614ac67 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -157,7 +157,7 @@ static int uvd_v6_0_hw_init(void *handle) goto done; } - r = amdgpu_ring_lock(ring, 10); + r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; @@ -182,7 +182,7 @@ static 
int uvd_v6_0_hw_init(void *handle) amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); done: if (!r) @@ -277,6 +277,10 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); + + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, @@ -532,7 +536,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); /* Set dynamic clock gating in S/W control mode */ - if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { + if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { if (adev->flags & AMD_IS_APU) cz_set_uvd_clock_gating_branches(adev, false); else @@ -721,33 +725,6 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, 2); } -/** - * uvd_v6_0_ring_emit_semaphore - emit semaphore command - * - * @ring: amdgpu_ring pointer - * @semaphore: semaphore to emit commands for - * @emit_wait: true if we should emit a wait command - * - * Emit a semaphore command (either wait or signal) to the UVD ring. - */ -static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 
1 : 0)); - - return true; -} - /** * uvd_v6_0_ring_test_ring - register write test * @@ -763,7 +740,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -771,7 +748,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) @@ -827,7 +804,7 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; @@ -974,6 +951,12 @@ static void uvd_v6_0_print_status(void *handle) RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", RREG32(mmUVD_CONTEXT_ID)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); } static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, @@ -1000,7 +983,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; if (enable) { @@ -1030,6 +1013,9 @@ static int uvd_v6_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) + return 0; + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; @@ -1062,10 +1048,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, - .emit_semaphore = uvd_v6_0_ring_emit_semaphore, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = uvd_v6_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 52ac7a8f1e58..c7e885bcfd41 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) { bool sw_cg = false; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { if (sw_cg) vce_v2_0_set_sw_cg(adev, true); else @@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) + return 0; + if (state == AMD_PG_STATE_GATE) /* XXX do we need a vce_v2_0_stop()? 
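A recurring pattern across the UVD and VCE hunks: the clock- and power-gating handlers now return early when the corresponding AMD_CG_SUPPORT_* / AMD_PG_SUPPORT_* bit is clear (the bit definitions are added to amd_shared.h further down in this patch). The shape of the guard:

	static int example_set_powergating_state(void *handle,
						 enum amd_powergating_state state)
	{
		struct amdgpu_device *adev = handle;

		if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
			return 0;	/* PG not supported: report success, do nothing */

		if (state == AMD_PG_STATE_GATE)
			return 0;	/* gate the block here */
		return 0;		/* ungate path */
	}

Reporting success instead of an error keeps the common gating code from treating an unsupported block as a failure.
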
*/ return 0; @@ -639,10 +642,10 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .parse_cs = amdgpu_vce_ring_parse_cs, .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, - .emit_semaphore = amdgpu_vce_ring_emit_semaphore, .test_ring = amdgpu_vce_ring_test_ring, .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index e99af81e4aec..ce468ee5da2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) WREG32_P(mmVCE_STATUS, 0, ~1); /* Set Clock-Gating off */ - if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) + if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) vce_v3_0_set_vce_sw_clock_gating(adev, false); if (r) { @@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? true : false; int i; - if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; mutex_lock(&adev->grbm_idx_mutex); @@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) + return 0; + if (state == AMD_PG_STATE_GATE) /* XXX do we need a vce_v3_0_stop()? */ return 0; @@ -759,10 +762,10 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .parse_cs = amdgpu_vce_ring_parse_cs, .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, - .emit_semaphore = amdgpu_vce_ring_emit_semaphore, .test_ring = amdgpu_vce_ring_test_ring, .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 89f5a1ff6f43..b72cf063df1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -74,6 +74,9 @@ #include "uvd_v6_0.h" #include "vce_v3_0.h" #include "amdgpu_powerplay.h" +#if defined(CONFIG_DRM_AMD_ACP) +#include "amdgpu_acp.h" +#endif /* * Indirect registers accessor @@ -571,374 +574,12 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void vi_print_gpu_status_regs(struct amdgpu_device *adev) -{ - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(mmGRBM_STATUS)); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(mmGRBM_STATUS2)); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(mmGRBM_STATUS_SE0)); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(mmGRBM_STATUS_SE1)); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(mmGRBM_STATUS_SE2)); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(mmGRBM_STATUS_SE3)); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); - if (adev->sdma.num_instances > 1) { - dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); - } - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); - 
dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT1)); - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT2)); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT3)); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(mmCP_CPF_BUSY_STAT)); - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPF_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPC_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); -} - -/** - * vi_gpu_check_soft_reset - check which blocks are busy - * - * @adev: amdgpu_device pointer - * - * Check which blocks are busy and return the relevant reset - * mask to be used by vi_gpu_soft_reset(). - * Returns a mask of the blocks to be reset. - */ -u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev) -{ - u32 reset_mask = 0; - u32 tmp; - - /* GRBM_STATUS */ - tmp = RREG32(mmGRBM_STATUS); - if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | - GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | - GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | - GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | - GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) - reset_mask |= AMDGPU_RESET_GFX; - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_CP; - - /* GRBM_STATUS2 */ - tmp = RREG32(mmGRBM_STATUS2); - if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_RLC; - - if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK | - GRBM_STATUS2__CPC_BUSY_MASK | - GRBM_STATUS2__CPG_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_CP; - - /* SRBM_STATUS2 */ - tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA; - - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA1; - - /* SRBM_STATUS */ - tmp = RREG32(mmSRBM_STATUS); - - if (tmp & SRBM_STATUS__IH_BUSY_MASK) - reset_mask |= AMDGPU_RESET_IH; - - if (tmp & SRBM_STATUS__SEM_BUSY_MASK) - reset_mask |= AMDGPU_RESET_SEM; - - if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) - reset_mask |= AMDGPU_RESET_GRBM; - - if (adev->asic_type != CHIP_TOPAZ) { - if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK | - SRBM_STATUS__UVD_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_UVD; - } - - if (tmp & SRBM_STATUS__VMC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VMC; - - if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_MC; - - /* SDMA0_STATUS_REG */ - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA; - - /* SDMA1_STATUS_REG */ - if (adev->sdma.num_instances > 1) { - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA1; - } -#if 0 - /* VCE_STATUS */ - if (adev->asic_type != CHIP_TOPAZ) { - tmp = RREG32(mmVCE_STATUS); - if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VCE; - if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VCE1; - - } - - if (adev->asic_type != 
CHIP_TOPAZ) { - if (amdgpu_display_is_display_hung(adev)) - reset_mask |= AMDGPU_RESET_DISPLAY; - } -#endif - - /* Skip MC reset as it's mostly likely not hung, just busy */ - if (reset_mask & AMDGPU_RESET_MC) { - DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); - reset_mask &= ~AMDGPU_RESET_MC; - } - - return reset_mask; -} - -/** - * vi_gpu_soft_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * @reset_mask: mask of which blocks to reset - * - * Soft reset the blocks specified in @reset_mask. - */ -static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) -{ - struct amdgpu_mode_mc_save save; - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - u32 tmp; - - if (reset_mask == 0) - return; - - dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); - - vi_print_gpu_status_regs(adev); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); - - /* disable CG/PG */ - - /* stop the rlc */ - //XXX - //gfx_v8_0_rlc_stop(adev); - - /* Disable GFX parsing/prefetching */ - tmp = RREG32(mmCP_ME_CNTL); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); - WREG32(mmCP_ME_CNTL, tmp); - - /* Disable MEC parsing/prefetching */ - tmp = RREG32(mmCP_MEC_CNTL); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - WREG32(mmCP_MEC_CNTL, tmp); - - if (reset_mask & AMDGPU_RESET_DMA) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - } - if (reset_mask & AMDGPU_RESET_DMA1) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - } - - gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } - - if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) { - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); - } - - if (reset_mask & AMDGPU_RESET_CP) { - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); - } - - if (reset_mask & AMDGPU_RESET_DMA) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1); - - if (reset_mask & AMDGPU_RESET_DMA1) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1); - - if (reset_mask & AMDGPU_RESET_DISPLAY) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1); - - if (reset_mask & AMDGPU_RESET_RLC) - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - - if (reset_mask & AMDGPU_RESET_SEM) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); - - if (reset_mask & AMDGPU_RESET_IH) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1); - - if (reset_mask & AMDGPU_RESET_GRBM) - srbm_soft_reset = - 
REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); - - if (reset_mask & AMDGPU_RESET_VMC) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); - - if (reset_mask & AMDGPU_RESET_UVD) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); - - if (reset_mask & AMDGPU_RESET_VCE) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - - if (reset_mask & AMDGPU_RESET_VCE) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - - if (!(adev->flags & AMD_IS_APU)) { - if (reset_mask & AMDGPU_RESET_MC) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); - } - - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } - - /* Wait a little for things to settle down */ - udelay(50); - - gmc_v8_0_mc_resume(adev, &save); - udelay(50); - - vi_print_gpu_status_regs(adev); -} - static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; - u32 tmp, i; + u32 i; dev_info(adev->dev, "GPU pci config reset\n"); - /* disable dpm? */ - - /* disable cg/pg */ - - /* Disable GFX parsing/prefetching */ - tmp = RREG32(mmCP_ME_CNTL); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); - WREG32(mmCP_ME_CNTL, tmp); - - /* Disable MEC parsing/prefetching */ - tmp = RREG32(mmCP_MEC_CNTL); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - WREG32(mmCP_MEC_CNTL, tmp); - - /* Disable GFX parsing/prefetching */ - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | - CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); - - /* Disable MEC parsing/prefetching */ - WREG32(mmCP_MEC_CNTL, - CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); - - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - - /* XXX other engines? 
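With the soft-reset machinery deleted, the VI reset path reduces to a PCI config reset bracketed by BIOS scratch updates, as the rewritten vi_asic_reset() below shows. The whole flow boils down to:

	vi_set_bios_scratch_engine_hung(adev, true);	/* tell the VBIOS the engines hung */
	vi_gpu_pci_config_reset(adev);			/* full-ASIC reset via PCI config space */
	vi_set_bios_scratch_engine_hung(adev, false);	/* engines healthy again */
	return 0;
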
*/ - - /* halt the rlc, disable cp internal ints */ - //XXX - //gfx_v8_0_rlc_stop(adev); - - udelay(50); - - /* disable mem access */ - gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timed out !\n"); - } - /* disable BM */ pci_clear_master(adev->pdev); /* reset */ @@ -978,26 +619,11 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun */ static int vi_asic_reset(struct amdgpu_device *adev) { - u32 reset_mask; + vi_set_bios_scratch_engine_hung(adev, true); - reset_mask = vi_gpu_check_soft_reset(adev); + vi_gpu_pci_config_reset(adev); - if (reset_mask) - vi_set_bios_scratch_engine_hung(adev, true); - - /* try soft reset */ - vi_gpu_soft_reset(adev, reset_mask); - - reset_mask = vi_gpu_check_soft_reset(adev); - - /* try pci config reset */ - if (reset_mask && amdgpu_hard_reset) - vi_gpu_pci_config_reset(adev); - - reset_mask = vi_gpu_check_soft_reset(adev); - - if (!reset_mask) - vi_set_bios_scratch_engine_hung(adev, false); + vi_set_bios_scratch_engine_hung(adev, false); return 0; } @@ -1347,6 +973,15 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = .rev = 0, .funcs = &vce_v3_0_ip_funcs, }, +#if defined(CONFIG_DRM_AMD_ACP) + { + .type = AMD_IP_BLOCK_TYPE_ACP, + .major = 2, + .minor = 2, + .rev = 0, + .funcs = &acp_ip_funcs, + }, +#endif }; int vi_set_ip_blocks(struct amdgpu_device *adev) @@ -1457,8 +1092,7 @@ static int vi_common_early_init(void *handle) case CHIP_STONEY: adev->has_uvd = true; adev->cg_flags = 0; - /* Disable UVD pg */ - adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; + adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index d98aa9d82fa1..ace49976f7be 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -71,8 +71,6 @@ #define VMID(x) ((x) << 4) #define QUEUEID(x) ((x) << 8) -#define RB_BITMAP_WIDTH_PER_SH 2 - #define MC_SEQ_MISC0__MT__MASK 0xf0000000 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 #define MC_SEQ_MISC0__MT__DDR2 0x20000000 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index c34c393e9aea..d5e19b5fbbfb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers( union SQ_CMD_BITS *in_reg_sq_cmd, union GRBM_GFX_INDEX_BITS *in_reg_gfx_index) { - int status; + int status = 0; union SQ_CMD_BITS reg_sq_cmd; union GRBM_GFX_INDEX_BITS reg_gfx_index; struct HsaDbgWaveMsgAMDGen2 *pMsg; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index ca8410e8683d..850a5623661f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -59,18 +59,23 @@ module_param(send_sigterm, int, 0444); MODULE_PARM_DESC(send_sigterm, "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)"); -bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) +static int amdkfd_init_completed; + +int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) { + if (!amdkfd_init_completed) + return -EPROBE_DEFER; + /* * Only one interface version is supported, * no kfd/kgd version skew allowed. 
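kgd2kfd_init() switches from bool to an errno here so that the kgd side can distinguish "amdkfd not finished loading yet" from a real mismatch. A caller-side sketch (example_bind_kfd is hypothetical):

	static int example_bind_kfd(const struct kgd2kfd_calls **g2f)
	{
		int r = kgd2kfd_init(KFD_INTERFACE_VERSION, g2f);

		if (r == -EPROBE_DEFER)
			return r;	/* amdkfd still initializing: retry the probe later */
		if (r)
			return r;	/* e.g. -EINVAL on kfd/kgd version skew */
		return 0;		/* *g2f now points at the kgd2kfd calls */
	}
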
*/ if (interface_version != KFD_INTERFACE_VERSION) - return false; + return -EINVAL; *g2f = &kgd2kfd; - return true; + return 0; } EXPORT_SYMBOL(kgd2kfd_init); @@ -111,6 +116,8 @@ static int __init kfd_module_init(void) kfd_process_create_wq(); + amdkfd_init_completed = 1; + dev_info(kfd_device, "Initialized module\n"); return 0; @@ -125,6 +132,8 @@ err_pasid: static void __exit kfd_module_exit(void) { + amdkfd_init_completed = 0; + kfd_process_destroy_wq(); kfd_topology_shutdown(); kfd_chardev_exit(); diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h index 496360eb3fba..50e893325141 100644 --- a/drivers/gpu/drm/amd/include/amd_acpi.h +++ b/drivers/gpu/drm/amd/include/amd_acpi.h @@ -340,6 +340,8 @@ struct atcs_pref_req_output { # define ATPX_FIXED_NOT_SUPPORTED (1 << 9) # define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10) # define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11) +# define ATPX_DGPU_CAN_DRIVE_DISPLAYS (1 << 12) +# define ATPX_MS_HYBRID_GFX_SUPPORTED (1 << 14) #define ATPX_FUNCTION_POWER_CONTROL 0x2 /* ARG0: ATPX_FUNCTION_POWER_CONTROL * ARG1: diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 1195d06f55bc..04e4090666fb 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -73,6 +73,7 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_SDMA, AMD_IP_BLOCK_TYPE_UVD, AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_ACP, }; enum amd_clockgating_state { @@ -85,6 +86,38 @@ enum amd_powergating_state { AMD_PG_STATE_UNGATE, }; +/* CG flags */ +#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) +#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) +#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2) +#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3) +#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4) +#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5) +#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6) +#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7) +#define AMD_CG_SUPPORT_MC_LS (1 << 8) +#define AMD_CG_SUPPORT_MC_MGCG (1 << 9) +#define AMD_CG_SUPPORT_SDMA_LS (1 << 10) +#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11) +#define AMD_CG_SUPPORT_BIF_LS (1 << 12) +#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13) +#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14) +#define AMD_CG_SUPPORT_HDP_LS (1 << 15) +#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16) + +/* PG flags */ +#define AMD_PG_SUPPORT_GFX_PG (1 << 0) +#define AMD_PG_SUPPORT_GFX_SMG (1 << 1) +#define AMD_PG_SUPPORT_GFX_DMG (1 << 2) +#define AMD_PG_SUPPORT_UVD (1 << 3) +#define AMD_PG_SUPPORT_VCE (1 << 4) +#define AMD_PG_SUPPORT_CP (1 << 5) +#define AMD_PG_SUPPORT_GDS (1 << 6) +#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7) +#define AMD_PG_SUPPORT_SDMA (1 << 8) +#define AMD_PG_SUPPORT_ACP (1 << 9) +#define AMD_PG_SUPPORT_SAMU (1 << 10) + enum amd_pm_state_type { /* not used for dpm */ POWER_STATE_TYPE_DEFAULT, diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h index dc52ea0df4b4..d3ccf5a86de0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h @@ -1379,6 +1379,7 @@ #define mmDC_GPIO_PAD_STRENGTH_1 0x1978 #define mmDC_GPIO_PAD_STRENGTH_2 0x1979 #define mmPHY_AUX_CNTL 0x197f +#define mmDC_GPIO_I2CPAD_MASK 0x1974 #define mmDC_GPIO_I2CPAD_A 0x1975 #define mmDC_GPIO_I2CPAD_EN 0x1976 #define mmDC_GPIO_I2CPAD_Y 0x1977 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h new file mode 100644 index 
000000000000..6bea30ef3df5 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h @@ -0,0 +1,1117 @@ +/* + * DCE_8_0 Register documentation + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef DCE_8_0_ENUM_H +#define DCE_8_0_ENUM_H + +typedef enum SurfaceEndian { + ENDIAN_NONE = 0x0, + ENDIAN_8IN16 = 0x1, + ENDIAN_8IN32 = 0x2, + ENDIAN_8IN64 = 0x3, +} SurfaceEndian; +typedef enum ArrayMode { + ARRAY_LINEAR_GENERAL = 0x0, + ARRAY_LINEAR_ALIGNED = 0x1, + ARRAY_1D_TILED_THIN1 = 0x2, + ARRAY_1D_TILED_THICK = 0x3, + ARRAY_2D_TILED_THIN1 = 0x4, + ARRAY_PRT_TILED_THIN1 = 0x5, + ARRAY_PRT_2D_TILED_THIN1 = 0x6, + ARRAY_2D_TILED_THICK = 0x7, + ARRAY_2D_TILED_XTHICK = 0x8, + ARRAY_PRT_TILED_THICK = 0x9, + ARRAY_PRT_2D_TILED_THICK = 0xa, + ARRAY_PRT_3D_TILED_THIN1 = 0xb, + ARRAY_3D_TILED_THIN1 = 0xc, + ARRAY_3D_TILED_THICK = 0xd, + ARRAY_3D_TILED_XTHICK = 0xe, + ARRAY_PRT_3D_TILED_THICK = 0xf, +} ArrayMode; +typedef enum PipeTiling { + CONFIG_1_PIPE = 0x0, + CONFIG_2_PIPE = 0x1, + CONFIG_4_PIPE = 0x2, + CONFIG_8_PIPE = 0x3, +} PipeTiling; +typedef enum BankTiling { + CONFIG_4_BANK = 0x0, + CONFIG_8_BANK = 0x1, +} BankTiling; +typedef enum GroupInterleave { + CONFIG_256B_GROUP = 0x0, + CONFIG_512B_GROUP = 0x1, +} GroupInterleave; +typedef enum RowTiling { + CONFIG_1KB_ROW = 0x0, + CONFIG_2KB_ROW = 0x1, + CONFIG_4KB_ROW = 0x2, + CONFIG_8KB_ROW = 0x3, + CONFIG_1KB_ROW_OPT = 0x4, + CONFIG_2KB_ROW_OPT = 0x5, + CONFIG_4KB_ROW_OPT = 0x6, + CONFIG_8KB_ROW_OPT = 0x7, +} RowTiling; +typedef enum BankSwapBytes { + CONFIG_128B_SWAPS = 0x0, + CONFIG_256B_SWAPS = 0x1, + CONFIG_512B_SWAPS = 0x2, + CONFIG_1KB_SWAPS = 0x3, +} BankSwapBytes; +typedef enum SampleSplitBytes { + CONFIG_1KB_SPLIT = 0x0, + CONFIG_2KB_SPLIT = 0x1, + CONFIG_4KB_SPLIT = 0x2, + CONFIG_8KB_SPLIT = 0x3, +} SampleSplitBytes; +typedef enum NumPipes { + ADDR_CONFIG_1_PIPE = 0x0, + ADDR_CONFIG_2_PIPE = 0x1, + ADDR_CONFIG_4_PIPE = 0x2, + ADDR_CONFIG_8_PIPE = 0x3, +} NumPipes; +typedef enum PipeInterleaveSize { + ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, + ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, +} PipeInterleaveSize; +typedef enum BankInterleaveSize { + ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, + ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, + ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2, + ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, +} BankInterleaveSize; +typedef enum NumShaderEngines { + ADDR_CONFIG_1_SHADER_ENGINE = 0x0, + ADDR_CONFIG_2_SHADER_ENGINE = 0x1, +} NumShaderEngines; 
+typedef enum ShaderEngineTileSize { + ADDR_CONFIG_SE_TILE_16 = 0x0, + ADDR_CONFIG_SE_TILE_32 = 0x1, +} ShaderEngineTileSize; +typedef enum NumGPUs { + ADDR_CONFIG_1_GPU = 0x0, + ADDR_CONFIG_2_GPU = 0x1, + ADDR_CONFIG_4_GPU = 0x2, +} NumGPUs; +typedef enum MultiGPUTileSize { + ADDR_CONFIG_GPU_TILE_16 = 0x0, + ADDR_CONFIG_GPU_TILE_32 = 0x1, + ADDR_CONFIG_GPU_TILE_64 = 0x2, + ADDR_CONFIG_GPU_TILE_128 = 0x3, +} MultiGPUTileSize; +typedef enum RowSize { + ADDR_CONFIG_1KB_ROW = 0x0, + ADDR_CONFIG_2KB_ROW = 0x1, + ADDR_CONFIG_4KB_ROW = 0x2, +} RowSize; +typedef enum NumLowerPipes { + ADDR_CONFIG_1_LOWER_PIPES = 0x0, + ADDR_CONFIG_2_LOWER_PIPES = 0x1, +} NumLowerPipes; +typedef enum DebugBlockId { + DBG_CLIENT_BLKID_RESERVED = 0x0, + DBG_CLIENT_BLKID_dbg = 0x1, + DBG_CLIENT_BLKID_uvdu_0 = 0x2, + DBG_CLIENT_BLKID_uvdu_1 = 0x3, + DBG_CLIENT_BLKID_uvdu_2 = 0x4, + DBG_CLIENT_BLKID_uvdu_3 = 0x5, + DBG_CLIENT_BLKID_uvdu_4 = 0x6, + DBG_CLIENT_BLKID_uvdu_5 = 0x7, + DBG_CLIENT_BLKID_uvdu_6 = 0x8, + DBG_CLIENT_BLKID_uvdm_0 = 0x9, + DBG_CLIENT_BLKID_uvdm_1 = 0xa, + DBG_CLIENT_BLKID_uvdm_2 = 0xb, + DBG_CLIENT_BLKID_uvdm_3 = 0xc, + DBG_CLIENT_BLKID_vcea_0 = 0xd, + DBG_CLIENT_BLKID_vcea_1 = 0xe, + DBG_CLIENT_BLKID_vcea_2 = 0xf, + DBG_CLIENT_BLKID_vcea_3 = 0x10, + DBG_CLIENT_BLKID_vcea_4 = 0x11, + DBG_CLIENT_BLKID_vcea_5 = 0x12, + DBG_CLIENT_BLKID_vcea_6 = 0x13, + DBG_CLIENT_BLKID_vceb_0 = 0x14, + DBG_CLIENT_BLKID_vceb_1 = 0x15, + DBG_CLIENT_BLKID_vceb_2 = 0x16, + DBG_CLIENT_BLKID_dco = 0x17, + DBG_CLIENT_BLKID_xdma = 0x18, + DBG_CLIENT_BLKID_smu_0 = 0x19, + DBG_CLIENT_BLKID_smu_1 = 0x1a, + DBG_CLIENT_BLKID_smu_2 = 0x1b, + DBG_CLIENT_BLKID_gck = 0x1c, + DBG_CLIENT_BLKID_tmonw0 = 0x1d, + DBG_CLIENT_BLKID_tmonw1 = 0x1e, + DBG_CLIENT_BLKID_grbm = 0x1f, + DBG_CLIENT_BLKID_rlc = 0x20, + DBG_CLIENT_BLKID_ds0 = 0x21, + DBG_CLIENT_BLKID_cpg_0 = 0x22, + DBG_CLIENT_BLKID_cpg_1 = 0x23, + DBG_CLIENT_BLKID_cpc_0 = 0x24, + DBG_CLIENT_BLKID_cpc_1 = 0x25, + DBG_CLIENT_BLKID_cpf = 0x26, + DBG_CLIENT_BLKID_scf0 = 0x27, + DBG_CLIENT_BLKID_scf1 = 0x28, + DBG_CLIENT_BLKID_scf2 = 0x29, + DBG_CLIENT_BLKID_scf3 = 0x2a, + DBG_CLIENT_BLKID_pc0 = 0x2b, + DBG_CLIENT_BLKID_pc1 = 0x2c, + DBG_CLIENT_BLKID_pc2 = 0x2d, + DBG_CLIENT_BLKID_pc3 = 0x2e, + DBG_CLIENT_BLKID_vgt0 = 0x2f, + DBG_CLIENT_BLKID_vgt1 = 0x30, + DBG_CLIENT_BLKID_vgt2 = 0x31, + DBG_CLIENT_BLKID_vgt3 = 0x32, + DBG_CLIENT_BLKID_sx00 = 0x33, + DBG_CLIENT_BLKID_sx10 = 0x34, + DBG_CLIENT_BLKID_sx20 = 0x35, + DBG_CLIENT_BLKID_sx30 = 0x36, + DBG_CLIENT_BLKID_cb001 = 0x37, + DBG_CLIENT_BLKID_cb200 = 0x38, + DBG_CLIENT_BLKID_cb201 = 0x39, + DBG_CLIENT_BLKID_cbr0 = 0x3a, + DBG_CLIENT_BLKID_cb000 = 0x3b, + DBG_CLIENT_BLKID_cb101 = 0x3c, + DBG_CLIENT_BLKID_cb300 = 0x3d, + DBG_CLIENT_BLKID_cb301 = 0x3e, + DBG_CLIENT_BLKID_cbr1 = 0x3f, + DBG_CLIENT_BLKID_cb100 = 0x40, + DBG_CLIENT_BLKID_ia0 = 0x41, + DBG_CLIENT_BLKID_ia1 = 0x42, + DBG_CLIENT_BLKID_bci0 = 0x43, + DBG_CLIENT_BLKID_bci1 = 0x44, + DBG_CLIENT_BLKID_bci2 = 0x45, + DBG_CLIENT_BLKID_bci3 = 0x46, + DBG_CLIENT_BLKID_pa0 = 0x47, + DBG_CLIENT_BLKID_pa1 = 0x48, + DBG_CLIENT_BLKID_spim0 = 0x49, + DBG_CLIENT_BLKID_spim1 = 0x4a, + DBG_CLIENT_BLKID_spim2 = 0x4b, + DBG_CLIENT_BLKID_spim3 = 0x4c, + DBG_CLIENT_BLKID_sdma = 0x4d, + DBG_CLIENT_BLKID_ih = 0x4e, + DBG_CLIENT_BLKID_sem = 0x4f, + DBG_CLIENT_BLKID_srbm = 0x50, + DBG_CLIENT_BLKID_hdp = 0x51, + DBG_CLIENT_BLKID_acp_0 = 0x52, + DBG_CLIENT_BLKID_acp_1 = 0x53, + DBG_CLIENT_BLKID_sam = 0x54, + DBG_CLIENT_BLKID_mcc0 = 0x55, + DBG_CLIENT_BLKID_mcc1 = 0x56, + DBG_CLIENT_BLKID_mcc2 = 0x57, + 
DBG_CLIENT_BLKID_mcc3 = 0x58, + DBG_CLIENT_BLKID_mcd0 = 0x59, + DBG_CLIENT_BLKID_mcd1 = 0x5a, + DBG_CLIENT_BLKID_mcd2 = 0x5b, + DBG_CLIENT_BLKID_mcd3 = 0x5c, + DBG_CLIENT_BLKID_mcb = 0x5d, + DBG_CLIENT_BLKID_vmc = 0x5e, + DBG_CLIENT_BLKID_gmcon = 0x5f, + DBG_CLIENT_BLKID_gdc_0 = 0x60, + DBG_CLIENT_BLKID_gdc_1 = 0x61, + DBG_CLIENT_BLKID_gdc_2 = 0x62, + DBG_CLIENT_BLKID_gdc_3 = 0x63, + DBG_CLIENT_BLKID_gdc_4 = 0x64, + DBG_CLIENT_BLKID_gdc_5 = 0x65, + DBG_CLIENT_BLKID_gdc_6 = 0x66, + DBG_CLIENT_BLKID_gdc_7 = 0x67, + DBG_CLIENT_BLKID_gdc_8 = 0x68, + DBG_CLIENT_BLKID_gdc_9 = 0x69, + DBG_CLIENT_BLKID_gdc_10 = 0x6a, + DBG_CLIENT_BLKID_gdc_11 = 0x6b, + DBG_CLIENT_BLKID_gdc_12 = 0x6c, + DBG_CLIENT_BLKID_gdc_13 = 0x6d, + DBG_CLIENT_BLKID_gdc_14 = 0x6e, + DBG_CLIENT_BLKID_gdc_15 = 0x6f, + DBG_CLIENT_BLKID_gdc_16 = 0x70, + DBG_CLIENT_BLKID_gdc_17 = 0x71, + DBG_CLIENT_BLKID_gdc_18 = 0x72, + DBG_CLIENT_BLKID_gdc_19 = 0x73, + DBG_CLIENT_BLKID_gdc_20 = 0x74, + DBG_CLIENT_BLKID_gdc_21 = 0x75, + DBG_CLIENT_BLKID_gdc_22 = 0x76, + DBG_CLIENT_BLKID_wd = 0x77, + DBG_CLIENT_BLKID_sdma_0 = 0x78, + DBG_CLIENT_BLKID_sdma_1 = 0x79, +} DebugBlockId; +typedef enum DebugBlockId_OLD { + DBG_BLOCK_ID_RESERVED = 0x0, + DBG_BLOCK_ID_DBG = 0x1, + DBG_BLOCK_ID_VMC = 0x2, + DBG_BLOCK_ID_PDMA = 0x3, + DBG_BLOCK_ID_CG = 0x4, + DBG_BLOCK_ID_SRBM = 0x5, + DBG_BLOCK_ID_GRBM = 0x6, + DBG_BLOCK_ID_RLC = 0x7, + DBG_BLOCK_ID_CSC = 0x8, + DBG_BLOCK_ID_SEM = 0x9, + DBG_BLOCK_ID_IH = 0xa, + DBG_BLOCK_ID_SC = 0xb, + DBG_BLOCK_ID_SQ = 0xc, + DBG_BLOCK_ID_AVP = 0xd, + DBG_BLOCK_ID_GMCON = 0xe, + DBG_BLOCK_ID_SMU = 0xf, + DBG_BLOCK_ID_DMA0 = 0x10, + DBG_BLOCK_ID_DMA1 = 0x11, + DBG_BLOCK_ID_SPIM = 0x12, + DBG_BLOCK_ID_GDS = 0x13, + DBG_BLOCK_ID_SPIS = 0x14, + DBG_BLOCK_ID_UNUSED0 = 0x15, + DBG_BLOCK_ID_PA0 = 0x16, + DBG_BLOCK_ID_PA1 = 0x17, + DBG_BLOCK_ID_CP0 = 0x18, + DBG_BLOCK_ID_CP1 = 0x19, + DBG_BLOCK_ID_CP2 = 0x1a, + DBG_BLOCK_ID_UNUSED1 = 0x1b, + DBG_BLOCK_ID_UVDU = 0x1c, + DBG_BLOCK_ID_UVDM = 0x1d, + DBG_BLOCK_ID_VCE = 0x1e, + DBG_BLOCK_ID_UNUSED2 = 0x1f, + DBG_BLOCK_ID_VGT0 = 0x20, + DBG_BLOCK_ID_VGT1 = 0x21, + DBG_BLOCK_ID_IA = 0x22, + DBG_BLOCK_ID_UNUSED3 = 0x23, + DBG_BLOCK_ID_SCT0 = 0x24, + DBG_BLOCK_ID_SCT1 = 0x25, + DBG_BLOCK_ID_SPM0 = 0x26, + DBG_BLOCK_ID_SPM1 = 0x27, + DBG_BLOCK_ID_TCAA = 0x28, + DBG_BLOCK_ID_TCAB = 0x29, + DBG_BLOCK_ID_TCCA = 0x2a, + DBG_BLOCK_ID_TCCB = 0x2b, + DBG_BLOCK_ID_MCC0 = 0x2c, + DBG_BLOCK_ID_MCC1 = 0x2d, + DBG_BLOCK_ID_MCC2 = 0x2e, + DBG_BLOCK_ID_MCC3 = 0x2f, + DBG_BLOCK_ID_SX0 = 0x30, + DBG_BLOCK_ID_SX1 = 0x31, + DBG_BLOCK_ID_SX2 = 0x32, + DBG_BLOCK_ID_SX3 = 0x33, + DBG_BLOCK_ID_UNUSED4 = 0x34, + DBG_BLOCK_ID_UNUSED5 = 0x35, + DBG_BLOCK_ID_UNUSED6 = 0x36, + DBG_BLOCK_ID_UNUSED7 = 0x37, + DBG_BLOCK_ID_PC0 = 0x38, + DBG_BLOCK_ID_PC1 = 0x39, + DBG_BLOCK_ID_UNUSED8 = 0x3a, + DBG_BLOCK_ID_UNUSED9 = 0x3b, + DBG_BLOCK_ID_UNUSED10 = 0x3c, + DBG_BLOCK_ID_UNUSED11 = 0x3d, + DBG_BLOCK_ID_MCB = 0x3e, + DBG_BLOCK_ID_UNUSED12 = 0x3f, + DBG_BLOCK_ID_SCB0 = 0x40, + DBG_BLOCK_ID_SCB1 = 0x41, + DBG_BLOCK_ID_UNUSED13 = 0x42, + DBG_BLOCK_ID_UNUSED14 = 0x43, + DBG_BLOCK_ID_SCF0 = 0x44, + DBG_BLOCK_ID_SCF1 = 0x45, + DBG_BLOCK_ID_UNUSED15 = 0x46, + DBG_BLOCK_ID_UNUSED16 = 0x47, + DBG_BLOCK_ID_BCI0 = 0x48, + DBG_BLOCK_ID_BCI1 = 0x49, + DBG_BLOCK_ID_BCI2 = 0x4a, + DBG_BLOCK_ID_BCI3 = 0x4b, + DBG_BLOCK_ID_UNUSED17 = 0x4c, + DBG_BLOCK_ID_UNUSED18 = 0x4d, + DBG_BLOCK_ID_UNUSED19 = 0x4e, + DBG_BLOCK_ID_UNUSED20 = 0x4f, + DBG_BLOCK_ID_CB00 = 0x50, + DBG_BLOCK_ID_CB01 = 0x51, + DBG_BLOCK_ID_CB02 = 0x52, + DBG_BLOCK_ID_CB03 = 
0x53, + DBG_BLOCK_ID_CB04 = 0x54, + DBG_BLOCK_ID_UNUSED21 = 0x55, + DBG_BLOCK_ID_UNUSED22 = 0x56, + DBG_BLOCK_ID_UNUSED23 = 0x57, + DBG_BLOCK_ID_CB10 = 0x58, + DBG_BLOCK_ID_CB11 = 0x59, + DBG_BLOCK_ID_CB12 = 0x5a, + DBG_BLOCK_ID_CB13 = 0x5b, + DBG_BLOCK_ID_CB14 = 0x5c, + DBG_BLOCK_ID_UNUSED24 = 0x5d, + DBG_BLOCK_ID_UNUSED25 = 0x5e, + DBG_BLOCK_ID_UNUSED26 = 0x5f, + DBG_BLOCK_ID_TCP0 = 0x60, + DBG_BLOCK_ID_TCP1 = 0x61, + DBG_BLOCK_ID_TCP2 = 0x62, + DBG_BLOCK_ID_TCP3 = 0x63, + DBG_BLOCK_ID_TCP4 = 0x64, + DBG_BLOCK_ID_TCP5 = 0x65, + DBG_BLOCK_ID_TCP6 = 0x66, + DBG_BLOCK_ID_TCP7 = 0x67, + DBG_BLOCK_ID_TCP8 = 0x68, + DBG_BLOCK_ID_TCP9 = 0x69, + DBG_BLOCK_ID_TCP10 = 0x6a, + DBG_BLOCK_ID_TCP11 = 0x6b, + DBG_BLOCK_ID_TCP12 = 0x6c, + DBG_BLOCK_ID_TCP13 = 0x6d, + DBG_BLOCK_ID_TCP14 = 0x6e, + DBG_BLOCK_ID_TCP15 = 0x6f, + DBG_BLOCK_ID_TCP16 = 0x70, + DBG_BLOCK_ID_TCP17 = 0x71, + DBG_BLOCK_ID_TCP18 = 0x72, + DBG_BLOCK_ID_TCP19 = 0x73, + DBG_BLOCK_ID_TCP20 = 0x74, + DBG_BLOCK_ID_TCP21 = 0x75, + DBG_BLOCK_ID_TCP22 = 0x76, + DBG_BLOCK_ID_TCP23 = 0x77, + DBG_BLOCK_ID_TCP_RESERVED0 = 0x78, + DBG_BLOCK_ID_TCP_RESERVED1 = 0x79, + DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a, + DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b, + DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c, + DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d, + DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e, + DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f, + DBG_BLOCK_ID_DB00 = 0x80, + DBG_BLOCK_ID_DB01 = 0x81, + DBG_BLOCK_ID_DB02 = 0x82, + DBG_BLOCK_ID_DB03 = 0x83, + DBG_BLOCK_ID_DB04 = 0x84, + DBG_BLOCK_ID_UNUSED27 = 0x85, + DBG_BLOCK_ID_UNUSED28 = 0x86, + DBG_BLOCK_ID_UNUSED29 = 0x87, + DBG_BLOCK_ID_DB10 = 0x88, + DBG_BLOCK_ID_DB11 = 0x89, + DBG_BLOCK_ID_DB12 = 0x8a, + DBG_BLOCK_ID_DB13 = 0x8b, + DBG_BLOCK_ID_DB14 = 0x8c, + DBG_BLOCK_ID_UNUSED30 = 0x8d, + DBG_BLOCK_ID_UNUSED31 = 0x8e, + DBG_BLOCK_ID_UNUSED32 = 0x8f, + DBG_BLOCK_ID_TCC0 = 0x90, + DBG_BLOCK_ID_TCC1 = 0x91, + DBG_BLOCK_ID_TCC2 = 0x92, + DBG_BLOCK_ID_TCC3 = 0x93, + DBG_BLOCK_ID_TCC4 = 0x94, + DBG_BLOCK_ID_TCC5 = 0x95, + DBG_BLOCK_ID_TCC6 = 0x96, + DBG_BLOCK_ID_TCC7 = 0x97, + DBG_BLOCK_ID_SPS00 = 0x98, + DBG_BLOCK_ID_SPS01 = 0x99, + DBG_BLOCK_ID_SPS02 = 0x9a, + DBG_BLOCK_ID_SPS10 = 0x9b, + DBG_BLOCK_ID_SPS11 = 0x9c, + DBG_BLOCK_ID_SPS12 = 0x9d, + DBG_BLOCK_ID_UNUSED33 = 0x9e, + DBG_BLOCK_ID_UNUSED34 = 0x9f, + DBG_BLOCK_ID_TA00 = 0xa0, + DBG_BLOCK_ID_TA01 = 0xa1, + DBG_BLOCK_ID_TA02 = 0xa2, + DBG_BLOCK_ID_TA03 = 0xa3, + DBG_BLOCK_ID_TA04 = 0xa4, + DBG_BLOCK_ID_TA05 = 0xa5, + DBG_BLOCK_ID_TA06 = 0xa6, + DBG_BLOCK_ID_TA07 = 0xa7, + DBG_BLOCK_ID_TA08 = 0xa8, + DBG_BLOCK_ID_TA09 = 0xa9, + DBG_BLOCK_ID_TA0A = 0xaa, + DBG_BLOCK_ID_TA0B = 0xab, + DBG_BLOCK_ID_UNUSED35 = 0xac, + DBG_BLOCK_ID_UNUSED36 = 0xad, + DBG_BLOCK_ID_UNUSED37 = 0xae, + DBG_BLOCK_ID_UNUSED38 = 0xaf, + DBG_BLOCK_ID_TA10 = 0xb0, + DBG_BLOCK_ID_TA11 = 0xb1, + DBG_BLOCK_ID_TA12 = 0xb2, + DBG_BLOCK_ID_TA13 = 0xb3, + DBG_BLOCK_ID_TA14 = 0xb4, + DBG_BLOCK_ID_TA15 = 0xb5, + DBG_BLOCK_ID_TA16 = 0xb6, + DBG_BLOCK_ID_TA17 = 0xb7, + DBG_BLOCK_ID_TA18 = 0xb8, + DBG_BLOCK_ID_TA19 = 0xb9, + DBG_BLOCK_ID_TA1A = 0xba, + DBG_BLOCK_ID_TA1B = 0xbb, + DBG_BLOCK_ID_UNUSED39 = 0xbc, + DBG_BLOCK_ID_UNUSED40 = 0xbd, + DBG_BLOCK_ID_UNUSED41 = 0xbe, + DBG_BLOCK_ID_UNUSED42 = 0xbf, + DBG_BLOCK_ID_TD00 = 0xc0, + DBG_BLOCK_ID_TD01 = 0xc1, + DBG_BLOCK_ID_TD02 = 0xc2, + DBG_BLOCK_ID_TD03 = 0xc3, + DBG_BLOCK_ID_TD04 = 0xc4, + DBG_BLOCK_ID_TD05 = 0xc5, + DBG_BLOCK_ID_TD06 = 0xc6, + DBG_BLOCK_ID_TD07 = 0xc7, + DBG_BLOCK_ID_TD08 = 0xc8, + DBG_BLOCK_ID_TD09 = 0xc9, + DBG_BLOCK_ID_TD0A = 0xca, + DBG_BLOCK_ID_TD0B = 0xcb, + 
DBG_BLOCK_ID_UNUSED43 = 0xcc, + DBG_BLOCK_ID_UNUSED44 = 0xcd, + DBG_BLOCK_ID_UNUSED45 = 0xce, + DBG_BLOCK_ID_UNUSED46 = 0xcf, + DBG_BLOCK_ID_TD10 = 0xd0, + DBG_BLOCK_ID_TD11 = 0xd1, + DBG_BLOCK_ID_TD12 = 0xd2, + DBG_BLOCK_ID_TD13 = 0xd3, + DBG_BLOCK_ID_TD14 = 0xd4, + DBG_BLOCK_ID_TD15 = 0xd5, + DBG_BLOCK_ID_TD16 = 0xd6, + DBG_BLOCK_ID_TD17 = 0xd7, + DBG_BLOCK_ID_TD18 = 0xd8, + DBG_BLOCK_ID_TD19 = 0xd9, + DBG_BLOCK_ID_TD1A = 0xda, + DBG_BLOCK_ID_TD1B = 0xdb, + DBG_BLOCK_ID_UNUSED47 = 0xdc, + DBG_BLOCK_ID_UNUSED48 = 0xdd, + DBG_BLOCK_ID_UNUSED49 = 0xde, + DBG_BLOCK_ID_UNUSED50 = 0xdf, + DBG_BLOCK_ID_MCD0 = 0xe0, + DBG_BLOCK_ID_MCD1 = 0xe1, + DBG_BLOCK_ID_MCD2 = 0xe2, + DBG_BLOCK_ID_MCD3 = 0xe3, + DBG_BLOCK_ID_MCD4 = 0xe4, + DBG_BLOCK_ID_MCD5 = 0xe5, + DBG_BLOCK_ID_UNUSED51 = 0xe6, + DBG_BLOCK_ID_UNUSED52 = 0xe7, +} DebugBlockId_OLD; +typedef enum DebugBlockId_BY2 { + DBG_BLOCK_ID_RESERVED_BY2 = 0x0, + DBG_BLOCK_ID_VMC_BY2 = 0x1, + DBG_BLOCK_ID_CG_BY2 = 0x2, + DBG_BLOCK_ID_GRBM_BY2 = 0x3, + DBG_BLOCK_ID_CSC_BY2 = 0x4, + DBG_BLOCK_ID_IH_BY2 = 0x5, + DBG_BLOCK_ID_SQ_BY2 = 0x6, + DBG_BLOCK_ID_GMCON_BY2 = 0x7, + DBG_BLOCK_ID_DMA0_BY2 = 0x8, + DBG_BLOCK_ID_SPIM_BY2 = 0x9, + DBG_BLOCK_ID_SPIS_BY2 = 0xa, + DBG_BLOCK_ID_PA0_BY2 = 0xb, + DBG_BLOCK_ID_CP0_BY2 = 0xc, + DBG_BLOCK_ID_CP2_BY2 = 0xd, + DBG_BLOCK_ID_UVDU_BY2 = 0xe, + DBG_BLOCK_ID_VCE_BY2 = 0xf, + DBG_BLOCK_ID_VGT0_BY2 = 0x10, + DBG_BLOCK_ID_IA_BY2 = 0x11, + DBG_BLOCK_ID_SCT0_BY2 = 0x12, + DBG_BLOCK_ID_SPM0_BY2 = 0x13, + DBG_BLOCK_ID_TCAA_BY2 = 0x14, + DBG_BLOCK_ID_TCCA_BY2 = 0x15, + DBG_BLOCK_ID_MCC0_BY2 = 0x16, + DBG_BLOCK_ID_MCC2_BY2 = 0x17, + DBG_BLOCK_ID_SX0_BY2 = 0x18, + DBG_BLOCK_ID_SX2_BY2 = 0x19, + DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a, + DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b, + DBG_BLOCK_ID_PC0_BY2 = 0x1c, + DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d, + DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e, + DBG_BLOCK_ID_MCB_BY2 = 0x1f, + DBG_BLOCK_ID_SCB0_BY2 = 0x20, + DBG_BLOCK_ID_UNUSED13_BY2 = 0x21, + DBG_BLOCK_ID_SCF0_BY2 = 0x22, + DBG_BLOCK_ID_UNUSED15_BY2 = 0x23, + DBG_BLOCK_ID_BCI0_BY2 = 0x24, + DBG_BLOCK_ID_BCI2_BY2 = 0x25, + DBG_BLOCK_ID_UNUSED17_BY2 = 0x26, + DBG_BLOCK_ID_UNUSED19_BY2 = 0x27, + DBG_BLOCK_ID_CB00_BY2 = 0x28, + DBG_BLOCK_ID_CB02_BY2 = 0x29, + DBG_BLOCK_ID_CB04_BY2 = 0x2a, + DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b, + DBG_BLOCK_ID_CB10_BY2 = 0x2c, + DBG_BLOCK_ID_CB12_BY2 = 0x2d, + DBG_BLOCK_ID_CB14_BY2 = 0x2e, + DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f, + DBG_BLOCK_ID_TCP0_BY2 = 0x30, + DBG_BLOCK_ID_TCP2_BY2 = 0x31, + DBG_BLOCK_ID_TCP4_BY2 = 0x32, + DBG_BLOCK_ID_TCP6_BY2 = 0x33, + DBG_BLOCK_ID_TCP8_BY2 = 0x34, + DBG_BLOCK_ID_TCP10_BY2 = 0x35, + DBG_BLOCK_ID_TCP12_BY2 = 0x36, + DBG_BLOCK_ID_TCP14_BY2 = 0x37, + DBG_BLOCK_ID_TCP16_BY2 = 0x38, + DBG_BLOCK_ID_TCP18_BY2 = 0x39, + DBG_BLOCK_ID_TCP20_BY2 = 0x3a, + DBG_BLOCK_ID_TCP22_BY2 = 0x3b, + DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, + DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, + DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, + DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, + DBG_BLOCK_ID_DB00_BY2 = 0x40, + DBG_BLOCK_ID_DB02_BY2 = 0x41, + DBG_BLOCK_ID_DB04_BY2 = 0x42, + DBG_BLOCK_ID_UNUSED28_BY2 = 0x43, + DBG_BLOCK_ID_DB10_BY2 = 0x44, + DBG_BLOCK_ID_DB12_BY2 = 0x45, + DBG_BLOCK_ID_DB14_BY2 = 0x46, + DBG_BLOCK_ID_UNUSED31_BY2 = 0x47, + DBG_BLOCK_ID_TCC0_BY2 = 0x48, + DBG_BLOCK_ID_TCC2_BY2 = 0x49, + DBG_BLOCK_ID_TCC4_BY2 = 0x4a, + DBG_BLOCK_ID_TCC6_BY2 = 0x4b, + DBG_BLOCK_ID_SPS00_BY2 = 0x4c, + DBG_BLOCK_ID_SPS02_BY2 = 0x4d, + DBG_BLOCK_ID_SPS11_BY2 = 0x4e, + DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f, + DBG_BLOCK_ID_TA00_BY2 = 0x50, + 
DBG_BLOCK_ID_TA02_BY2 = 0x51, + DBG_BLOCK_ID_TA04_BY2 = 0x52, + DBG_BLOCK_ID_TA06_BY2 = 0x53, + DBG_BLOCK_ID_TA08_BY2 = 0x54, + DBG_BLOCK_ID_TA0A_BY2 = 0x55, + DBG_BLOCK_ID_UNUSED35_BY2 = 0x56, + DBG_BLOCK_ID_UNUSED37_BY2 = 0x57, + DBG_BLOCK_ID_TA10_BY2 = 0x58, + DBG_BLOCK_ID_TA12_BY2 = 0x59, + DBG_BLOCK_ID_TA14_BY2 = 0x5a, + DBG_BLOCK_ID_TA16_BY2 = 0x5b, + DBG_BLOCK_ID_TA18_BY2 = 0x5c, + DBG_BLOCK_ID_TA1A_BY2 = 0x5d, + DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e, + DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f, + DBG_BLOCK_ID_TD00_BY2 = 0x60, + DBG_BLOCK_ID_TD02_BY2 = 0x61, + DBG_BLOCK_ID_TD04_BY2 = 0x62, + DBG_BLOCK_ID_TD06_BY2 = 0x63, + DBG_BLOCK_ID_TD08_BY2 = 0x64, + DBG_BLOCK_ID_TD0A_BY2 = 0x65, + DBG_BLOCK_ID_UNUSED43_BY2 = 0x66, + DBG_BLOCK_ID_UNUSED45_BY2 = 0x67, + DBG_BLOCK_ID_TD10_BY2 = 0x68, + DBG_BLOCK_ID_TD12_BY2 = 0x69, + DBG_BLOCK_ID_TD14_BY2 = 0x6a, + DBG_BLOCK_ID_TD16_BY2 = 0x6b, + DBG_BLOCK_ID_TD18_BY2 = 0x6c, + DBG_BLOCK_ID_TD1A_BY2 = 0x6d, + DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e, + DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f, + DBG_BLOCK_ID_MCD0_BY2 = 0x70, + DBG_BLOCK_ID_MCD2_BY2 = 0x71, + DBG_BLOCK_ID_MCD4_BY2 = 0x72, + DBG_BLOCK_ID_UNUSED51_BY2 = 0x73, +} DebugBlockId_BY2; +typedef enum DebugBlockId_BY4 { + DBG_BLOCK_ID_RESERVED_BY4 = 0x0, + DBG_BLOCK_ID_CG_BY4 = 0x1, + DBG_BLOCK_ID_CSC_BY4 = 0x2, + DBG_BLOCK_ID_SQ_BY4 = 0x3, + DBG_BLOCK_ID_DMA0_BY4 = 0x4, + DBG_BLOCK_ID_SPIS_BY4 = 0x5, + DBG_BLOCK_ID_CP0_BY4 = 0x6, + DBG_BLOCK_ID_UVDU_BY4 = 0x7, + DBG_BLOCK_ID_VGT0_BY4 = 0x8, + DBG_BLOCK_ID_SCT0_BY4 = 0x9, + DBG_BLOCK_ID_TCAA_BY4 = 0xa, + DBG_BLOCK_ID_MCC0_BY4 = 0xb, + DBG_BLOCK_ID_SX0_BY4 = 0xc, + DBG_BLOCK_ID_UNUSED4_BY4 = 0xd, + DBG_BLOCK_ID_PC0_BY4 = 0xe, + DBG_BLOCK_ID_UNUSED10_BY4 = 0xf, + DBG_BLOCK_ID_SCB0_BY4 = 0x10, + DBG_BLOCK_ID_SCF0_BY4 = 0x11, + DBG_BLOCK_ID_BCI0_BY4 = 0x12, + DBG_BLOCK_ID_UNUSED17_BY4 = 0x13, + DBG_BLOCK_ID_CB00_BY4 = 0x14, + DBG_BLOCK_ID_CB04_BY4 = 0x15, + DBG_BLOCK_ID_CB10_BY4 = 0x16, + DBG_BLOCK_ID_CB14_BY4 = 0x17, + DBG_BLOCK_ID_TCP0_BY4 = 0x18, + DBG_BLOCK_ID_TCP4_BY4 = 0x19, + DBG_BLOCK_ID_TCP8_BY4 = 0x1a, + DBG_BLOCK_ID_TCP12_BY4 = 0x1b, + DBG_BLOCK_ID_TCP16_BY4 = 0x1c, + DBG_BLOCK_ID_TCP20_BY4 = 0x1d, + DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, + DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, + DBG_BLOCK_ID_DB_BY4 = 0x20, + DBG_BLOCK_ID_DB04_BY4 = 0x21, + DBG_BLOCK_ID_DB10_BY4 = 0x22, + DBG_BLOCK_ID_DB14_BY4 = 0x23, + DBG_BLOCK_ID_TCC0_BY4 = 0x24, + DBG_BLOCK_ID_TCC4_BY4 = 0x25, + DBG_BLOCK_ID_SPS00_BY4 = 0x26, + DBG_BLOCK_ID_SPS11_BY4 = 0x27, + DBG_BLOCK_ID_TA00_BY4 = 0x28, + DBG_BLOCK_ID_TA04_BY4 = 0x29, + DBG_BLOCK_ID_TA08_BY4 = 0x2a, + DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b, + DBG_BLOCK_ID_TA10_BY4 = 0x2c, + DBG_BLOCK_ID_TA14_BY4 = 0x2d, + DBG_BLOCK_ID_TA18_BY4 = 0x2e, + DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f, + DBG_BLOCK_ID_TD00_BY4 = 0x30, + DBG_BLOCK_ID_TD04_BY4 = 0x31, + DBG_BLOCK_ID_TD08_BY4 = 0x32, + DBG_BLOCK_ID_UNUSED43_BY4 = 0x33, + DBG_BLOCK_ID_TD10_BY4 = 0x34, + DBG_BLOCK_ID_TD14_BY4 = 0x35, + DBG_BLOCK_ID_TD18_BY4 = 0x36, + DBG_BLOCK_ID_UNUSED47_BY4 = 0x37, + DBG_BLOCK_ID_MCD0_BY4 = 0x38, + DBG_BLOCK_ID_MCD4_BY4 = 0x39, +} DebugBlockId_BY4; +typedef enum DebugBlockId_BY8 { + DBG_BLOCK_ID_RESERVED_BY8 = 0x0, + DBG_BLOCK_ID_CSC_BY8 = 0x1, + DBG_BLOCK_ID_DMA0_BY8 = 0x2, + DBG_BLOCK_ID_CP0_BY8 = 0x3, + DBG_BLOCK_ID_VGT0_BY8 = 0x4, + DBG_BLOCK_ID_TCAA_BY8 = 0x5, + DBG_BLOCK_ID_SX0_BY8 = 0x6, + DBG_BLOCK_ID_PC0_BY8 = 0x7, + DBG_BLOCK_ID_SCB0_BY8 = 0x8, + DBG_BLOCK_ID_BCI0_BY8 = 0x9, + DBG_BLOCK_ID_CB00_BY8 = 0xa, + DBG_BLOCK_ID_CB10_BY8 = 0xb, + DBG_BLOCK_ID_TCP0_BY8 = 0xc, + 
DBG_BLOCK_ID_TCP8_BY8 = 0xd, + DBG_BLOCK_ID_TCP16_BY8 = 0xe, + DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, + DBG_BLOCK_ID_DB00_BY8 = 0x10, + DBG_BLOCK_ID_DB10_BY8 = 0x11, + DBG_BLOCK_ID_TCC0_BY8 = 0x12, + DBG_BLOCK_ID_SPS00_BY8 = 0x13, + DBG_BLOCK_ID_TA00_BY8 = 0x14, + DBG_BLOCK_ID_TA08_BY8 = 0x15, + DBG_BLOCK_ID_TA10_BY8 = 0x16, + DBG_BLOCK_ID_TA18_BY8 = 0x17, + DBG_BLOCK_ID_TD00_BY8 = 0x18, + DBG_BLOCK_ID_TD08_BY8 = 0x19, + DBG_BLOCK_ID_TD10_BY8 = 0x1a, + DBG_BLOCK_ID_TD18_BY8 = 0x1b, + DBG_BLOCK_ID_MCD0_BY8 = 0x1c, +} DebugBlockId_BY8; +typedef enum DebugBlockId_BY16 { + DBG_BLOCK_ID_RESERVED_BY16 = 0x0, + DBG_BLOCK_ID_DMA0_BY16 = 0x1, + DBG_BLOCK_ID_VGT0_BY16 = 0x2, + DBG_BLOCK_ID_SX0_BY16 = 0x3, + DBG_BLOCK_ID_SCB0_BY16 = 0x4, + DBG_BLOCK_ID_CB00_BY16 = 0x5, + DBG_BLOCK_ID_TCP0_BY16 = 0x6, + DBG_BLOCK_ID_TCP16_BY16 = 0x7, + DBG_BLOCK_ID_DB00_BY16 = 0x8, + DBG_BLOCK_ID_TCC0_BY16 = 0x9, + DBG_BLOCK_ID_TA00_BY16 = 0xa, + DBG_BLOCK_ID_TA10_BY16 = 0xb, + DBG_BLOCK_ID_TD00_BY16 = 0xc, + DBG_BLOCK_ID_TD10_BY16 = 0xd, + DBG_BLOCK_ID_MCD0_BY16 = 0xe, +} DebugBlockId_BY16; +typedef enum CompareRef { + REF_NEVER = 0x0, + REF_LESS = 0x1, + REF_EQUAL = 0x2, + REF_LEQUAL = 0x3, + REF_GREATER = 0x4, + REF_NOTEQUAL = 0x5, + REF_GEQUAL = 0x6, + REF_ALWAYS = 0x7, +} CompareRef; +typedef enum ReadSize { + READ_256_BITS = 0x0, + READ_512_BITS = 0x1, +} ReadSize; +typedef enum DepthFormat { + DEPTH_INVALID = 0x0, + DEPTH_16 = 0x1, + DEPTH_X8_24 = 0x2, + DEPTH_8_24 = 0x3, + DEPTH_X8_24_FLOAT = 0x4, + DEPTH_8_24_FLOAT = 0x5, + DEPTH_32_FLOAT = 0x6, + DEPTH_X24_8_32_FLOAT = 0x7, +} DepthFormat; +typedef enum ZFormat { + Z_INVALID = 0x0, + Z_16 = 0x1, + Z_24 = 0x2, + Z_32_FLOAT = 0x3, +} ZFormat; +typedef enum StencilFormat { + STENCIL_INVALID = 0x0, + STENCIL_8 = 0x1, +} StencilFormat; +typedef enum CmaskMode { + CMASK_CLEAR_NONE = 0x0, + CMASK_CLEAR_ONE = 0x1, + CMASK_CLEAR_ALL = 0x2, + CMASK_ANY_EXPANDED = 0x3, + CMASK_ALPHA0_FRAG1 = 0x4, + CMASK_ALPHA0_FRAG2 = 0x5, + CMASK_ALPHA0_FRAG4 = 0x6, + CMASK_ALPHA0_FRAGS = 0x7, + CMASK_ALPHA1_FRAG1 = 0x8, + CMASK_ALPHA1_FRAG2 = 0x9, + CMASK_ALPHA1_FRAG4 = 0xa, + CMASK_ALPHA1_FRAGS = 0xb, + CMASK_ALPHAX_FRAG1 = 0xc, + CMASK_ALPHAX_FRAG2 = 0xd, + CMASK_ALPHAX_FRAG4 = 0xe, + CMASK_ALPHAX_FRAGS = 0xf, +} CmaskMode; +typedef enum QuadExportFormat { + EXPORT_UNUSED = 0x0, + EXPORT_32_R = 0x1, + EXPORT_32_GR = 0x2, + EXPORT_32_AR = 0x3, + EXPORT_FP16_ABGR = 0x4, + EXPORT_UNSIGNED16_ABGR = 0x5, + EXPORT_SIGNED16_ABGR = 0x6, + EXPORT_32_ABGR = 0x7, +} QuadExportFormat; +typedef enum QuadExportFormatOld { + EXPORT_4P_32BPC_ABGR = 0x0, + EXPORT_4P_16BPC_ABGR = 0x1, + EXPORT_4P_32BPC_GR = 0x2, + EXPORT_4P_32BPC_AR = 0x3, + EXPORT_2P_32BPC_ABGR = 0x4, + EXPORT_8P_32BPC_R = 0x5, +} QuadExportFormatOld; +typedef enum ColorFormat { + COLOR_INVALID = 0x0, + COLOR_8 = 0x1, + COLOR_16 = 0x2, + COLOR_8_8 = 0x3, + COLOR_32 = 0x4, + COLOR_16_16 = 0x5, + COLOR_10_11_11 = 0x6, + COLOR_11_11_10 = 0x7, + COLOR_10_10_10_2 = 0x8, + COLOR_2_10_10_10 = 0x9, + COLOR_8_8_8_8 = 0xa, + COLOR_32_32 = 0xb, + COLOR_16_16_16_16 = 0xc, + COLOR_RESERVED_13 = 0xd, + COLOR_32_32_32_32 = 0xe, + COLOR_RESERVED_15 = 0xf, + COLOR_5_6_5 = 0x10, + COLOR_1_5_5_5 = 0x11, + COLOR_5_5_5_1 = 0x12, + COLOR_4_4_4_4 = 0x13, + COLOR_8_24 = 0x14, + COLOR_24_8 = 0x15, + COLOR_X24_8_32_FLOAT = 0x16, + COLOR_RESERVED_23 = 0x17, +} ColorFormat; +typedef enum SurfaceFormat { + FMT_INVALID = 0x0, + FMT_8 = 0x1, + FMT_16 = 0x2, + FMT_8_8 = 0x3, + FMT_32 = 0x4, + FMT_16_16 = 0x5, + FMT_10_11_11 = 0x6, + FMT_11_11_10 = 0x7, + 
FMT_10_10_10_2 = 0x8, + FMT_2_10_10_10 = 0x9, + FMT_8_8_8_8 = 0xa, + FMT_32_32 = 0xb, + FMT_16_16_16_16 = 0xc, + FMT_32_32_32 = 0xd, + FMT_32_32_32_32 = 0xe, + FMT_RESERVED_4 = 0xf, + FMT_5_6_5 = 0x10, + FMT_1_5_5_5 = 0x11, + FMT_5_5_5_1 = 0x12, + FMT_4_4_4_4 = 0x13, + FMT_8_24 = 0x14, + FMT_24_8 = 0x15, + FMT_X24_8_32_FLOAT = 0x16, + FMT_RESERVED_33 = 0x17, + FMT_11_11_10_FLOAT = 0x18, + FMT_16_FLOAT = 0x19, + FMT_32_FLOAT = 0x1a, + FMT_16_16_FLOAT = 0x1b, + FMT_8_24_FLOAT = 0x1c, + FMT_24_8_FLOAT = 0x1d, + FMT_32_32_FLOAT = 0x1e, + FMT_10_11_11_FLOAT = 0x1f, + FMT_16_16_16_16_FLOAT = 0x20, + FMT_3_3_2 = 0x21, + FMT_6_5_5 = 0x22, + FMT_32_32_32_32_FLOAT = 0x23, + FMT_RESERVED_36 = 0x24, + FMT_1 = 0x25, + FMT_1_REVERSED = 0x26, + FMT_GB_GR = 0x27, + FMT_BG_RG = 0x28, + FMT_32_AS_8 = 0x29, + FMT_32_AS_8_8 = 0x2a, + FMT_5_9_9_9_SHAREDEXP = 0x2b, + FMT_8_8_8 = 0x2c, + FMT_16_16_16 = 0x2d, + FMT_16_16_16_FLOAT = 0x2e, + FMT_4_4 = 0x2f, + FMT_32_32_32_FLOAT = 0x30, + FMT_BC1 = 0x31, + FMT_BC2 = 0x32, + FMT_BC3 = 0x33, + FMT_BC4 = 0x34, + FMT_BC5 = 0x35, + FMT_BC6 = 0x36, + FMT_BC7 = 0x37, + FMT_32_AS_32_32_32_32 = 0x38, + FMT_APC3 = 0x39, + FMT_APC4 = 0x3a, + FMT_APC5 = 0x3b, + FMT_APC6 = 0x3c, + FMT_APC7 = 0x3d, + FMT_CTX1 = 0x3e, + FMT_RESERVED_63 = 0x3f, +} SurfaceFormat; +typedef enum BUF_DATA_FORMAT { + BUF_DATA_FORMAT_INVALID = 0x0, + BUF_DATA_FORMAT_8 = 0x1, + BUF_DATA_FORMAT_16 = 0x2, + BUF_DATA_FORMAT_8_8 = 0x3, + BUF_DATA_FORMAT_32 = 0x4, + BUF_DATA_FORMAT_16_16 = 0x5, + BUF_DATA_FORMAT_10_11_11 = 0x6, + BUF_DATA_FORMAT_11_11_10 = 0x7, + BUF_DATA_FORMAT_10_10_10_2 = 0x8, + BUF_DATA_FORMAT_2_10_10_10 = 0x9, + BUF_DATA_FORMAT_8_8_8_8 = 0xa, + BUF_DATA_FORMAT_32_32 = 0xb, + BUF_DATA_FORMAT_16_16_16_16 = 0xc, + BUF_DATA_FORMAT_32_32_32 = 0xd, + BUF_DATA_FORMAT_32_32_32_32 = 0xe, + BUF_DATA_FORMAT_RESERVED_15 = 0xf, +} BUF_DATA_FORMAT; +typedef enum IMG_DATA_FORMAT { + IMG_DATA_FORMAT_INVALID = 0x0, + IMG_DATA_FORMAT_8 = 0x1, + IMG_DATA_FORMAT_16 = 0x2, + IMG_DATA_FORMAT_8_8 = 0x3, + IMG_DATA_FORMAT_32 = 0x4, + IMG_DATA_FORMAT_16_16 = 0x5, + IMG_DATA_FORMAT_10_11_11 = 0x6, + IMG_DATA_FORMAT_11_11_10 = 0x7, + IMG_DATA_FORMAT_10_10_10_2 = 0x8, + IMG_DATA_FORMAT_2_10_10_10 = 0x9, + IMG_DATA_FORMAT_8_8_8_8 = 0xa, + IMG_DATA_FORMAT_32_32 = 0xb, + IMG_DATA_FORMAT_16_16_16_16 = 0xc, + IMG_DATA_FORMAT_32_32_32 = 0xd, + IMG_DATA_FORMAT_32_32_32_32 = 0xe, + IMG_DATA_FORMAT_RESERVED_15 = 0xf, + IMG_DATA_FORMAT_5_6_5 = 0x10, + IMG_DATA_FORMAT_1_5_5_5 = 0x11, + IMG_DATA_FORMAT_5_5_5_1 = 0x12, + IMG_DATA_FORMAT_4_4_4_4 = 0x13, + IMG_DATA_FORMAT_8_24 = 0x14, + IMG_DATA_FORMAT_24_8 = 0x15, + IMG_DATA_FORMAT_X24_8_32 = 0x16, + IMG_DATA_FORMAT_RESERVED_23 = 0x17, + IMG_DATA_FORMAT_RESERVED_24 = 0x18, + IMG_DATA_FORMAT_RESERVED_25 = 0x19, + IMG_DATA_FORMAT_RESERVED_26 = 0x1a, + IMG_DATA_FORMAT_RESERVED_27 = 0x1b, + IMG_DATA_FORMAT_RESERVED_28 = 0x1c, + IMG_DATA_FORMAT_RESERVED_29 = 0x1d, + IMG_DATA_FORMAT_RESERVED_30 = 0x1e, + IMG_DATA_FORMAT_RESERVED_31 = 0x1f, + IMG_DATA_FORMAT_GB_GR = 0x20, + IMG_DATA_FORMAT_BG_RG = 0x21, + IMG_DATA_FORMAT_5_9_9_9 = 0x22, + IMG_DATA_FORMAT_BC1 = 0x23, + IMG_DATA_FORMAT_BC2 = 0x24, + IMG_DATA_FORMAT_BC3 = 0x25, + IMG_DATA_FORMAT_BC4 = 0x26, + IMG_DATA_FORMAT_BC5 = 0x27, + IMG_DATA_FORMAT_BC6 = 0x28, + IMG_DATA_FORMAT_BC7 = 0x29, + IMG_DATA_FORMAT_RESERVED_42 = 0x2a, + IMG_DATA_FORMAT_RESERVED_43 = 0x2b, + IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, + IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, + IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, + IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, + 
IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, + IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, + IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, + IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, + IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, + IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, + IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, + IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, + IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, + IMG_DATA_FORMAT_4_4 = 0x39, + IMG_DATA_FORMAT_6_5_5 = 0x3a, + IMG_DATA_FORMAT_1 = 0x3b, + IMG_DATA_FORMAT_1_REVERSED = 0x3c, + IMG_DATA_FORMAT_32_AS_8 = 0x3d, + IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, + IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, +} IMG_DATA_FORMAT; +typedef enum BUF_NUM_FORMAT { + BUF_NUM_FORMAT_UNORM = 0x0, + BUF_NUM_FORMAT_SNORM = 0x1, + BUF_NUM_FORMAT_USCALED = 0x2, + BUF_NUM_FORMAT_SSCALED = 0x3, + BUF_NUM_FORMAT_UINT = 0x4, + BUF_NUM_FORMAT_SINT = 0x5, + BUF_NUM_FORMAT_SNORM_OGL = 0x6, + BUF_NUM_FORMAT_FLOAT = 0x7, +} BUF_NUM_FORMAT; +typedef enum IMG_NUM_FORMAT { + IMG_NUM_FORMAT_UNORM = 0x0, + IMG_NUM_FORMAT_SNORM = 0x1, + IMG_NUM_FORMAT_USCALED = 0x2, + IMG_NUM_FORMAT_SSCALED = 0x3, + IMG_NUM_FORMAT_UINT = 0x4, + IMG_NUM_FORMAT_SINT = 0x5, + IMG_NUM_FORMAT_SNORM_OGL = 0x6, + IMG_NUM_FORMAT_FLOAT = 0x7, + IMG_NUM_FORMAT_RESERVED_8 = 0x8, + IMG_NUM_FORMAT_SRGB = 0x9, + IMG_NUM_FORMAT_UBNORM = 0xa, + IMG_NUM_FORMAT_UBNORM_OGL = 0xb, + IMG_NUM_FORMAT_UBINT = 0xc, + IMG_NUM_FORMAT_UBSCALED = 0xd, + IMG_NUM_FORMAT_RESERVED_14 = 0xe, + IMG_NUM_FORMAT_RESERVED_15 = 0xf, +} IMG_NUM_FORMAT; +typedef enum TileType { + ARRAY_COLOR_TILE = 0x0, + ARRAY_DEPTH_TILE = 0x1, +} TileType; +typedef enum NonDispTilingOrder { + ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, + ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, +} NonDispTilingOrder; +typedef enum MicroTileMode { + ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, + ADDR_SURF_THIN_MICRO_TILING = 0x1, + ADDR_SURF_DEPTH_MICRO_TILING = 0x2, + ADDR_SURF_ROTATED_MICRO_TILING = 0x3, + ADDR_SURF_THICK_MICRO_TILING = 0x4, +} MicroTileMode; +typedef enum TileSplit { + ADDR_SURF_TILE_SPLIT_64B = 0x0, + ADDR_SURF_TILE_SPLIT_128B = 0x1, + ADDR_SURF_TILE_SPLIT_256B = 0x2, + ADDR_SURF_TILE_SPLIT_512B = 0x3, + ADDR_SURF_TILE_SPLIT_1KB = 0x4, + ADDR_SURF_TILE_SPLIT_2KB = 0x5, + ADDR_SURF_TILE_SPLIT_4KB = 0x6, +} TileSplit; +typedef enum SampleSplit { + ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, + ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, + ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, + ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, +} SampleSplit; +typedef enum PipeConfig { + ADDR_SURF_P2 = 0x0, + ADDR_SURF_P2_RESERVED0 = 0x1, + ADDR_SURF_P2_RESERVED1 = 0x2, + ADDR_SURF_P2_RESERVED2 = 0x3, + ADDR_SURF_P4_8x16 = 0x4, + ADDR_SURF_P4_16x16 = 0x5, + ADDR_SURF_P4_16x32 = 0x6, + ADDR_SURF_P4_32x32 = 0x7, + ADDR_SURF_P8_16x16_8x16 = 0x8, + ADDR_SURF_P8_16x32_8x16 = 0x9, + ADDR_SURF_P8_32x32_8x16 = 0xa, + ADDR_SURF_P8_16x32_16x16 = 0xb, + ADDR_SURF_P8_32x32_16x16 = 0xc, + ADDR_SURF_P8_32x32_16x32 = 0xd, + ADDR_SURF_P8_32x64_32x32 = 0xe, +} PipeConfig; +typedef enum NumBanks { + ADDR_SURF_2_BANK = 0x0, + ADDR_SURF_4_BANK = 0x1, + ADDR_SURF_8_BANK = 0x2, + ADDR_SURF_16_BANK = 0x3, +} NumBanks; +typedef enum BankWidth { + ADDR_SURF_BANK_WIDTH_1 = 0x0, + ADDR_SURF_BANK_WIDTH_2 = 0x1, + ADDR_SURF_BANK_WIDTH_4 = 0x2, + ADDR_SURF_BANK_WIDTH_8 = 0x3, +} BankWidth; +typedef enum BankHeight { + ADDR_SURF_BANK_HEIGHT_1 = 0x0, + ADDR_SURF_BANK_HEIGHT_2 = 0x1, + ADDR_SURF_BANK_HEIGHT_4 = 0x2, + ADDR_SURF_BANK_HEIGHT_8 = 0x3, +} BankHeight; +typedef enum BankWidthHeight { + ADDR_SURF_BANK_WH_1 = 0x0, + ADDR_SURF_BANK_WH_2 = 0x1, + ADDR_SURF_BANK_WH_4 = 0x2, + ADDR_SURF_BANK_WH_8 = 0x3, +} 
BankWidthHeight; +typedef enum MacroTileAspect { + ADDR_SURF_MACRO_ASPECT_1 = 0x0, + ADDR_SURF_MACRO_ASPECT_2 = 0x1, + ADDR_SURF_MACRO_ASPECT_4 = 0x2, + ADDR_SURF_MACRO_ASPECT_8 = 0x3, +} MacroTileAspect; +typedef enum TCC_CACHE_POLICIES { + TCC_CACHE_POLICY_LRU = 0x0, + TCC_CACHE_POLICY_STREAM = 0x1, + TCC_CACHE_POLICY_BYPASS = 0x2, +} TCC_CACHE_POLICIES; +typedef enum PERFMON_COUNTER_MODE { + PERFMON_COUNTER_MODE_ACCUM = 0x0, + PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1, + PERFMON_COUNTER_MODE_MAX = 0x2, + PERFMON_COUNTER_MODE_DIRTY = 0x3, + PERFMON_COUNTER_MODE_SAMPLE = 0x4, + PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5, + PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6, + PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7, + PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8, + PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9, + PERFMON_COUNTER_MODE_RESERVED = 0xf, +} PERFMON_COUNTER_MODE; +typedef enum PERFMON_SPM_MODE { + PERFMON_SPM_MODE_OFF = 0x0, + PERFMON_SPM_MODE_16BIT_CLAMP = 0x1, + PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2, + PERFMON_SPM_MODE_32BIT_CLAMP = 0x3, + PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4, + PERFMON_SPM_MODE_RESERVED_5 = 0x5, + PERFMON_SPM_MODE_RESERVED_6 = 0x6, + PERFMON_SPM_MODE_RESERVED_7 = 0x7, + PERFMON_SPM_MODE_TEST_MODE_0 = 0x8, + PERFMON_SPM_MODE_TEST_MODE_1 = 0x9, + PERFMON_SPM_MODE_TEST_MODE_2 = 0xa, +} PERFMON_SPM_MODE; +typedef enum SurfaceTiling { + ARRAY_LINEAR = 0x0, + ARRAY_TILED = 0x1, +} SurfaceTiling; +typedef enum SurfaceArray { + ARRAY_1D = 0x0, + ARRAY_2D = 0x1, + ARRAY_3D = 0x2, + ARRAY_3D_SLICE = 0x3, +} SurfaceArray; +typedef enum ColorArray { + ARRAY_2D_ALT_COLOR = 0x0, + ARRAY_2D_COLOR = 0x1, + ARRAY_3D_SLICE_COLOR = 0x3, +} ColorArray; +typedef enum DepthArray { + ARRAY_2D_ALT_DEPTH = 0x0, + ARRAY_2D_DEPTH = 0x1, +} DepthArray; + +#endif /* DCE_8_0_ENUM_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h index 8a2930734477..c331c9fe7b81 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h @@ -4130,6 +4130,18 @@ #define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe #define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x10000 #define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x1 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x0 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x2 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x1 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x4 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x2 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x10 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x4 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x20 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x5 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x40 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x6 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0 #define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 713aec954692..aec38fc3834f 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -109,6 +109,8 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, CGS_SYSTEM_INFO_PCIE_GEN_INFO, CGS_SYSTEM_INFO_PCIE_MLW, + CGS_SYSTEM_INFO_CG_FLAGS, + 
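+	/* power-gating capability flags (AMD_PG_SUPPORT_*); the cz and tonga hwmgr backends query this id through cgs_query_system_info() */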
CGS_SYSTEM_INFO_PG_FLAGS, CGS_SYSTEM_INFO_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h new file mode 100644 index 000000000000..d21c6b14662f --- /dev/null +++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h @@ -0,0 +1,102 @@ +/* + * Volcanic Islands IV SRC Register documentation + * + * Copyright (C) 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _IVSRCID_VISLANDS30_H_ +#define _IVSRCID_VISLANDS30_H_ + + +// IV Source IDs + +#define VISLANDS30_IV_SRCID_D1_V_UPDATE_INT 7 // 0x07 +#define VISLANDS30_IV_EXTID_D1_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D1_GRPH_PFLIP 8 // 0x08 +#define VISLANDS30_IV_EXTID_D1_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D2_V_UPDATE_INT 9 // 0x09 +#define VISLANDS30_IV_EXTID_D2_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D2_GRPH_PFLIP 10 // 0x0a +#define VISLANDS30_IV_EXTID_D2_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D3_V_UPDATE_INT 11 // 0x0b +#define VISLANDS30_IV_EXTID_D3_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D3_GRPH_PFLIP 12 // 0x0c +#define VISLANDS30_IV_EXTID_D3_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D4_V_UPDATE_INT 13 // 0x0d +#define VISLANDS30_IV_EXTID_D4_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D4_GRPH_PFLIP 14 // 0x0e +#define VISLANDS30_IV_EXTID_D4_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D5_V_UPDATE_INT 15 // 0x0f +#define VISLANDS30_IV_EXTID_D5_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D5_GRPH_PFLIP 16 // 0x10 +#define VISLANDS30_IV_EXTID_D5_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D6_V_UPDATE_INT 17 // 0x11 +#define VISLANDS30_IV_EXTID_D6_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D6_GRPH_PFLIP 18 // 0x12 +#define VISLANDS30_IV_EXTID_D6_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A 0 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_B 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B 1 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_C 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C 2 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_D 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D 3 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_E 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E 4 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_F 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F 5 + +#define VISLANDS30_IV_SRCID_HPD_RX_A 42 // 0x2a +#define 
VISLANDS30_IV_EXTID_HPD_RX_A 6 + +#define VISLANDS30_IV_SRCID_HPD_RX_B 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_B 7 + +#define VISLANDS30_IV_SRCID_HPD_RX_C 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_C 8 + +#define VISLANDS30_IV_SRCID_HPD_RX_D 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_D 9 + +#define VISLANDS30_IV_SRCID_HPD_RX_E 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_E 10 + +#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_F 11 + +#endif // _IVSRCID_VISLANDS30_H_ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 888250b33ea8..a09d9f352871 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -221,7 +221,7 @@ struct kgd2kfd_calls { int (*resume)(struct kfd_dev *kfd); }; -bool kgd2kfd_init(unsigned interface_version, +int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f); #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index aa67244a77ae..9d2290044708 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -29,6 +29,7 @@ #include "pp_instance.h" #include "power_state.h" #include "eventmanager.h" +#include "pp_debug.h" #define PP_CHECK(handle) \ do { \ @@ -402,8 +403,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, data.requested_ui_label = power_state_convert(ps); ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + break; } - break; + case AMD_PP_EVENT_COMPLETE_INIT: + ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + break; default: break; } @@ -433,7 +437,10 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) case PP_StateUILabel_Performance: return POWER_STATE_TYPE_PERFORMANCE; default: - return POWER_STATE_TYPE_DEFAULT; + if (state->classification.flags & PP_StateClassificationFlag_Boot) + return POWER_STATE_TYPE_INTERNAL_BOOT; + else + return POWER_STATE_TYPE_DEFAULT; } } @@ -535,6 +542,112 @@ static int pp_dpm_get_temperature(void *handle) return hwmgr->hwmgr_func->get_temperature(hwmgr); } +static int pp_dpm_get_pp_num_states(void *handle, + struct pp_states_info *data) +{ + struct pp_hwmgr *hwmgr; + int i; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->ps == NULL) + return -EINVAL; + + data->nums = hwmgr->num_ps; + + for (i = 0; i < hwmgr->num_ps; i++) { + struct pp_power_state *state = (struct pp_power_state *) + ((unsigned long)hwmgr->ps + i * hwmgr->ps_size); + switch (state->classification.ui_label) { + case PP_StateUILabel_Battery: + data->states[i] = POWER_STATE_TYPE_BATTERY; + break; + case PP_StateUILabel_Balanced: + data->states[i] = POWER_STATE_TYPE_BALANCED; + break; + case PP_StateUILabel_Performance: + data->states[i] = POWER_STATE_TYPE_PERFORMANCE; + break; + default: + if (state->classification.flags & PP_StateClassificationFlag_Boot) + data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT; + else + data->states[i] = POWER_STATE_TYPE_DEFAULT; + } + } + + return 0; +} + +static int pp_dpm_get_pp_table(void *handle, char **table) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_pp_table == NULL) + return -EINVAL; + + 
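+	/* dispatch to the asic-specific backend; the fiji and tonga implementations in this series return the size of their SMU dpm table in bytes */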
return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); +} + +static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->set_pp_table == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); +} + +static int pp_dpm_force_clock_level(void *handle, + enum pp_clock_type type, int level) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->force_clock_level == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level); +} + +static int pp_dpm_print_clock_levels(void *handle, + enum pp_clock_type type, char *buf) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->print_clock_levels == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -552,6 +665,11 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .get_fan_control_mode = pp_dpm_get_fan_control_mode, .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, .get_fan_speed_percent = pp_dpm_get_fan_speed_percent, + .get_pp_num_states = pp_dpm_get_pp_num_states, + .get_pp_table = pp_dpm_get_pp_table, + .set_pp_table = pp_dpm_set_pp_table, + .force_clock_level = pp_dpm_force_clock_level, + .print_clock_levels = pp_dpm_print_clock_levels, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, @@ -635,10 +753,10 @@ int amd_powerplay_fini(void *handle) /* export this function to DAL */ -int amd_powerplay_display_configuration_change(void *handle, const void *input) +int amd_powerplay_display_configuration_change(void *handle, + const struct amd_pp_display_configuration *display_config) { struct pp_hwmgr *hwmgr; - const struct amd_pp_display_configuration *display_config = input; PP_CHECK((struct pp_instance *)handle); @@ -650,7 +768,7 @@ int amd_powerplay_display_configuration_change(void *handle, const void *input) } int amd_powerplay_get_display_power_level(void *handle, - struct amd_pp_dal_clock_info *output) + struct amd_pp_simple_clock_info *output) { struct pp_hwmgr *hwmgr; @@ -663,3 +781,86 @@ int amd_powerplay_get_display_power_level(void *handle, return phm_get_dal_power_level(hwmgr, output); } + +int amd_powerplay_get_current_clocks(void *handle, + struct amd_pp_clock_info *clocks) +{ + struct pp_hwmgr *hwmgr; + struct amd_pp_simple_clock_info simple_clocks; + struct pp_clock_info hw_clocks; + + PP_CHECK((struct pp_instance *)handle); + + if (clocks == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + phm_get_dal_power_level(hwmgr, &simple_clocks); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { + if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment)) + PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1); + } else { + if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity)) + 
PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1); + } + + clocks->min_engine_clock = hw_clocks.min_eng_clk; + clocks->max_engine_clock = hw_clocks.max_eng_clk; + clocks->min_memory_clock = hw_clocks.min_mem_clk; + clocks->max_memory_clock = hw_clocks.max_mem_clk; + clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth; + clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth; + + clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; + clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; + + clocks->max_clocks_state = simple_clocks.level; + + if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) { + clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; + clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; + } + + return 0; + +} + +int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) +{ + int result = -1; + + struct pp_hwmgr *hwmgr; + + PP_CHECK((struct pp_instance *)handle); + + if (clocks == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + result = phm_get_clock_by_type(hwmgr, type, clocks); + + return result; +} + +int amd_powerplay_get_display_mode_validation_clocks(void *handle, + struct amd_pp_simple_clock_info *clocks) +{ + int result = -1; + struct pp_hwmgr *hwmgr; + + PP_CHECK((struct pp_instance *)handle); + + if (clocks == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) + result = phm_get_max_high_clocks(hwmgr, clocks); + + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index 83be3cf210e0..6b52c78cb404 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = { }; static const pem_event_action *complete_init_event[] = { + unblock_adjust_power_state_tasks, adjust_power_state_tasks, enable_gfx_clock_gating_tasks, enable_gfx_voltage_island_power_gating_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 52a3efc97f05..46410e3c7349 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c @@ -31,7 +31,7 @@ static int pem_init(struct pp_eventmgr *eventmgr) { int result = 0; - struct pem_event_data event_data; + struct pem_event_data event_data = { {0} }; /* Initialize PowerPlay feature info */ pem_init_feature_info(eventmgr); @@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr) static void pem_fini(struct pp_eventmgr *eventmgr) { - struct pem_event_data event_data; + struct pem_event_data event_data = { {0} }; pem_uninit_featureInfo(eventmgr); pem_unregister_interrupts(eventmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index ad7700822a1c..ff08ce41bde9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) } } else { cz_dpm_update_vce_dpm(hwmgr); - cz_enable_disable_vce_dpm(hwmgr, true); + cz_enable_disable_vce_dpm(hwmgr, !bgate); return 0; } diff --git 
a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 0874ab42ee95..b8d6a82c1be2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); uint32_t i; + struct cgs_system_info sys_info = {0}; + int result; cz_hwmgr->gfx_ramp_step = 256*25/100; @@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageIsland); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + } + return 0; } @@ -715,7 +733,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, unsigned long clock = 0; unsigned long level; unsigned long stable_pstate_sclk; - struct PP_Clocks clocks; unsigned long percentage; cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; @@ -726,8 +743,9 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, else cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; - /*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/ - clock = clocks.engineClock; + clock = hwmgr->display_config.min_core_set_clock; + if (clock == 0) + printk(KERN_ERR "[ powerplay ] min_core_set_clock not set\n"); if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { cz_hwmgr->sclk_dpm.hard_min_clk = clock; @@ -883,9 +901,9 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, if (pnew_state->action == FORCE_HIGH) cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); - else if(pnew_state->action == CANCEL_FORCE_HIGH) - cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); - else + else if (pnew_state->action == CANCEL_FORCE_HIGH) + cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); + else cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); } return 0; @@ -1110,9 +1128,10 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cast_const_PhwCzPowerState(&pcurrent_ps->hardware); struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - struct PP_Clocks clocks; + struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high; - unsigned long num_of_active_displays = 4; + uint32_t num_of_active_displays = 0; + struct cgs_display_info info = {0}; cz_ps->evclk = hwmgr->vce_arbiter.evclk; cz_ps->ecclk = hwmgr->vce_arbiter.ecclk; @@ -1124,12 +1143,15 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - /* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */ - /* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */ + clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? 
+ hwmgr->display_config.min_mem_set_clock : + cz_hwmgr->sys_info.nbp_memory_clock[1]; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_of_active_displays = info.display_count; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; - else - clocks.memoryClock = 0; if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk) clocks.memoryClock = hwmgr->gfx_arbiter.mclk; @@ -1199,6 +1221,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n"); return result; } + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings)); if (result != 0) { @@ -1630,10 +1653,10 @@ static void cz_hw_print_display_cfg( & PWRMGT_SEPARATION_TIME_MASK) << PWRMGT_SEPARATION_TIME_SHIFT; - data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) + data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) << PWRMGT_DISABLE_CPU_CSTATES_SHIFT; - data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0) + data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0) << PWRMGT_DISABLE_CPU_PSTATES_SHIFT; PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", @@ -1648,9 +1671,9 @@ static void cz_hw_print_display_cfg( } - static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, +static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) - { +{ struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); if (separation_time != @@ -1678,20 +1701,19 @@ static void cz_hw_print_display_cfg( return 0; } - static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info*info) +static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info) { uint32_t i; - const struct phm_clock_voltage_dependency_table * table = + const struct phm_clock_voltage_dependency_table *table = hwmgr->dyn_state.vddc_dep_on_dal_pwrl; - const struct phm_clock_and_voltage_limits* limits = + const struct phm_clock_and_voltage_limits *limits = &hwmgr->dyn_state.max_clock_voltage_on_ac; info->engine_max_clock = limits->sclk; info->memory_max_clock = limits->mclk; for (i = table->count - 1; i > 0; i--) { - if (limits->vddc >= table->entries[i].v) { info->level = table->entries[i].clk; return 0; @@ -1700,6 +1722,158 @@ static void cz_hw_print_display_cfg( return -EINVAL; } +static int cz_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, int level) +{ + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + (1 << level)); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + (1 << level)); + break; + default: + break; + } + + return 0; +} + +static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct phm_clock_voltage_dependency_table *sclk_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + int i, now, size = 0; + + switch (type) { + case PP_SCLK: + now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, + CURR_SCLK_INDEX); + + for (i = 0; 
i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->entries[i].clk / 100, + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + const struct cz_power_state *ps; + struct cz_hwmgr *data; + uint32_t level_index; + uint32_t i; + + if (level == NULL || hwmgr == NULL || state == NULL) + return -EINVAL; + + data = (struct cz_hwmgr *)(hwmgr->backend); + ps = cast_const_PhwCzPowerState(state); + + level_index = index > ps->level - 1 ? ps->level - 1 : index; + + level->coreClock = ps->levels[level_index].engineClock; + + if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { + for (i = 1; i < ps->level; i++) { + if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) { + level->coreClock = ps->levels[i].engineClock; + break; + } + } + } + + if (level_index == 0) + level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]; + else + level->memory_clock = data->sys_info.nbp_memory_clock[0]; + + level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; + level->nonLocalMemoryFreq = 0; + level->nonLocalMemoryWidth = 0; + + return 0; +} + +static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) +{ + const struct cz_power_state *ps = cast_const_PhwCzPowerState(state); + + clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); + clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); + + return 0; +} + +static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend); + int i; + struct phm_clock_voltage_dependency_table *table; + + clocks->count = cz_get_max_sclk_level(hwmgr); + switch (type) { + case amd_pp_disp_clock: + for (i = 0; i < clocks->count; i++) + clocks->clock[i] = data->sys_info.display_clock[i]; + break; + case amd_pp_sys_clock: + table = hwmgr->dyn_state.vddc_dependency_on_sclk; + for (i = 0; i < clocks->count; i++) + clocks->clock[i] = table->entries[i].clk; + break; + case amd_pp_mem_clock: + clocks->count = CZ_NUM_NBPMEMORYCLOCK; + for (i = 0; i < clocks->count; i++) + clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; + break; + default: + return -1; + } + + return 0; +} + +static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) +{ + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + unsigned long level; + const struct phm_clock_and_voltage_limits *limits = + &hwmgr->dyn_state.max_clock_voltage_on_ac; + + if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) + return -EINVAL; + + level = cz_get_max_sclk_level(hwmgr) - 1; + + if (level < table->count) + clocks->engine_max_clock = table->entries[level].clk; + else + clocks->engine_max_clock = table->entries[table->count - 1].clk; + + clocks->memory_max_clock = limits->mclk; + + return 0; +} + static const struct pp_hwmgr_func cz_hwmgr_funcs = { .backend_init = cz_hwmgr_backend_init, .backend_fini = cz_hwmgr_backend_fini, @@ -1718,7 +1892,13 @@ static const struct 
pp_hwmgr_func cz_hwmgr_funcs = { .print_current_perforce_level = cz_print_current_perforce_level, .set_cpu_power_state = cz_set_cpu_power_state, .store_cc6_data = cz_store_cc6_data, - .get_dal_power_level= cz_get_dal_power_level, + .force_clock_level = cz_force_clock_level, + .print_clock_levels = cz_print_clock_levels, + .get_dal_power_level = cz_get_dal_power_level, + .get_performance_level = cz_get_performance_level, + .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, + .get_clock_by_type = cz_get_clock_by_type, + .get_max_high_clocks = cz_get_max_high_clocks, }; int cz_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 28031a7eddba..5cca2ecc6bea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -5073,6 +5073,125 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + *table = (char *)&data->smc_state_table; + + return sizeof(struct SMU73_Discrete_DpmTable); +} + +static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + void *table = (void *)&data->smc_state_table; + + memcpy(table, buf, size); + + return 0; +} + +static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, int level) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_PCIE: + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (1 << level)); + break; + default: + break; + } + + return 0; +} + +static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? 
"*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = fiji_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .backend_init = &fiji_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, @@ -5108,6 +5227,10 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, .set_fan_control_mode = fiji_set_fan_control_mode, .get_fan_control_mode = fiji_get_fan_control_mode, + .get_pp_table = fiji_get_pp_table, + .set_pp_table = fiji_set_pp_table, + .force_clock_level = fiji_force_clock_level, + .print_clock_levels = fiji_print_clock_levels, }; int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 0f2d5e4bc241..be31bed2538a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -26,7 +26,7 @@ #include "power_state.h" #include "pp_acpi.h" #include "amd_acpi.h" -#include "amd_powerplay.h" +#include "pp_debug.h" #define PHM_FUNC_CHECK(hw) \ do { \ @@ -313,13 +313,12 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, } int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info *info) + struct amd_pp_simple_clock_info *info) { PHM_FUNC_CHECK(hwmgr); if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL) return -EINVAL; - return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info); } @@ -332,3 +331,91 @@ int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr) return 0; } + + +int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + PHM_FUNC_CHECK(hwmgr); + if (hwmgr->hwmgr_func->get_performance_level == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level); + + +} + + +/** +* Gets Clock Info. +* +* @param pHwMgr the address of the powerplay hardware manager. +* @param pPowerState the address of the Power State structure. +* @param pClockInfo the address of PP_ClockInfo structure where the result will be returned. +* @exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the back-end. 
+*/ +int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info, + PHM_PerformanceLevelDesignation designation) +{ + int result; + PHM_PerformanceLevel performance_level; + + PHM_FUNC_CHECK(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL); + PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL); + + result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result); + + + pclock_info->min_mem_clk = performance_level.memory_clock; + pclock_info->min_eng_clk = performance_level.coreClock; + pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth; + + + result = phm_get_performance_level(hwmgr, state, designation, + (hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result); + + pclock_info->max_mem_clk = performance_level.memory_clock; + pclock_info->max_eng_clk = performance_level.coreClock; + pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth; + + return 0; +} + +int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info); + +} + +int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_clock_by_type == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks); + +} + +int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_max_high_clocks == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks); +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h index b7429a527828..b10df328d58c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h @@ -293,7 +293,7 @@ fInt GetScaledFraction(int X, int factor) } if (factor == 1) - return (ConvertToFraction(X)); + return ConvertToFraction(X); fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); @@ -371,7 +371,7 @@ fInt fDivide (fInt X, fInt Y) fZERO = ConvertToFraction(0); if (Equal(Y, fZERO)) - return fZERO; + return fZERO; longlongX = (int64_t)X.full; longlongY = (int64_t)Y.full; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 44a925006479..aec4f8346d9c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); phw_tonga_ulv_parm *ulv; + struct cgs_system_info sys_info = {0}; PP_ASSERT_WITH_CODE((NULL != hwmgr), 
"Invalid Parameter!", return -1;); @@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddc_phase_shed_control = 0; - if (0 == result) { - struct cgs_system_info sys_info = {0}; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + } + if (0 == result) { data->is_tlu_enabled = 0; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = TONGA_MAX_HARDWARE_POWERLEVELS; @@ -6018,6 +6033,125 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + *table = (char *)&data->smc_state_table; + + return sizeof(struct SMU72_Discrete_DpmTable); +} + +static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + void *table = (void *)&data->smc_state_table; + + memcpy(table, buf, size); + + return 0; +} + +static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, int level) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_PCIE: + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (1 << level)); + break; + default: + break; + } + + return 0; +} + +static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? 
"*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = tonga_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .backend_init = &tonga_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, @@ -6055,6 +6189,10 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .check_states_equal = tonga_check_states_equal, .set_fan_control_mode = tonga_set_fan_control_mode, .get_fan_control_mode = tonga_get_fan_control_mode, + .get_pp_table = tonga_get_pp_table, + .set_pp_table = tonga_set_pp_table, + .force_clock_level = tonga_force_clock_level, + .print_clock_levels = tonga_print_clock_levels, }; int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index e61a3e67852e..7255f7ddf93a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -29,6 +29,7 @@ #include "amd_shared.h" #include "cgs_common.h" + enum amd_pp_event { AMD_PP_EVENT_INITIALIZE = 0, AMD_PP_EVENT_UNINITIALIZE, @@ -123,6 +124,7 @@ enum amd_dpm_forced_level { AMD_DPM_FORCED_LEVEL_AUTO = 0, AMD_DPM_FORCED_LEVEL_LOW = 1, AMD_DPM_FORCED_LEVEL_HIGH = 2, + AMD_DPM_FORCED_LEVEL_MANUAL = 3, }; struct amd_pp_init { @@ -212,12 +214,55 @@ struct amd_pp_display_configuration { uint32_t dce_tolerable_mclk_in_active_latency; }; -struct amd_pp_dal_clock_info { +struct amd_pp_simple_clock_info { uint32_t engine_max_clock; uint32_t memory_max_clock; uint32_t level; }; +enum PP_DAL_POWERLEVEL { + PP_DAL_POWERLEVEL_INVALID = 0, + PP_DAL_POWERLEVEL_ULTRALOW, + PP_DAL_POWERLEVEL_LOW, + PP_DAL_POWERLEVEL_NOMINAL, + PP_DAL_POWERLEVEL_PERFORMANCE, + + PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, + PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, + PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, + PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, + PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, + PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, + PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, + PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, +}; + +struct amd_pp_clock_info { + uint32_t min_engine_clock; + uint32_t max_engine_clock; + uint32_t min_memory_clock; + uint32_t max_memory_clock; + uint32_t min_bus_bandwidth; + uint32_t max_bus_bandwidth; + uint32_t max_engine_clock_in_sr; + uint32_t min_engine_clock_in_sr; + enum PP_DAL_POWERLEVEL max_clocks_state; +}; + +enum amd_pp_clock_type { + amd_pp_disp_clock = 1, + amd_pp_sys_clock, + amd_pp_mem_clock +}; + +#define MAX_NUM_CLOCKS 16 + +struct amd_pp_clocks { + uint32_t count; + uint32_t 
clock[MAX_NUM_CLOCKS]; +}; + + enum { PP_GROUP_UNKNOWN = 0, PP_GROUP_GFX = 1, @@ -225,6 +270,17 @@ enum { PP_GROUP_MAX }; +enum pp_clock_type { + PP_SCLK, + PP_MCLK, + PP_PCIE, +}; + +struct pp_states_info { + uint32_t nums; + uint32_t states[16]; +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -278,6 +334,11 @@ struct amd_powerplay_funcs { int (*get_fan_control_mode)(void *handle); int (*set_fan_speed_percent)(void *handle, uint32_t percent); int (*get_fan_speed_percent)(void *handle, uint32_t *speed); + int (*get_pp_num_states)(void *handle, struct pp_states_info *data); + int (*get_pp_table)(void *handle, char **table); + int (*set_pp_table)(void *handle, const char *buf, size_t size); + int (*force_clock_level)(void *handle, enum pp_clock_type type, int level); + int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); }; struct amd_powerplay { @@ -288,12 +349,23 @@ struct amd_powerplay { int amd_powerplay_init(struct amd_pp_init *pp_init, struct amd_powerplay *amd_pp); + int amd_powerplay_fini(void *handle); -int amd_powerplay_display_configuration_change(void *handle, const void *input); +int amd_powerplay_display_configuration_change(void *handle, + const struct amd_pp_display_configuration *input); int amd_powerplay_get_display_power_level(void *handle, - struct amd_pp_dal_clock_info *output); + struct amd_pp_simple_clock_info *output); +int amd_powerplay_get_current_clocks(void *handle, + struct amd_pp_clock_info *output); + +int amd_powerplay_get_clock_by_type(void *handle, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks); + +int amd_powerplay_get_display_mode_validation_clocks(void *handle, + struct amd_pp_simple_clock_info *output); #endif /* _AMD_POWERPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 91795efe1336..040d3f7cbf49 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -31,6 +31,7 @@ struct pp_power_state; enum amd_dpm_forced_level; struct PP_TemperatureRange; + struct phm_fan_speed_info { uint32_t min_percent; uint32_t max_percent; @@ -290,6 +291,15 @@ struct PP_Clocks { uint32_t engineClockInSR; }; +struct pp_clock_info { + uint32_t min_mem_clk; + uint32_t max_mem_clk; + uint32_t min_eng_clk; + uint32_t max_eng_clk; + uint32_t min_bus_bandwidth; + uint32_t max_bus_bandwidth; +}; + struct phm_platform_descriptor { uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; uint32_t vbiosInterruptId; @@ -323,24 +333,6 @@ struct phm_clocks { uint32_t clock[MAX_NUM_CLOCKS]; }; -enum PP_DAL_POWERLEVEL { - PP_DAL_POWERLEVEL_INVALID = 0, - PP_DAL_POWERLEVEL_ULTRALOW, - PP_DAL_POWERLEVEL_LOW, - PP_DAL_POWERLEVEL_NOMINAL, - PP_DAL_POWERLEVEL_PERFORMANCE, - - PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, - PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, - PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, - PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, - PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, - PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, - PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, - PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, -}; - - extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); @@ -375,11 +367,25 @@ extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, const struct 
amd_pp_display_configuration *display_config); extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info*info); + struct amd_pp_simple_clock_info *info); extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr); extern int phm_power_down_asic(struct pp_hwmgr *hwmgr); +extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level); + +extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + struct pp_clock_info *pclock_info, + PHM_PerformanceLevelDesignation designation); + +extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); + +extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + +extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); + #endif /* _HARDWARE_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index aeaa3dbba525..928f5a740cba 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -325,8 +325,18 @@ struct pp_hwmgr_func { bool cc6_disable, bool pstate_disable, bool pstate_switch_disable); int (*get_dal_power_level)(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info *info); + struct amd_pp_simple_clock_info *info); + int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *, + PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *); + int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); + int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); int (*power_off_asic)(struct pp_hwmgr *hwmgr); + int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table); + int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size); + int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level); + int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); }; struct pp_table_func { diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 8b2becd1aa07..a5ff9458d359 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -229,6 +229,14 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) amd_sched_wakeup(entity->sched); } +static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) +{ + struct amd_sched_entity *entity = + container_of(cb, struct amd_sched_entity, cb); + entity->dependency = NULL; + fence_put(f); +} + static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; @@ -251,7 +259,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) } /* Wait for fence to be scheduled */ - entity->cb.func = amd_sched_entity_wakeup; + entity->cb.func = amd_sched_entity_clear_dep; list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); return true; } diff --git a/drivers/gpu/drm/arm/Kconfig 
b/drivers/gpu/drm/arm/Kconfig
new file mode 100644
index 000000000000..eaed454e043c
--- /dev/null
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -0,0 +1,27 @@
+config DRM_ARM
+	bool
+	help
+	  Choose this option to select drivers for ARM's devices
+
+config DRM_HDLCD
+	tristate "ARM HDLCD"
+	depends on DRM && OF && (ARM || ARM64)
+	depends on COMMON_CLK
+	select DRM_ARM
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_KMS_CMA_HELPER
+	help
+	  Choose this option if you have an ARM High Definition Colour LCD
+	  controller.
+
+	  If M is selected the module will be called hdlcd.
+
+config DRM_HDLCD_SHOW_UNDERRUN
+	bool "Show underrun conditions"
+	depends on DRM_HDLCD
+	default n
+	help
+	  Enable this option to show in red colour the pixels that the
+	  HDLCD device did not fetch from framebuffer due to underrun
+	  conditions.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
new file mode 100644
index 000000000000..89dcb7bab93a
--- /dev/null
+++ b/drivers/gpu/drm/arm/Makefile
@@ -0,0 +1,2 @@
+hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
+obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
new file mode 100644
index 000000000000..fef1b04c2aab
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Implementation of a CRTC class for the HDLCD driver.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+#include <video/videomode.h>
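
/*
 * Note on the tonga_print_clock_levels()/tonga_force_clock_level() hunks
 * above: the print callback emits one line per DPM level in the form
 * "<index>: <freq>Mhz", appending "*" to the first table entry at or
 * above the clock read back from the SMC, and the force callback (only
 * honoured in AMD_DPM_FORCED_LEVEL_MANUAL mode) turns a level index into
 * a one-bit enabled mask, (1 << level).  A minimal userspace sketch of a
 * consumer of that text format follows; the sysfs path is an assumption
 * for illustration, not something this diff adds.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed sysfs location; the diff only adds the hwmgr callback. */
	FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_sclk", "r");
	char line[64];
	unsigned int mhz;
	int idx;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Driver format: "%d: %uMhz %s\n" (value / 100 is MHz). */
		if (sscanf(line, "%d: %uMhz", &idx, &mhz) == 2)
			printf("level %d = %u MHz%s\n", idx, mhz,
			       strchr(line, '*') ? " (current)" : "");
	}
	fclose(f);
	return 0;
}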
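
/*
 * Note on the DAL-facing queries added to amd_powerplay.h: results from
 * amd_powerplay_get_clock_by_type() come back in the fixed-size struct
 * amd_pp_clocks.  A self-contained sketch of walking such a table; the
 * struct and MAX_NUM_CLOCKS mirror the header, while the fill values
 * are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_CLOCKS 16

/* Mirrors the definition added to amd_powerplay.h. */
struct amd_pp_clocks {
	uint32_t count;
	uint32_t clock[MAX_NUM_CLOCKS];
};

int main(void)
{
	/* Stand-in for a table a driver would fill for amd_pp_sys_clock;
	 * the values are invented. */
	struct amd_pp_clocks clocks = {
		.count = 3,
		.clock = { 30000, 60000, 90000 },
	};
	uint32_t i;

	/* count bounds the valid entries of the fixed-size array. */
	for (i = 0; i < clocks.count && i < MAX_NUM_CLOCKS; i++)
		printf("clock[%u] = %u\n", i, clocks.clock[i]);
	return 0;
}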
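
/*
 * Note on the gpu_scheduler.c hunk: the scheduled-fence callback changes
 * from amd_sched_entity_wakeup to amd_sched_entity_clear_dep, which uses
 * container_of() to recover the entity from its embedded callback, clears
 * entity->dependency and drops the fence reference.  A standalone sketch
 * of that container_of() pattern (names simplified; not the kernel API).
 */
#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel macro: subtract the member offset to get
 * back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cb {
	void (*func)(struct cb *cb);
};

struct entity {
	int id;
	void *dependency;
	struct cb cb;	/* embedded, like amd_sched_entity.cb */
};

/* Analogue of amd_sched_entity_clear_dep(): the callback receives only
 * the embedded struct cb; container_of() recovers the entity so its
 * dependency pointer can be cleared. */
static void clear_dep(struct cb *cb)
{
	struct entity *e = container_of(cb, struct entity, cb);

	e->dependency = NULL;
	printf("entity %d: dependency cleared\n", e->id);
}

int main(void)
{
	struct entity e = { .id = 7, .dependency = &e, .cb = { clear_dep } };

	e.cb.func(&e.cb);
	return e.dependency != NULL;
}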