Commit bd21a37d authored by Dave Airlie

Merge remote-tracking branch 'pfdo/drm-next' into drm-next

Pull in drm-next for the object find API changes.

Fix the one place the API crashes.
Signed-off-by: Dave Airlie <airlied@redhat.com>
......@@ -68,6 +68,8 @@ Optional properties:
- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
generator. The chip will rely on the sync signals in the DSI data lanes,
rather than generate its own timings for HDMI output.
- clocks: from common clock binding: reference to the CEC clock.
- clock-names: from common clock binding: must be "cec".
Required nodes:
......@@ -89,6 +91,8 @@ Example
reg = <39>;
interrupt-parent = <&gpio3>;
interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
clocks = <&cec_clock>;
clock-names = "cec";
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
......
Silicon Image SiI9234 HDMI/MHL bridge bindings
Required properties:
- compatible : "sil,sii9234".
- reg : I2C address for TPI interface, use 0x39
- avcc33-supply : MHL/USB Switch Supply Voltage (3.3V)
- iovcc18-supply : I/O Supply Voltage (1.8V)
- avcc12-supply : TMDS Analog Supply Voltage (1.2V)
- cvcc12-supply : Digital Core Supply Voltage (1.2V)
- interrupts, interrupt-parent: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin (active low)
- video interfaces: Device node can contain two video interface port
nodes for HDMI encoder and connector according to [1].
- port@0 - MHL to HDMI
- port@1 - MHL to connector
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
sii9234@39 {
compatible = "sil,sii9234";
reg = <0x39>;
avcc33-supply = <&vcc33mhl>;
iovcc18-supply = <&vcc18mhl>;
avcc12-supply = <&vsil12>;
cvcc12-supply = <&vsil12>;
reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpf3>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mhl_to_hdmi: endpoint {
remote-endpoint = <&hdmi_to_mhl>;
};
};
port@1 {
reg = <1>;
mhl_to_connector: endpoint {
remote-endpoint = <&connector_to_mhl>;
};
};
};
};
This binding covers the official 7" (800x480) Raspberry Pi touchscreen
panel.
This DSI panel contains:
- TC358762 DSI->DPI bridge
- Atmel microcontroller on I2C for power sequencing the DSI bridge and
controlling backlight
- Touchscreen controller on I2C for touch input
and this binding covers the DSI display parts but not its touch input.
Required properties:
- compatible: Must be "raspberrypi,7inch-touchscreen-panel"
- reg: Must be "45"
- port: See panel-common.txt
Example:
dsi1: dsi@7e700000 {
#address-cells = <1>;
#size-cells = <0>;
<...>
port {
dsi_out_port: endpoint {
remote-endpoint = <&panel_dsi_port>;
};
};
};
i2c_dsi: i2c {
compatible = "i2c-gpio";
#address-cells = <1>;
#size-cells = <0>;
gpios = <&gpio 28 0
&gpio 29 0>;
lcd@45 {
compatible = "raspberrypi,7inch-touchscreen-panel";
reg = <0x45>;
port {
panel_dsi_port: endpoint {
remote-endpoint = <&dsi_out_port>;
};
};
};
};
......@@ -41,14 +41,17 @@ CEC. It is one end of the pipeline.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a10s-hdmi
* allwinner,sun6i-a31-hdmi
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the HDMI encoder
* ahb: the HDMI interface clock
* mod: the HDMI module clock
* ddc: the HDMI ddc clock (A31 only)
* pll-0: the first video PLL
* pll-1: the second video PLL
- clock-names: the clock names mentioned above
- resets: phandle to the reset control for the HDMI encoder (A31 only)
- dmas: phandles to the DMA channels used by the HDMI encoder
* ddc-tx: The channel for DDC transmission
* ddc-rx: The channel for DDC reception
......
......@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* @dst: the destination reservation object
* @src: the source reservation object
*
* Copy all fences from src to dst. Both src->lock as well as dst-lock must be
* held.
* Copy all fences from src to dst. dst->lock must be held.
*/
int reservation_object_copy_fences(struct reservation_object *dst,
struct reservation_object *src)
......@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
size_t size;
unsigned i;
src_list = reservation_object_get_list(src);
rcu_read_lock();
src_list = rcu_dereference(src->fence);
retry:
if (src_list) {
size = offsetof(typeof(*src_list),
shared[src_list->shared_count]);
unsigned shared_count = src_list->shared_count;
size = offsetof(typeof(*src_list), shared[shared_count]);
rcu_read_unlock();
dst_list = kmalloc(size, GFP_KERNEL);
if (!dst_list)
return -ENOMEM;
dst_list->shared_count = src_list->shared_count;
dst_list->shared_max = src_list->shared_count;
for (i = 0; i < src_list->shared_count; ++i)
dst_list->shared[i] =
dma_fence_get(src_list->shared[i]);
rcu_read_lock();
src_list = rcu_dereference(src->fence);
if (!src_list || src_list->shared_count > shared_count) {
kfree(dst_list);
goto retry;
}
dst_list->shared_count = 0;
dst_list->shared_max = shared_count;
for (i = 0; i < src_list->shared_count; ++i) {
struct dma_fence *fence;
fence = rcu_dereference(src_list->shared[i]);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
continue;
if (!dma_fence_get_rcu(fence)) {
kfree(dst_list);
src_list = rcu_dereference(src->fence);
goto retry;
}
if (dma_fence_is_signaled(fence)) {
dma_fence_put(fence);
continue;
}
dst_list->shared[dst_list->shared_count++] = fence;
}
} else {
dst_list = NULL;
}
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
kfree(dst->staged);
dst->staged = NULL;
src_list = reservation_object_get_list(dst);
old = reservation_object_get_excl(dst);
new = reservation_object_get_excl(src);
dma_fence_get(new);
preempt_disable();
write_seqcount_begin(&dst->seq);
......
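The rewritten copy path above trades holding src->lock for an RCU read-side retry loop: kmalloc() can sleep, so the read lock is dropped around it, and by the time it is retaken the shared-fence array may have been reallocated or grown, and any individual fence may be about to be freed. Hence the re-check of shared_count and the per-fence dma_fence_get_rcu(), restarting the whole copy when either fails. A condensed sketch of the pattern (same structures as above, allocation-failure handling trimmed):

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);
retry:
	if (src_list) {
		unsigned int count = src_list->shared_count;

		rcu_read_unlock();		/* kmalloc() may sleep */
		dst_list = kmalloc(offsetof(typeof(*src_list), shared[count]),
				   GFP_KERNEL);
		rcu_read_lock();

		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > count) {
			kfree(dst_list);	/* raced with a writer */
			goto retry;
		}
		/* per fence: dma_fence_get_rcu(); kfree + goto retry on failure */
	}
	rcu_read_unlock();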
......@@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
......@@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
......@@ -372,7 +372,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
/* pick the encoder ids */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
......@@ -1077,7 +1077,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
......@@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
......@@ -1153,7 +1153,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
......@@ -1294,7 +1294,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
......@@ -1323,7 +1323,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
......
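All of the connector hunks in this file (and in the drivers below) are mechanical fallout of the object-find API change this merge exists to pull in: drm_encoder_find(), drm_crtc_find(), drm_framebuffer_lookup() and friends now take the struct drm_file performing the lookup, which lets the core restrict object visibility per file (groundwork for DRM leases). Passing NULL, as every call site here does, keeps the old unrestricted behavior:

	/* before */
	encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
	/* after: NULL file_priv means "no per-file filtering" */
	encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);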
......@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
......@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
/* pick the first one */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
......
......@@ -2638,7 +2638,7 @@ static struct drm_encoder *best_encoder(struct drm_connector *connector)
/* pick the encoder ids */
if (enc_id) {
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
if (!obj) {
DRM_ERROR("Couldn't find a matching encoder for our connector\n");
return NULL;
......
......@@ -713,7 +713,7 @@ static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connect
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
......
......@@ -213,7 +213,7 @@ bochs_connector_best_encoder(struct drm_connector *connector)
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
......
......@@ -71,7 +71,7 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
depends on OF && RC_CORE
select DRM_KMS_HELPER
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
......@@ -84,6 +84,14 @@ config DRM_SII902X
---help---
Silicon Image sii902x bridge chip driver.
config DRM_SII9234
tristate "Silicon Image SII9234 HDMI/MHL bridge"
depends on OF
---help---
Say Y here if you want support for the MHL interface.
It is an I2C driver that detects the connection of an MHL bridge
and starts encapsulating the HDMI signal.
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on OF
......
......@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
......
......@@ -21,3 +21,11 @@ config DRM_I2C_ADV7533
default y
help
Support for the Analog Devices ADV7533 DSI to HDMI encoder.
config DRM_I2C_ADV7511_CEC
bool "ADV7511/33 HDMI CEC driver"
depends on DRM_I2C_ADV7511
select CEC_CORE
default y
help
When selected the HDMI transmitter will support the CEC feature.
adv7511-y := adv7511_drv.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
......@@ -195,6 +195,25 @@
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
#define ADV7511_REG_CEC_TX_FRAME_LEN 0x10
#define ADV7511_REG_CEC_TX_ENABLE 0x11
#define ADV7511_REG_CEC_TX_RETRY 0x12
#define ADV7511_REG_CEC_TX_LOW_DRV_CNT 0x14
#define ADV7511_REG_CEC_RX_FRAME_HDR 0x15
#define ADV7511_REG_CEC_RX_FRAME_DATA0 0x16
#define ADV7511_REG_CEC_RX_FRAME_LEN 0x25
#define ADV7511_REG_CEC_RX_ENABLE 0x26
#define ADV7511_REG_CEC_RX_BUFFERS 0x4a
#define ADV7511_REG_CEC_LOG_ADDR_MASK 0x4b
#define ADV7511_REG_CEC_LOG_ADDR_0_1 0x4c
#define ADV7511_REG_CEC_LOG_ADDR_2 0x4d
#define ADV7511_REG_CEC_CLK_DIV 0x4e
#define ADV7511_REG_CEC_SOFT_RESET 0x50
#define ADV7533_REG_CEC_OFFSET 0x70
enum adv7511_input_clock {
ADV7511_INPUT_CLOCK_1X,
ADV7511_INPUT_CLOCK_2X,
......@@ -297,6 +316,8 @@ enum adv7511_type {
ADV7533,
};
#define ADV7511_MAX_ADDRS 3
struct adv7511 {
struct i2c_client *i2c_main;
struct i2c_client *i2c_edid;
......@@ -341,15 +362,27 @@ struct adv7511 {
enum adv7511_type type;
struct platform_device *audio_pdev;
struct cec_adapter *cec_adap;
u8 cec_addr[ADV7511_MAX_ADDRS];
u8 cec_valid_addrs;
bool cec_enabled_adap;
struct clk *cec_clk;
u32 cec_clk_freq;
};
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
unsigned int offset);
void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#endif
#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
void adv7533_uninit_cec(struct adv7511 *adv);
int adv7533_init_cec(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
......@@ -372,11 +405,7 @@ static inline int adv7533_patch_registers(struct adv7511 *adv)
return -ENODEV;
}
static inline void adv7533_uninit_cec(struct adv7511 *adv)
{
}
static inline int adv7533_init_cec(struct adv7511 *adv)
static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
{
return -ENODEV;
}
......
/*
* adv7511_cec.c - Analog Devices ADV7511/33 cec driver
*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/cec.h>
#include "adv7511.h"
#define ADV7511_INT1_CEC_MASK \
(ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \
ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1)
static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
{
unsigned int offset = adv7511->type == ADV7533 ?
ADV7533_REG_CEC_OFFSET : 0;
unsigned int val;
if (regmap_read(adv7511->regmap_cec,
ADV7511_REG_CEC_TX_ENABLE + offset, &val))
return;
if ((val & 0x01) == 0)
return;
if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
cec_transmit_attempt_done(adv7511->cec_adap,
CEC_TX_STATUS_ARB_LOST);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
u8 status;
u8 err_cnt = 0;
u8 nack_cnt = 0;
u8 low_drive_cnt = 0;
unsigned int cnt;
/*
* We set this status bit since this hardware performs
* retransmissions.
*/
status = CEC_TX_STATUS_MAX_RETRIES;
if (regmap_read(adv7511->regmap_cec,
ADV7511_REG_CEC_TX_LOW_DRV_CNT + offset, &cnt)) {
err_cnt = 1;
status |= CEC_TX_STATUS_ERROR;
} else {
nack_cnt = cnt & 0xf;
if (nack_cnt)
status |= CEC_TX_STATUS_NACK;
low_drive_cnt = cnt >> 4;
if (low_drive_cnt)
status |= CEC_TX_STATUS_LOW_DRIVE;
}
cec_transmit_done(adv7511->cec_adap, status,
0, nack_cnt, low_drive_cnt, err_cnt);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
return;
}
}
void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
{
unsigned int offset = adv7511->type == ADV7533 ?
ADV7533_REG_CEC_OFFSET : 0;
const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
ADV7511_INT1_CEC_TX_ARBIT_LOST |
ADV7511_INT1_CEC_TX_RETRY_TIMEOUT;
struct cec_msg msg = {};
unsigned int len;
unsigned int val;
u8 i;
if (irq1 & irq_tx_mask)
adv_cec_tx_raw_status(adv7511, irq1);
if (!(irq1 & ADV7511_INT1_CEC_RX_READY1))
return;
if (regmap_read(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_FRAME_LEN + offset, &len))
return;
msg.len = len & 0x1f;
if (msg.len > 16)
msg.len = 16;
if (!msg.len)
return;
for (i = 0; i < msg.len; i++) {
regmap_read(adv7511->regmap_cec,
i + ADV7511_REG_CEC_RX_FRAME_HDR + offset, &val);
msg.msg[i] = val;
}
/* toggle to re-enable rx 1 */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, 1);
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
cec_received_msg(adv7511->cec_adap, &msg);
}
static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct adv7511 *adv7511 = cec_get_drvdata(adap);
unsigned int offset = adv7511->type == ADV7533 ?
ADV7533_REG_CEC_OFFSET : 0;
if (adv7511->i2c_cec == NULL)
return -EIO;
if (!adv7511->cec_enabled_adap && enable) {
/* power up cec section */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_CLK_DIV + offset,
0x03, 0x01);
/* legacy mode and clear all rx buffers */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, 0x07);
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
/* initially disable tx */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_TX_ENABLE + offset, 1, 0);
/* enabled irqs: */
/* tx: ready */
/* tx: arbitration lost */
/* tx: retry timeout */
/* rx: ready 1 */
regmap_update_bits(adv7511->regmap,
ADV7511_REG_INT_ENABLE(1), 0x3f,
ADV7511_INT1_CEC_MASK);
} else if (adv7511->cec_enabled_adap && !enable) {
regmap_update_bits(adv7511->regmap,
ADV7511_REG_INT_ENABLE(1), 0x3f, 0);
/* disable address mask 1-3 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
0x70, 0x00);
/* power down cec section */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_CLK_DIV + offset,
0x03, 0x00);
adv7511->cec_valid_addrs = 0;
}
adv7511->cec_enabled_adap = enable;
return 0;
}
static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
struct adv7511 *adv7511 = cec_get_drvdata(adap);
unsigned int offset = adv7511->type == ADV7533 ?
ADV7533_REG_CEC_OFFSET : 0;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
if (!adv7511->cec_enabled_adap)
return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
if (addr == CEC_LOG_ADDR_INVALID) {
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
0x70, 0);
adv7511->cec_valid_addrs = 0;
return 0;
}
for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
bool is_valid = adv7511->cec_valid_addrs & (1 << i);
if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
free_idx = i;
if (is_valid && adv7511->cec_addr[i] == addr)
return 0;
}
if (i == ADV7511_MAX_ADDRS) {
i = free_idx;
if (i == ADV7511_MAX_ADDRS)
return -ENXIO;
}
adv7511->cec_addr[i] = addr;
adv7511->cec_valid_addrs |= 1 << i;
switch (i) {
case 0:
/* enable address mask 0 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
0x10, 0x10);
/* set address for mask 0 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
0x0f, addr);
break;
case 1:
/* enable address mask 1 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
0x20, 0x20);
/* set address for mask 1 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
0xf0, addr << 4);
break;
case 2:
/* enable address mask 2 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
0x40, 0x40);
/* set address for mask 2 */
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_LOG_ADDR_2 + offset,
0x0f, addr);
break;
}
return 0;
}
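The three address slots are packed two per register: slots 0 and 1 occupy the low and high nibbles of ADV7511_REG_CEC_LOG_ADDR_0_1, slot 2 the low nibble of ADV7511_REG_CEC_LOG_ADDR_2, and bits 4-6 of the mask register enable each slot. A worked example (hypothetical value) claiming CEC logical address 4, "Playback Device 1", in slot 0:

	/* enable slot 0 ... */
	regmap_update_bits(adv7511->regmap_cec,
			   ADV7511_REG_CEC_LOG_ADDR_MASK + offset, 0x10, 0x10);
	/* ... and put address 4 in its low nibble */
	regmap_update_bits(adv7511->regmap_cec,
			   ADV7511_REG_CEC_LOG_ADDR_0_1 + offset, 0x0f, 0x04);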
static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct adv7511 *adv7511 = cec_get_drvdata(adap);
unsigned int offset = adv7511->type == ADV7533 ?
ADV7533_REG_CEC_OFFSET : 0;
u8 len = msg->len;
unsigned int i;
/*
* The number of retries is the number of attempts - 1, but retry
* at least once. It's not clear if a value of 0 is allowed, so
* let's do at least one retry.
*/
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_TX_RETRY + offset,
0x70, max(1, attempts - 1) << 4);
/* blocking, clear cec tx irq status */
regmap_update_bits(adv7511->regmap, ADV7511_REG_INT(1), 0x38, 0x38);
/* write data */
for (i = 0; i < len; i++)
regmap_write(adv7511->regmap_cec,
i + ADV7511_REG_CEC_TX_FRAME_HDR + offset,
msg->msg[i]);
/* set length (data + header) */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_TX_FRAME_LEN + offset, len);
/* start transmit, enable tx */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_TX_ENABLE + offset, 0x01);
return 0;
}
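Concretely (illustrative values): a request for 4 attempts programs 3 hardware retries, 0x30 within the 0x70 field, while a request for a single attempt is still rounded up to one retry:

	u8 retries = max(1, attempts - 1);	/* attempts=4 -> 3, attempts=1 -> 1 */
	/* bits 6:4 of ADV7511_REG_CEC_TX_RETRY: 3 << 4 == 0x30 */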
static const struct cec_adap_ops adv7511_cec_adap_ops = {
.adap_enable = adv7511_cec_adap_enable,
.adap_log_addr = adv7511_cec_adap_log_addr,
.adap_transmit = adv7511_cec_adap_transmit,
};
static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
{
adv7511->cec_clk = devm_clk_get(dev, "cec");
if (IS_ERR(adv7511->cec_clk)) {
int ret = PTR_ERR(adv7511->cec_clk);
adv7511->cec_clk = NULL;
return ret;
}
clk_prepare_enable(adv7511->cec_clk);
adv7511->cec_clk_freq = clk_get_rate(adv7511->cec_clk);
return 0;
}
int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
unsigned int offset)
{
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
return ret;
adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
if (IS_ERR(adv7511->cec_adap))
return PTR_ERR(adv7511->cec_adap);
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
/* cec soft reset */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_SOFT_RESET + offset, 0x00);
/* legacy mode */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, 0x00);
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_CLK_DIV + offset,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
ret = cec_register_adapter(adv7511->cec_adap, dev);
if (ret) {
cec_delete_adapter(adv7511->cec_adap);
adv7511->cec_adap = NULL;
}
return ret;
}
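The divider register derives the CEC core's 750 kHz timebase from the external "cec" clock declared in the new DT binding at the top of this commit; the field sits at bits 7:2, hence the trailing shift. A worked example with an assumed 12 MHz clock:

	u32 freq = 12000000;			/* assumed 12 MHz "cec" clock */
	u8 div = ((freq / 750000) - 1) << 2;	/* (16 - 1) << 2 == 0x3c */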
......@@ -11,12 +11,15 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <media/cec.h>
#include "adv7511.h"
/* ADI recommended values for proper operation. */
......@@ -336,8 +339,10 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
*/
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap,
ADV7511_REG_INT_ENABLE(1),
ADV7511_INT1_DDC_ERROR,
ADV7511_INT1_DDC_ERROR);
}
/*
......@@ -373,6 +378,9 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
regmap_update_bits(adv7511->regmap,
ADV7511_REG_INT_ENABLE(1),
ADV7511_INT1_DDC_ERROR, 0);
regcache_mark_dirty(adv7511->regmap);
}
......@@ -423,6 +431,8 @@ static void adv7511_hpd_work(struct work_struct *work)
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
if (status == connector_status_disconnected)
cec_phys_addr_invalidate(adv7511->cec_adap);
drm_kms_helper_hotplug_event(adv7511->connector.dev);
}
}
......@@ -453,6 +463,10 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
wake_up_all(&adv7511->wq);
}
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
adv7511_cec_irq_process(adv7511, irq1);
#endif
return 0;
}
......@@ -595,6 +609,8 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
kfree(edid);
cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
return count;
}
......@@ -919,6 +935,65 @@ static void adv7511_uninit_regulators(struct adv7511 *adv)
regulator_bulk_disable(adv->num_supplies, adv->supplies);
}
static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
{
struct i2c_client *i2c = to_i2c_client(dev);
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
if (adv7511->type == ADV7533)
reg -= ADV7533_REG_CEC_OFFSET;
switch (reg) {
case ADV7511_REG_CEC_RX_FRAME_HDR:
case ADV7511_REG_CEC_RX_FRAME_DATA0...
ADV7511_REG_CEC_RX_FRAME_DATA0 + 14:
case ADV7511_REG_CEC_RX_FRAME_LEN:
case ADV7511_REG_CEC_RX_BUFFERS:
case ADV7511_REG_CEC_TX_LOW_DRV_CNT:
return true;
}
return false;
}
static const struct regmap_config adv7511_cec_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
.cache_type = REGCACHE_RBTREE,
.volatile_reg = adv7511_cec_register_volatile,
};
static int adv7511_init_cec_regmap(struct adv7511 *adv)
{
int ret;
adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
adv->i2c_main->addr - 1);
if (!adv->i2c_cec)
return -ENOMEM;
i2c_set_clientdata(adv->i2c_cec, adv);
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
&adv7511_cec_regmap_config);
if (IS_ERR(adv->regmap_cec)) {
ret = PTR_ERR(adv->regmap_cec);
goto err;
}
if (adv->type == ADV7533) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
}
return 0;
err:
i2c_unregister_device(adv->i2c_cec);
return ret;
}
static int adv7511_parse_dt(struct device_node *np,
struct adv7511_link_config *config)
{
......@@ -1009,6 +1084,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
struct device *dev = &i2c->dev;
unsigned int main_i2c_addr = i2c->addr << 1;
unsigned int edid_i2c_addr = main_i2c_addr + 4;
unsigned int offset;
unsigned int val;
int ret;
......@@ -1092,11 +1168,9 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto uninit_regulators;
}
if (adv7511->type == ADV7533) {
ret = adv7533_init_cec(adv7511);
if (ret)
goto err_i2c_unregister_edid;
}
ret = adv7511_init_cec_regmap(adv7511);
if (ret)
goto err_i2c_unregister_edid;
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
......@@ -1111,10 +1185,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
/* CEC is unused for now */
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
......@@ -1129,10 +1199,23 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_audio_init(dev, adv7511);
offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
ret = adv7511_cec_init(dev, adv7511, offset);
if (ret)
goto err_unregister_cec;
#else
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
ADV7511_CEC_CTRL_POWER_DOWN);
#endif
return 0;
err_unregister_cec:
adv7533_uninit_cec(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
......@@ -1145,10 +1228,11 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
if (adv7511->type == ADV7533) {
if (adv7511->type == ADV7533)
adv7533_detach_dsi(adv7511);
adv7533_uninit_cec(adv7511);
}
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
clk_disable_unprepare(adv7511->cec_clk);
adv7511_uninit_regulators(adv7511);
......@@ -1156,6 +1240,8 @@ static int adv7511_remove(struct i2c_client *i2c)
adv7511_audio_exit(adv7511);
cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_edid);
return 0;
......
......@@ -32,14 +32,6 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
static const struct regmap_config adv7533_cec_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
.cache_type = REGCACHE_RBTREE,
};
static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
......@@ -145,37 +137,11 @@ int adv7533_patch_registers(struct adv7511 *adv)
ARRAY_SIZE(adv7533_fixed_registers));
}
void adv7533_uninit_cec(struct adv7511 *adv)
{
i2c_unregister_device(adv->i2c_cec);
}
int adv7533_init_cec(struct adv7511 *adv)
int adv7533_patch_cec_registers(struct adv7511 *adv)
{
int ret;
adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
adv->i2c_main->addr - 1);
if (!adv->i2c_cec)
return -ENOMEM;
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
&adv7533_cec_regmap_config);
if (IS_ERR(adv->regmap_cec)) {
ret = PTR_ERR(adv->regmap_cec);
goto err;
}
ret = regmap_register_patch(adv->regmap_cec,
return regmap_register_patch(adv->regmap_cec,
adv7533_cec_fixed_registers,
ARRAY_SIZE(adv7533_cec_fixed_registers));
if (ret)
goto err;
return 0;
err:
adv7533_uninit_cec(adv);
return ret;
}
int adv7533_attach_dsi(struct adv7511 *adv)
......
......@@ -188,7 +188,15 @@ EXPORT_SYMBOL(drm_panel_bridge_add);
*/
void drm_panel_bridge_remove(struct drm_bridge *bridge)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct panel_bridge *panel_bridge;
if (!bridge)
return;
if (bridge->funcs != &panel_bridge_bridge_funcs)
return;
panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
devm_kfree(panel_bridge->panel->dev, bridge);
......
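Because the helper now tolerates both a NULL bridge and a bridge it did not create (checked via the funcs pointer), callers can drop their own bookkeeping, which is exactly what the dw-mipi-dsi hunk further down does with its is_panel_bridge flag. A host driver teardown reduces to (sketch, hypothetical dsi state):

	/* safe even if panel_bridge is NULL or came from another bridge driver */
	drm_panel_bridge_remove(dsi->panel_bridge);
	dsi->panel_bridge = NULL;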
This diff has been collapsed.
......@@ -28,6 +28,8 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include "sil-sii8620.h"
#define SII8620_BURST_BUF_LEN 288
......@@ -58,6 +60,7 @@ enum sii8620_mt_state {
struct sii8620 {
struct drm_bridge bridge;
struct device *dev;
struct rc_dev *rc_dev;
struct clk *clk_xtal;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_int;
......@@ -431,6 +434,16 @@ static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
}
static void sii8620_mt_rcpk(struct sii8620 *ctx, u8 code)
{
sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPK, code);
}
static void sii8620_mt_rcpe(struct sii8620 *ctx, u8 code)
{
sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPE, code);
}
static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
......@@ -1753,6 +1766,25 @@ static void sii8620_send_features(struct sii8620 *ctx)
sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
}
static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
{
bool pressed = !(scancode & MHL_RCP_KEY_RELEASED_MASK);
scancode &= MHL_RCP_KEY_ID_MASK;
if (!ctx->rc_dev) {
dev_dbg(ctx->dev, "RCP input device not initialized\n");
return false;
}
if (pressed)
rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
else
rc_keyup(ctx->rc_dev);
return true;
}
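MHL RCP scancodes reuse the CEC user-control table: the key identity lives in the low seven bits and bit 7 flags a release, which is what the two masks above separate. For example (assuming the usual MHL encoding):

	/* 0x44 is CEC "Play": 0x44 -> pressed, 0xc4 -> released */
	bool pressed = !(0xc4 & MHL_RCP_KEY_RELEASED_MASK);	/* false */
	u8 id = 0xc4 & MHL_RCP_KEY_ID_MASK;			/* 0x44 */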
static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
{
u8 ints[MHL_INT_SIZE];
......@@ -1804,19 +1836,25 @@ static void sii8620_msc_mt_done(struct sii8620 *ctx)
static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
{
struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
struct sii8620_mt_msg *msg;
u8 buf[2];
if (!msg)
return;
sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
switch (buf[0]) {
case MHL_MSC_MSG_RAPK:
msg = sii8620_msc_msg_first(ctx);
if (!msg)
return;
msg->ret = buf[1];
ctx->mt_state = MT_STATE_DONE;
break;
case MHL_MSC_MSG_RCP:
if (!sii8620_rcp_consume(ctx, buf[1]))
sii8620_mt_rcpe(ctx,
MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE);
sii8620_mt_rcpk(ctx, buf[1]);
break;
default:
dev_err(ctx->dev, "%s message type %d,%d not supported",
__func__, buf[0], buf[1]);
......@@ -2102,11 +2140,57 @@ static void sii8620_cable_in(struct sii8620 *ctx)
enable_irq(to_i2c_client(ctx->dev)->irq);
}
static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
{
struct rc_dev *rc_dev;
int ret;
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev) {
dev_err(ctx->dev, "Failed to allocate RC device\n");
ctx->error = -ENOMEM;
return;
}
rc_dev->input_phys = "sii8620/input0";
rc_dev->input_id.bustype = BUS_VIRTUAL;
rc_dev->map_name = RC_MAP_CEC;
rc_dev->allowed_protocols = RC_PROTO_BIT_CEC;
rc_dev->driver_name = "sii8620";
rc_dev->device_name = "sii8620";
ret = rc_register_device(rc_dev);
if (ret) {
dev_err(ctx->dev, "Failed to register RC device\n");
ctx->error = ret;
rc_free_device(rc_dev);	/* ctx->rc_dev is still NULL at this point */
return;
}
ctx->rc_dev = rc_dev;
}
static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii8620, bridge);
}
static int sii8620_attach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
sii8620_init_rcp_input_dev(ctx);
return sii8620_clear_error(ctx);
}
static void sii8620_detach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
rc_unregister_device(ctx->rc_dev);
}
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
......@@ -2151,6 +2235,8 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs sii8620_bridge_funcs = {
.attach = sii8620_attach,
.detach = sii8620_detach,
.mode_fixup = sii8620_mode_fixup,
};
......@@ -2217,8 +2303,8 @@ static int sii8620_remove(struct i2c_client *client)
struct sii8620 *ctx = i2c_get_clientdata(client);
disable_irq(to_i2c_client(ctx->dev)->irq);
drm_bridge_remove(&ctx->bridge);
sii8620_hw_off(ctx);
drm_bridge_remove(&ctx->bridge);
return 0;
}
......
......@@ -221,7 +221,6 @@ struct dw_mipi_dsi {
struct drm_bridge bridge;
struct mipi_dsi_host dsi_host;
struct drm_bridge *panel_bridge;
bool is_panel_bridge;
struct device *dev;
void __iomem *base;
......@@ -297,7 +296,6 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
dsi->is_panel_bridge = true;
}
dsi->panel_bridge = bridge;
......@@ -312,8 +310,7 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
if (dsi->is_panel_bridge)
drm_panel_bridge_remove(dsi->panel_bridge);
drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
drm_bridge_remove(&dsi->bridge);
......
......@@ -457,7 +457,7 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
......
......@@ -182,9 +182,6 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
if (!obj)
continue;
obj->funcs->atomic_destroy_state(obj,
state->private_objs[i].state);
state->private_objs[i].ptr = NULL;
......@@ -718,7 +715,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
......@@ -734,7 +731,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return -EINVAL;
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
......@@ -1149,7 +1146,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
......@@ -2259,7 +2256,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
goto out;
}
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj) {
ret = -ENOENT;
goto out;
......
......@@ -1704,7 +1704,7 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* drm_atomic_helper_commit_cleanup_done().
*
* This is all implemented by in drm_atomic_helper_commit(), giving drivers a
* complete and esay-to-use default implementation of the atomic_commit() hook.
* complete and easy-to-use default implementation of the atomic_commit() hook.
*
* The tracking of asynchronously executed and still pending commits is done
* using the core structure &drm_crtc_commit.
......@@ -1819,7 +1819,7 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
* This function waits for all preceding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
* by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event).
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
......@@ -3052,6 +3052,7 @@ int drm_atomic_helper_resume(struct drm_device *dev,
drm_modeset_backoff(&ctx);
}
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
......@@ -3206,7 +3207,7 @@ struct drm_encoder *
drm_atomic_helper_best_encoder(struct drm_connector *connector)
{
WARN_ON(connector->encoder_ids[1]);
return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
......
......@@ -230,7 +230,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
......@@ -308,7 +308,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
......
......@@ -1310,7 +1310,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
connector = drm_connector_lookup(dev, out_resp->connector_id);
connector = drm_connector_lookup(dev, file_priv, out_resp->connector_id);
if (!connector)
return -ENOENT;
......
......@@ -402,7 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id);
if (!crtc)
return -ENOENT;
......@@ -569,7 +569,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
crtc = drm_crtc_find(dev, crtc_req->crtc_id);
crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
return -ENOENT;
......@@ -595,7 +595,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_get(fb);
} else {
fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
......@@ -680,7 +680,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
connector = drm_connector_lookup(dev, out_id);
connector = drm_connector_lookup(dev, file_priv, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
......
......@@ -562,12 +562,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!save_encoder_crtcs)
return -ENOMEM;
save_connector_encoders = kzalloc(dev->mode_config.num_connector *
save_connector_encoders = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_encoder *), GFP_KERNEL);
if (!save_connector_encoders) {
kfree(save_encoder_crtcs);
......
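kcalloc() is the array form of kzalloc(): it checks the count * size multiplication for overflow and returns NULL instead of quietly allocating a short buffer, so it is the safer spelling whenever the element count is a runtime value, as in both hunks above:

	/* overflow-prone:   kzalloc(n * sizeof(*arr), GFP_KERNEL) */
	/* overflow-checked, also zeroed: */
	arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);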
......@@ -106,6 +106,7 @@ int drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj);
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id, uint32_t type);
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
......
......@@ -137,8 +137,10 @@ EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
case 162000:
default:
WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
DP_LINK_BW_1_62);
case 162000:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
......@@ -151,8 +153,9 @@ EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
default:
WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
case DP_LINK_BW_1_62:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
......
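Moving the default case above the lowest rate lets unknown inputs warn once and then fall through to the 1.62 Gbps code instead of silently misbehaving. With the values shown (DP_LINK_BW_1_62 is 0x06 in the DisplayPort spec):

	drm_dp_link_rate_to_bw_code(162000);	/* -> DP_LINK_BW_1_62 (0x06) */
	drm_dp_link_rate_to_bw_code(123456);	/* WARNs, then also 0x06 */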
......@@ -220,7 +220,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
encoder = drm_encoder_find(dev, enc_resp->encoder_id);
encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id);
if (!encoder)
return -ENOENT;
......
......@@ -2266,7 +2266,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;
crtcs = kzalloc(fb_helper->connector_count *
crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
......
......@@ -381,7 +381,7 @@ int drm_mode_rmfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
fb = drm_framebuffer_lookup(dev, *id);
fb = drm_framebuffer_lookup(dev, file_priv, *id);
if (!fb)
return -ENOENT;
......@@ -450,7 +450,7 @@ int drm_mode_getfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
fb = drm_framebuffer_lookup(dev, r->fb_id);
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
......@@ -515,7 +515,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
fb = drm_framebuffer_lookup(dev, r->fb_id);
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
......@@ -688,12 +688,13 @@ EXPORT_SYMBOL(drm_framebuffer_init);
* again, using drm_framebuffer_put().
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
obj = __drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_FB);
if (obj)
fb = obj_to_fb(obj);
return fb;
......
......@@ -27,19 +27,24 @@
* DOC: overview
*
* This library provides helpers for drivers that don't subclass
* &drm_framebuffer and and use &drm_gem_object for their backing storage.
* &drm_framebuffer and use &drm_gem_object for their backing storage.
*
* Drivers without additional needs to validate framebuffers can simply use
* drm_gem_fb_create() and everything is wired up automatically. But all
* parts can be used individually.
* drm_gem_fb_create() and everything is wired up automatically. Other drivers
* can use all parts independently.
*/
/**
* drm_gem_fb_get_obj() - Get GEM object for framebuffer
* @fb: The framebuffer
* @plane: Which plane
* drm_gem_fb_get_obj() - Get GEM object backing the framebuffer
* @fb: Framebuffer
* @plane: Plane index
*
* Returns the GEM object for given framebuffer.
* No additional reference is taken beyond the one that the &drm_framebuffer
* already holds.
*
* Returns:
* Pointer to &drm_gem_object for the given framebuffer and plane index or NULL
* if it does not exist.
*/
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane)
......@@ -82,7 +87,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
/**
* drm_gem_fb_destroy - Free GEM backed framebuffer
* @fb: DRM framebuffer
* @fb: Framebuffer
*
* Frees a GEM backed framebuffer with its backing buffer(s) and the structure
* itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
......@@ -102,12 +107,13 @@ EXPORT_SYMBOL(drm_gem_fb_destroy);
/**
* drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
* @fb: DRM framebuffer
* @file: drm file
* @handle: handle created
* @fb: Framebuffer
* @file: DRM file to register the handle for
* @handle: Pointer to return the created handle
*
* This function creates a handle for the GEM object backing the framebuffer.
* Drivers can use this as their &drm_framebuffer_funcs->create_handle
* callback.
* callback. The GETFB IOCTL calls into this callback.
*
* Returns:
* 0 on success or a negative error code on failure.
......@@ -120,18 +126,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
* drm_gem_fb_create_with_funcs() - helper function for the
* drm_gem_fb_create_with_funcs() - Helper function for the
* &drm_mode_config_funcs.fb_create
* callback
* @dev: DRM device
* @file: drm file for the ioctl call
* @mode_cmd: metadata from the userspace fb creation request
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
* &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
* need to change &drm_framebuffer_funcs.
* The function does buffer size validation.
*
* Returns:
* Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
......@@ -192,15 +201,26 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
};
/**
* drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function
* drm_gem_fb_create() - Helper function for the
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: drm file for the ioctl call
* @mode_cmd: metadata from the userspace fb creation request
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
* &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
* backing the framebuffer.
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. The function does buffer size
* validation. Use drm_gem_fb_create_with_funcs() if you need to set
* &drm_framebuffer_funcs.dirty.
*
* Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
* The ADDFB2 IOCTL calls into this callback.
*
* Returns:
* Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
......@@ -212,15 +232,15 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
/**
* drm_gem_fb_prepare_fb() - Prepare gem framebuffer
* @plane: Which plane
* @state: Plane state attach fence to
* drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
* @state: Plane state the fence will be attached to
*
* This can be used as the &drm_plane_helper_funcs.prepare_fb hook.
*
* This function checks if the plane FB has an dma-buf attached, extracts
* the exclusive fence and attaches it to plane state for the atomic helper
* to wait on.
* This function prepares a GEM backed framebuffer for scanout by checking if
* the plane framebuffer has a DMA-BUF attached. If it does, it extracts the
* exclusive fence and attaches it to the plane state for the atomic helper to
* wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb
* callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
......@@ -246,17 +266,19 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
/**
* drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation
* drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
* emulation
* @dev: DRM device
* @sizes: fbdev size description
* @pitch_align: optional pitch alignment
* @pitch_align: Optional pitch alignment
* @obj: GEM object backing the framebuffer
* @funcs: vtable to be used for the new framebuffer object
*
* This function creates a framebuffer for use with fbdev emulation.
* This function creates a framebuffer from a &drm_fb_helper_surface_size
* description for use in the &drm_fb_helper_funcs.fb_probe callback.
*
* Returns:
* Pointer to a drm_framebuffer on success or an error pointer on failure.
* Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fbdev_fb_create(struct drm_device *dev,
......
......@@ -55,7 +55,6 @@ int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_vblank.c */
extern unsigned int drm_timestamp_monotonic;
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
void drm_vblank_cleanup(struct drm_device *dev);
......
......@@ -235,7 +235,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
/* Only some caps make sense with UMS/render-only drivers. */
switch (req->capability) {
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = drm_timestamp_monotonic;
req->value = 1;
return 0;
case DRM_CAP_PRIME:
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
......
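Since the timestamp_monotonic module parameter is removed by the drm_vblank.c hunk below, the capability is now hard-wired to 1 and userspace may rely on CLOCK_MONOTONIC event timestamps unconditionally. A libdrm query looks like this sketch (fd is an open DRM device node):

	#include <stdint.h>
	#include <xf86drm.h>

	uint64_t mono = 0;
	if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &mono) == 0 && mono) {
		/* vblank/page-flip event timestamps use CLOCK_MONOTONIC */
	}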
......@@ -105,6 +105,7 @@ void drm_mode_object_unregister(struct drm_device *dev,
}
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
......@@ -127,7 +128,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
* @file_priv: drm file
* @id: id of the mode object
* @type: type of the mode object
*
......@@ -136,11 +137,12 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
* by calling drm_mode_object_put().
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
obj = __drm_mode_object_find(dev, id, type);
obj = __drm_mode_object_find(dev, file_priv, id, type);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
......@@ -359,7 +361,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -ENOENT;
goto out;
......@@ -481,7 +483,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!arg_obj)
return -ENOENT;
......
......@@ -513,7 +513,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
plane = drm_plane_find(dev, plane_resp->plane_id);
plane = drm_plane_find(dev, file_priv, plane_resp->plane_id);
if (!plane)
return -ENOENT;
......@@ -703,7 +703,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
plane = drm_plane_find(dev, plane_req->plane_id);
plane = drm_plane_find(dev, file_priv, plane_req->plane_id);
if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
......@@ -711,14 +711,14 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
if (plane_req->fb_id) {
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
return -ENOENT;
}
crtc = drm_crtc_find(dev, plane_req->crtc_id);
crtc = drm_crtc_find(dev, file_priv, plane_req->crtc_id);
if (!crtc) {
drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
......@@ -829,7 +829,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
crtc = drm_crtc_find(dev, req->crtc_id);
crtc = drm_crtc_find(dev, file_priv, req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
......@@ -944,7 +944,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
crtc = drm_crtc_find(dev, page_flip->crtc_id);
crtc = drm_crtc_find(dev, file_priv, page_flip->crtc_id);
if (!crtc)
return -ENOENT;
......@@ -1005,7 +1005,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
}
fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
fb = drm_framebuffer_lookup(dev, file_priv, page_flip->fb_id);
if (!fb) {
ret = -ENOENT;
goto out;
......
......@@ -354,7 +354,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
connector_list = kzalloc(num_connectors * sizeof(*connector_list),
connector_list = kcalloc(num_connectors, sizeof(*connector_list),
GFP_KERNEL);
if (!connector_list)
return -ENOMEM;
......
......@@ -99,7 +99,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
/* Step 2: Validate against encoders and crtcs */
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
struct drm_encoder *encoder = drm_encoder_find(dev, ids[i]);
struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
struct drm_crtc *crtc;
if (!encoder)
......
......@@ -450,7 +450,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
property = drm_property_find(dev, out_resp->prop_id);
property = drm_property_find(dev, file_priv, out_resp->prop_id);
if (!property)
return -ENOENT;
......@@ -634,7 +634,7 @@ struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
struct drm_mode_object *obj;
struct drm_property_blob *blob = NULL;
obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
obj = __drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_BLOB);
if (obj)
blob = obj_to_blob(obj);
return blob;
......@@ -897,7 +897,7 @@ bool drm_property_change_valid_get(struct drm_property *property,
if (value == 0)
return true;
*ref = __drm_mode_object_find(property->dev, value,
*ref = __drm_mode_object_find(property->dev, NULL, value,
property->values[0]);
return *ref != NULL;
}
......
......@@ -845,7 +845,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
}
static int drm_syncobj_array_find(struct drm_file *file_private,
void *user_handles, uint32_t count_handles,
void __user *user_handles,
uint32_t count_handles,
struct drm_syncobj ***syncobjs_out)
{
uint32_t i, *handles;
......
......@@ -78,28 +78,20 @@
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, bool in_vblank_irq);
ktime_t *tvblank, bool in_vblank_irq);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
/*
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
unsigned int drm_timestamp_monotonic = 1;
static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
struct timeval *t_vblank, u32 last)
ktime_t t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
......@@ -108,7 +100,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
vblank->last = last;
write_seqlock(&vblank->seqlock);
vblank->time = *t_vblank;
vblank->time = t_vblank;
vblank->count += vblank_count_inc;
write_sequnlock(&vblank->seqlock);
}
......@@ -151,7 +143,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
{
u32 cur_vblank;
bool rc;
struct timeval t_vblank;
ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
spin_lock(&dev->vblank_time_lock);
......@@ -171,13 +163,13 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
* interrupt and assign 0 for now, to mark the vblank timestamp as invalid.
*/
if (!rc)
t_vblank = (struct timeval) {0, 0};
t_vblank = 0;
/*
* +1 to make sure user will never see the same
* vblank counter value before and after a modeset
*/
store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
spin_unlock(&dev->vblank_time_lock);
}
......@@ -200,7 +192,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
struct timeval t_vblank;
ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
int framedur_ns = vblank->framedur_ns;
......@@ -225,11 +217,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
/* trust the hw counter when it's around */
diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
} else if (rc && framedur_ns) {
const struct timeval *t_old;
u64 diff_ns;
t_old = &vblank->time;
diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
/*
* Figure out how many vblanks we've missed based
......@@ -278,9 +266,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* for now, to mark the vblank timestamp as invalid.
*/
if (!rc && !in_vblank_irq)
t_vblank = (struct timeval) {0, 0};
t_vblank = 0;
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
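The elided middle of this hunk turns that nanosecond delta into a count of whole frames (DIV_ROUND_CLOSEST_ULL in kernel terms). A minimal userspace sketch of the same rounding, with hypothetical example values:

#include <stdint.h>
#include <stdio.h>

/* Missed vblanks = time delta / frame duration, rounded to nearest. */
static uint32_t missed_vblanks(uint64_t diff_ns, uint64_t framedur_ns)
{
	return (uint32_t)((diff_ns + framedur_ns / 2) / framedur_ns);
}

int main(void)
{
	/* A ~60 Hz frame lasts 16.667 ms; a 50 ms gap spans 3 frames. */
	printf("%u\n", missed_vblanks(50000000ULL, 16666667ULL));
	return 0;
}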
static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
......@@ -556,7 +544,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* @pipe: index of CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to struct timeval which should receive the timestamp
* @vblank_time: Pointer to time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
......@@ -584,10 +572,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
ktime_t *vblank_time,
bool in_vblank_irq)
{
struct timeval tv_etime;
struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
struct drm_crtc *crtc;
......@@ -676,41 +664,31 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
mode->crtc_clock);
if (!drm_timestamp_monotonic)
etime = ktime_mono_to_real(etime);
/* save this only for debugging purposes */
tv_etime = ktime_to_timeval(etime);
ts_etime = ktime_to_timespec64(etime);
ts_vblank_time = ktime_to_timespec64(*vblank_time);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
*vblank_time = etime;
DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
pipe, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
duration_ns/1000, i);
(u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
(u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
duration_ns / 1000, i);
return true;
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
static struct timeval get_drm_timestamp(void)
{
ktime_t now;
now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
return ktime_to_timeval(now);
}
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* @dev: DRM device
* @pipe: index of CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
......@@ -728,7 +706,7 @@ static struct timeval get_drm_timestamp(void)
*/
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, bool in_vblank_irq)
ktime_t *tvblank, bool in_vblank_irq)
{
bool ret = false;
......@@ -744,7 +722,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* Return current monotonic/gettimeofday timestamp as best estimate.
*/
if (!ret)
*tvblank = get_drm_timestamp();
*tvblank = ktime_get();
return ret;
}
......@@ -769,14 +747,14 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_count);
static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
ktime_t *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 vblank_count;
unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs)) {
*vblanktime = (struct timeval) { 0 };
*vblanktime = 0;
return 0;
}
......@@ -793,7 +771,7 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value
* @crtc: which counter to retrieve
* @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
* @vblanktime: Pointer to time to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
......@@ -801,7 +779,7 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* of the vblank interval that corresponds to the current vblank counter value.
*/
u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime)
ktime_t *vblanktime)
{
return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
vblanktime);
......@@ -810,11 +788,18 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
unsigned long seq, ktime_t now)
{
struct timespec64 tv = ktime_to_timespec64(now);
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
/*
* e->event is a user space structure, with hardcoded unsigned
* 32-bit seconds/microseconds. This is safe as we always use
* monotonic timestamps since linux-4.15
*/
e->event.tv_sec = tv.tv_sec;
e->event.tv_usec = tv.tv_nsec / 1000;
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe,
e->event.sequence);
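Monotonic time counts from boot, so the seconds value stays far below 2^32 for any realistic uptime, which is what makes the hardcoded u32 fields safe. A minimal userspace sketch of the same ns-to-sec/usec split (example value only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t now_ns = 1234567891234ULL;	/* stand-in for a ktime_t */
	uint32_t tv_sec = (uint32_t)(now_ns / 1000000000ULL);
	uint32_t tv_usec = (uint32_t)((now_ns % 1000000000ULL) / 1000ULL);

	printf("%u.%06u\n", tv_sec, tv_usec);	/* prints 1234.567891 */
	return 0;
}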
......@@ -869,7 +854,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
assert_spin_locked(&dev->event_lock);
e->pipe = pipe;
e->event.sequence = drm_vblank_count(dev, pipe);
e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
e->event.crtc_id = crtc->base.id;
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
......@@ -891,18 +876,18 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
unsigned int seq, pipe = drm_crtc_index(crtc);
struct timeval now;
ktime_t now;
if (dev->num_crtcs > 0) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
now = get_drm_timestamp();
now = ktime_get();
}
e->pipe = pipe;
e->event.crtc_id = crtc->base.id;
send_vblank_event(dev, e, seq, &now);
send_vblank_event(dev, e, seq, now);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
......@@ -1100,7 +1085,8 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
ktime_t now;
unsigned long irqflags;
unsigned int seq;
......@@ -1141,7 +1127,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
e->event.sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
send_vblank_event(dev, e, seq, now);
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
......@@ -1321,7 +1307,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e;
struct timeval now;
ktime_t now;
unsigned long flags;
unsigned int seq;
int ret;
......@@ -1367,7 +1353,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
e->event.sequence = vblwait->request.sequence;
if (vblank_passed(seq, vblwait->request.sequence)) {
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
send_vblank_event(dev, e, seq, now);
vblwait->reply.sequence = seq;
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
......@@ -1398,6 +1384,23 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
_DRM_VBLANK_NEXTONMISS));
}
static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
struct drm_wait_vblank_reply *reply)
{
ktime_t now;
struct timespec64 ts;
/*
* drm_wait_vblank_reply is a UAPI structure that uses 'long'
* to store the seconds. This is safe as we always use monotonic
* timestamps since linux-4.15.
*/
reply->sequence = drm_vblank_count_and_time(dev, pipe, &now);
ts = ktime_to_timespec64(now);
reply->tval_sec = (u32)ts.tv_sec;
reply->tval_usec = ts.tv_nsec / 1000;
}
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
......@@ -1439,12 +1442,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (dev->vblank_disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
struct timeval now;
vblwait->reply.sequence =
drm_vblank_count_and_time(dev, pipe, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
return 0;
}
......@@ -1487,11 +1485,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
}
if (ret != -EINTR) {
struct timeval now;
vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
DRM_DEBUG("crtc %d returning %u to client\n",
pipe, vblwait->reply.sequence);
......@@ -1507,7 +1501,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
ktime_t now;
unsigned int seq;
assert_spin_locked(&dev->event_lock);
......@@ -1525,7 +1519,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
send_vblank_event(dev, e, seq, now);
}
trace_drm_vblank_event(pipe, seq);
......
......@@ -7,8 +7,6 @@ config DRM_ETNAVIV
select SHMEM
select SYNC_FILE
select TMPFS
select IOMMU_API
select IOMMU_SUPPORT
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS
......
......@@ -10,6 +10,7 @@ etnaviv-y := \
etnaviv_gpu.o \
etnaviv_iommu_v2.o \
etnaviv_iommu.o \
etnaviv_mmu.o
etnaviv_mmu.o \
etnaviv_perfmon.o
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
......@@ -250,6 +250,42 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
}
}
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
/*
* We need at most 4 dwords in the return target:
* 1 event + 1 end + 1 wait + 1 link.
*/
dwords = 4;
target = etnaviv_buffer_reserve(gpu, buffer, dwords);
/* Signal sync point event */
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
/* Stop the FE to 'pause' the GPU */
CMD_END(buffer);
/* Append waitlink */
CMD_WAIT(buffer);
CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
buffer->user_size - 4);
/*
* Kick off the 'sync point' command by replacing the previous
* WAIT with a link to the address in the ring buffer.
*/
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH(dwords),
target);
}
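The ordering here matters: the sync-point commands are staged past the current buffer tail first, and only the final WAIT-to-LINK rewrite makes the front end jump into them. A minimal userspace model of that publish-last pattern (opcodes and layout hypothetical):

#include <stdint.h>
#include <stdio.h>

#define OP_WAIT 0x1u
#define OP_LINK 0x2u

static uint32_t ring[64];
static unsigned int ring_end;	/* one past the last written dword */

int main(void)
{
	/* Initial state: the front end spins on a WAIT at the tail. */
	unsigned int waitlink = ring_end;
	ring[ring_end++] = OP_WAIT;

	/* Stage new commands after the tail; the GPU cannot see them yet. */
	unsigned int target = ring_end;
	ring[ring_end++] = 0xE0E0u;	/* stand-in for the EVENT/END dwords */
	ring[ring_end++] = OP_WAIT;	/* the new wait to spin on */

	/* Publish: rewrite the old WAIT into a LINK to the staged commands. */
	ring[waitlink] = OP_LINK | (target << 8);
	printf("ring[%u] now links to dword %u\n", waitlink, target);
	return 0;
}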
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf)
......
......@@ -19,6 +19,7 @@
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_256K
#define SUBALLOC_GRANULE SZ_4K
......@@ -87,9 +88,10 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
size_t nr_bos)
size_t nr_bos, size_t nr_pmrs)
{
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_perfmon_request *pmrs;
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
sizeof(*cmdbuf));
int granule_offs, order, ret;
......@@ -98,6 +100,12 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
if (!cmdbuf)
return NULL;
sz = sizeof(*pmrs) * nr_pmrs;
pmrs = kzalloc(sz, GFP_KERNEL);
if (!pmrs)
goto out_free_cmdbuf;
cmdbuf->pmrs = pmrs;
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
......@@ -124,6 +132,10 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
return cmdbuf;
out_free_cmdbuf:
kfree(cmdbuf);
return NULL;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
......@@ -139,6 +151,7 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
kfree(cmdbuf->pmrs);
kfree(cmdbuf);
}
......
......@@ -21,6 +21,7 @@
struct etnaviv_gpu;
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_perfmon_request;
struct etnaviv_cmdbuf {
/* suballocator this cmdbuf is allocated from */
......@@ -38,6 +39,9 @@ struct etnaviv_cmdbuf {
u32 exec_state;
/* per GPU in-flight list */
struct list_head node;
/* perfmon requests */
unsigned int nr_pmrs;
struct etnaviv_perfmon_request *pmrs;
/* BOs attached to this command buffer */
unsigned int nr_bos;
struct etnaviv_vram_mapping *bo_map[0];
......@@ -49,7 +53,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
size_t nr_bos);
size_t nr_bos, size_t nr_pmrs);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
......
......@@ -23,6 +23,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
......@@ -451,6 +452,40 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
return ret;
}
static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_pm_domain *args = data;
struct etnaviv_gpu *gpu;
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
gpu = priv->gpu[args->pipe];
if (!gpu)
return -ENXIO;
return etnaviv_pm_query_dom(gpu, args);
}
static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_pm_signal *args = data;
struct etnaviv_gpu *gpu;
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
gpu = priv->gpu[args->pipe];
if (!gpu)
return -ENXIO;
return etnaviv_pm_query_sig(gpu, args);
}
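Userspace reaches these handlers through the new ETNAVIV_PM_QUERY_DOM/SIG ioctls. A hedged sketch of a single domain query, assuming libdrm's drmIoctl() and the drm_etnaviv_pm_domain layout (pipe/iter inputs, id/name/nr_signals outputs) from the etnaviv_drm.h uapi header accompanying this series:

#include <stdio.h>
#include <xf86drm.h>
#include <etnaviv_drm.h>	/* uapi header matching this kernel series */

/* Query the first perfmon domain of pipe 0; error handling kept minimal. */
int query_first_domain(int fd)
{
	struct drm_etnaviv_pm_domain dom = { .pipe = 0, .iter = 0 };

	if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_PM_QUERY_DOM, &dom))
		return -1;
	printf("domain %u: %s (%u signals)\n", dom.id, dom.name,
	       dom.nr_signals);
	return 0;
}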
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
......@@ -463,6 +498,8 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
......@@ -513,7 +550,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
.minor = 1,
.minor = 2,
};
/*
......
......@@ -26,7 +26,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/sizes.h>
......@@ -92,15 +91,12 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
u32 size, u32 flags);
struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
u32 size, u32 flags);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
......
......@@ -704,25 +704,6 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
u32 size, u32 flags)
{
struct drm_gem_object *obj;
int ret;
obj = __etnaviv_gem_new(dev, size, flags);
if (IS_ERR(obj))
return obj;
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
return obj;
}
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
struct etnaviv_gem_object **res)
......
......@@ -21,6 +21,7 @@
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
/*
* Cmdstream submission:
......@@ -283,6 +284,54 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
return 0;
}
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
struct etnaviv_cmdbuf *cmdbuf,
const struct drm_etnaviv_gem_submit_pmr *pmrs,
u32 nr_pms)
{
u32 i;
for (i = 0; i < nr_pms; i++) {
const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
struct etnaviv_gem_submit_bo *bo;
int ret;
ret = submit_bo(submit, r->read_idx, &bo);
if (ret)
return ret;
/* offset 0 is reserved for a sequence number used for userspace sync */
if (r->read_offset == 0) {
DRM_ERROR("perfmon request: offset is 0");
return -EINVAL;
}
if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
DRM_ERROR("perfmon request: offset %u outside object", i);
return -EINVAL;
}
if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
DRM_ERROR("perfmon request: flags are not valid");
return -EINVAL;
}
if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
DRM_ERROR("perfmon request: domain or signal not valid");
return -EINVAL;
}
cmdbuf->pmrs[i].flags = r->flags;
cmdbuf->pmrs[i].domain = r->domain;
cmdbuf->pmrs[i].signal = r->signal;
cmdbuf->pmrs[i].sequence = r->sequence;
cmdbuf->pmrs[i].offset = r->read_offset;
cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
}
return 0;
}
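For reference, a submit that samples one counter would attach a request shaped like this; a hedged sketch assuming only the drm_etnaviv_gem_submit_pmr fields the validation above actually reads (read_idx, read_offset, flags, domain, signal, sequence):

#include <etnaviv_drm.h>	/* uapi header matching this kernel series */

/* One POST-sample request: the counter value lands at offset 4 of the
 * first BO in the submit; the sequence number is echoed to offset 0
 * once the request has been processed, for userspace synchronization. */
struct drm_etnaviv_gem_submit_pmr pmr = {
	.flags       = ETNA_PM_PROCESS_POST,
	.domain      = 0,	/* domain/signal ids come from PM_QUERY_* */
	.signal      = 0,
	.sequence    = 1,
	.read_idx    = 0,	/* index into the submit's bos[] array */
	.read_offset = 4,	/* must be non-zero and inside the BO */
};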
static void submit_cleanup(struct etnaviv_gem_submit *submit)
{
unsigned i;
......@@ -306,6 +355,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_submit *args = data;
struct drm_etnaviv_gem_submit_reloc *relocs;
struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
......@@ -347,11 +397,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
ALIGN(args->stream_size, 8) + 8,
args->nr_bos);
if (!bos || !relocs || !stream || !cmdbuf) {
args->nr_bos, args->nr_pmrs);
if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
ret = -ENOMEM;
goto err_submit_cmds;
}
......@@ -373,6 +424,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_cmds;
}
ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
args->nr_pmrs * sizeof(*pmrs));
if (ret) {
ret = -EFAULT;
goto err_submit_cmds;
}
cmdbuf->nr_pmrs = args->nr_pmrs;
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
if (ret) {
......@@ -441,6 +500,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
if (ret)
goto out;
memcpy(cmdbuf->vaddr, stream, args->stream_size);
cmdbuf->user_size = ALIGN(args->stream_size, 8);
......@@ -496,6 +559,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
kvfree(bos);
if (relocs)
kvfree(relocs);
if (pmrs)
kvfree(pmrs);
return ret;
}
......@@ -25,6 +25,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
......@@ -420,9 +421,10 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
gpu->base_rate_shader >> gpu->freq_scale);
} else {
unsigned int fscale = 1 << (6 - gpu->freq_scale);
u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
etnaviv_gpu_load_clock(gpu, clock);
}
}
......@@ -433,24 +435,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
unsigned long timeout;
bool failed = true;
/* TODO
*
* - clock gating
* - pulse eater
* - what about VG?
*/
/* We hope that the GPU resets in under one second */
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
/* enable clock */
etnaviv_gpu_update_clock(gpu);
control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
/* Wait for stable clock. Vivante's code waited for 1ms */
usleep_range(1000, 10000);
unsigned int fscale = 1 << (6 - gpu->freq_scale);
control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
etnaviv_gpu_load_clock(gpu, control);
/* isolate the GPU. */
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
......@@ -461,7 +453,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
/* wait for reset. */
msleep(1);
usleep_range(10, 20);
/* reset soft reset bit. */
control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
......@@ -490,6 +482,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
continue;
}
/* disable debug registers, as they are not normally needed */
control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
failed = false;
break;
}
......@@ -721,7 +717,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/* Create buffer: */
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
if (!gpu->buffer) {
ret = -ENOMEM;
dev_err(gpu->dev, "could not create command buffer\n");
......@@ -739,10 +735,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Setup event management */
spin_lock_init(&gpu->event_spinlock);
init_completion(&gpu->event_free);
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
gpu->event[i].used = false;
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
complete(&gpu->event_free);
}
/* Now program the hardware */
mutex_lock(&gpu->lock);
......@@ -926,7 +921,7 @@ static void recover_worker(struct work_struct *work)
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
recover_work);
unsigned long flags;
unsigned int i;
unsigned int i = 0;
dev_err(gpu->dev, "hangcheck recover!\n");
......@@ -945,14 +940,12 @@ static void recover_worker(struct work_struct *work)
/* complete all events, the GPU won't do it after the reset */
spin_lock_irqsave(&gpu->event_spinlock, flags);
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (!gpu->event[i].used)
continue;
for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
}
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
gpu->completed_fence = gpu->active_fence;
......@@ -1140,30 +1133,45 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
* event management:
*/
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
unsigned int *events)
{
unsigned long ret, flags;
unsigned int i, event = ~0U;
unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
unsigned i, acquired = 0;
ret = wait_for_completion_timeout(&gpu->event_free,
msecs_to_jiffies(10 * 10000));
if (!ret)
dev_err(gpu->dev, "wait_for_completion_timeout failed");
for (i = 0; i < nr_events; i++) {
unsigned long ret;
spin_lock_irqsave(&gpu->event_spinlock, flags);
ret = wait_for_completion_timeout(&gpu->event_free, timeout);
/* find first free event */
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (gpu->event[i].used == false) {
gpu->event[i].used = true;
event = i;
break;
if (!ret) {
dev_err(gpu->dev, "wait_for_completion_timeout failed");
goto out;
}
acquired++;
timeout = ret;
}
spin_lock_irqsave(&gpu->event_spinlock, flags);
for (i = 0; i < nr_events; i++) {
int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
events[i] = event;
memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
set_bit(event, gpu->event_bitmap);
}
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
return event;
return 0;
out:
for (i = 0; i < acquired; i++)
complete(&gpu->event_free);
return -EBUSY;
}
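The allocation is two-phase: all completion tokens are taken up front, and only then are free bits claimed under the spinlock, so a shortage never leaves a half-visible allocation behind. A minimal single-threaded model of the scheme (names hypothetical):

#include <stdio.h>

#define NR_EVENTS 30

static unsigned long bitmap;		/* 1 = event slot in use */
static int free_tokens = NR_EVENTS;	/* models the counting completion */

static int event_alloc_model(unsigned int nr, unsigned int *events)
{
	unsigned int i;

	if (free_tokens < (int)nr)
		return -1;	/* kernel: timeout -> roll back and -EBUSY */
	free_tokens -= (int)nr;
	for (i = 0; i < nr; i++) {
		unsigned int bit = (unsigned int)__builtin_ctzl(~bitmap);

		bitmap |= 1UL << bit;	/* claim the first free slot */
		events[i] = bit;
	}
	return 0;
}

int main(void)
{
	unsigned int ev[3];

	if (!event_alloc_model(3, ev))
		printf("events %u %u %u\n", ev[0], ev[1], ev[2]);
	return 0;
}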
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
......@@ -1172,12 +1180,12 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
spin_lock_irqsave(&gpu->event_spinlock, flags);
if (gpu->event[event].used == false) {
if (!test_bit(event, gpu->event_bitmap)) {
dev_warn(gpu->dev, "event %u is already marked as free",
event);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
} else {
gpu->event[event].used = false;
clear_bit(event, gpu->event_bitmap);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
complete(&gpu->event_free);
......@@ -1311,12 +1319,71 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
pm_runtime_put_autosuspend(gpu->dev);
}
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
struct etnaviv_event *event, unsigned int flags)
{
const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
unsigned int i;
for (i = 0; i < cmdbuf->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
if (pmr->flags == flags)
etnaviv_perfmon_process(gpu, pmr);
}
}
static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
u32 val;
/* disable clock gating */
val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
/* enable debug register */
val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
unsigned int i;
u32 val;
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
for (i = 0; i < cmdbuf->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
/* disable debug register */
val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
/* enable clock gating */
val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
}
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
struct dma_fence *fence;
unsigned int event, i;
unsigned int i, nr_events = 1, event[3];
int ret;
ret = etnaviv_gpu_pm_get_sync(gpu);
......@@ -1332,10 +1399,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
*
*/
event = event_alloc(gpu);
if (unlikely(event == ~0U)) {
DRM_ERROR("no free event\n");
ret = -EBUSY;
/*
* if there are performance monitor requests we need to have
* - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
* requests.
* - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
* and update the sequence number for userspace.
*/
if (cmdbuf->nr_pmrs)
nr_events = 3;
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
goto out_pm_put;
}
......@@ -1343,12 +1419,14 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
for (i = 0; i < nr_events; i++)
event_free(gpu, event[i]);
ret = -ENOMEM;
goto out_unlock;
}
gpu->event[event].fence = fence;
gpu->event[event[0]].fence = fence;
submit->fence = dma_fence_get(fence);
gpu->active_fence = submit->fence->seqno;
......@@ -1358,7 +1436,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
gpu->lastctx = cmdbuf->ctx;
}
etnaviv_buffer_queue(gpu, event, cmdbuf);
if (cmdbuf->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
gpu->event[event[1]].cmdbuf = cmdbuf;
etnaviv_sync_point_queue(gpu, event[1]);
}
etnaviv_buffer_queue(gpu, event[0], cmdbuf);
if (cmdbuf->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
gpu->event[event[2]].cmdbuf = cmdbuf;
etnaviv_sync_point_queue(gpu, event[2]);
}
cmdbuf->fence = fence;
list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
......@@ -1394,6 +1484,24 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
return ret;
}
static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
event->sync_point(gpu, event);
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
static void sync_point_worker(struct work_struct *work)
{
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
sync_point_work);
etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
event_free(gpu, gpu->sync_point_event);
}
/*
* Init/Cleanup:
*/
......@@ -1440,7 +1548,15 @@ static irqreturn_t irq_handler(int irq, void *data)
dev_dbg(gpu->dev, "event %u\n", event);
if (gpu->event[event].sync_point) {
gpu->sync_point_event = event;
etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
}
fence = gpu->event[event].fence;
if (!fence)
continue;
gpu->event[event].fence = NULL;
dma_fence_signal(fence);
......@@ -1645,6 +1761,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
INIT_LIST_HEAD(&gpu->active_cmd_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
......
......@@ -88,13 +88,17 @@ struct etnaviv_chip_identity {
};
struct etnaviv_event {
bool used;
struct dma_fence *fence;
struct etnaviv_cmdbuf *cmdbuf;
void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;
#define ETNA_NR_EVENTS 30
struct etnaviv_gpu {
struct drm_device *drm;
struct thermal_cooling_device *cooling;
......@@ -112,7 +116,8 @@ struct etnaviv_gpu {
u32 memory_base;
/* event management: */
struct etnaviv_event event[30];
DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
struct etnaviv_event event[ETNA_NR_EVENTS];
struct completion event_free;
spinlock_t event_spinlock;
......@@ -133,6 +138,10 @@ struct etnaviv_gpu {
/* worker for handling active-list retiring: */
struct work_struct retire_work;
/* worker for handling 'sync' points: */
struct work_struct sync_point_work;
int sync_point_event;
void __iomem *mmio;
int irq;
......
......@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
......@@ -31,174 +30,115 @@
#define GPU_MEM_START 0x80000000
struct etnaviv_iommu_domain_pgtable {
u32 *pgtable;
dma_addr_t paddr;
struct etnaviv_iommuv1_domain {
struct etnaviv_iommu_domain base;
u32 *pgtable_cpu;
dma_addr_t pgtable_dma;
};
struct etnaviv_iommu_domain {
struct iommu_domain domain;
struct device *dev;
void *bad_page_cpu;
dma_addr_t bad_page_dma;
struct etnaviv_iommu_domain_pgtable pgtable;
spinlock_t map_lock;
};
static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
return container_of(domain, struct etnaviv_iommu_domain, domain);
}
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
size_t size)
{
pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
if (!pgtable->pgtable)
return -ENOMEM;
return 0;
}
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
size_t size)
static struct etnaviv_iommuv1_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
}
static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
unsigned long iova)
{
/* calculate index into page table */
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
phys_addr_t paddr;
paddr = pgtable->pgtable[index];
return paddr;
return container_of(domain, struct etnaviv_iommuv1_domain, base);
}
static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
unsigned long iova, phys_addr_t paddr)
{
/* calculate index into page table */
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
pgtable->pgtable[index] = paddr;
}
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
{
u32 *p;
int ret, i;
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
SZ_4K,
&etnaviv_domain->bad_page_dma,
GFP_KERNEL);
if (!etnaviv_domain->bad_page_cpu)
int i;
etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->base.bad_page_dma,
GFP_KERNEL);
if (!etnaviv_domain->base.bad_page_cpu)
return -ENOMEM;
p = etnaviv_domain->bad_page_cpu;
p = etnaviv_domain->base.bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
if (ret < 0) {
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->bad_page_cpu,
etnaviv_domain->bad_page_dma);
return ret;
etnaviv_domain->pgtable_cpu =
dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
&etnaviv_domain->pgtable_dma,
GFP_KERNEL);
if (!etnaviv_domain->pgtable_cpu) {
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
return -ENOMEM;
}
for (i = 0; i < PT_ENTRIES; i++)
etnaviv_domain->pgtable.pgtable[i] =
etnaviv_domain->bad_page_dma;
spin_lock_init(&etnaviv_domain->map_lock);
etnaviv_domain->pgtable_cpu[i] =
etnaviv_domain->base.bad_page_dma;
return 0;
}
static void etnaviv_domain_free(struct iommu_domain *domain)
static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(domain);
pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
etnaviv_domain->pgtable_cpu,
etnaviv_domain->pgtable_dma);
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->bad_page_cpu,
etnaviv_domain->bad_page_dma);
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
kfree(etnaviv_domain);
}
static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
spin_lock(&etnaviv_domain->map_lock);
pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
spin_unlock(&etnaviv_domain->map_lock);
etnaviv_domain->pgtable_cpu[index] = paddr;
return 0;
}
static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
unsigned long iova, size_t size)
{
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(domain);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
spin_lock(&etnaviv_domain->map_lock);
pgtable_write(&etnaviv_domain->pgtable, iova,
etnaviv_domain->bad_page_dma);
spin_unlock(&etnaviv_domain->map_lock);
etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
return SZ_4K;
}
static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
return pgtable_read(&etnaviv_domain->pgtable, iova);
}
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
return PT_SIZE;
}
static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(domain);
memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
}
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.ops = {
.domain_free = etnaviv_domain_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
.iova_to_phys = etnaviv_iommu_iova_to_phys,
.pgsize_bitmap = SZ_4K,
},
.dump_size = etnaviv_iommuv1_dump_size,
.dump = etnaviv_iommuv1_dump,
};
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommu_domain *etnaviv_domain =
struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
u32 pgtable;
......@@ -210,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
/* set page table address in MC */
pgtable = (u32)etnaviv_domain->pgtable.paddr;
pgtable = (u32)etnaviv_domain->pgtable_dma;
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
......@@ -219,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
.free = etnaviv_iommuv1_domain_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
.dump_size = etnaviv_iommuv1_dump_size,
.dump = etnaviv_iommuv1_dump,
};
struct etnaviv_iommu_domain *
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommu_domain *etnaviv_domain;
struct etnaviv_iommuv1_domain *etnaviv_domain;
struct etnaviv_iommu_domain *domain;
int ret;
etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
if (!etnaviv_domain)
return NULL;
etnaviv_domain->dev = gpu->dev;
domain = &etnaviv_domain->base;
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
domain->dev = gpu->dev;
domain->base = GPU_MEM_START;
domain->size = PT_ENTRIES * SZ_4K;
domain->ops = &etnaviv_iommuv1_ops;
ret = __etnaviv_iommu_init(etnaviv_domain);
if (ret)
goto out_free;
return &etnaviv_domain->domain;
return &etnaviv_domain->base;
out_free:
kfree(etnaviv_domain);
......
......@@ -18,11 +18,14 @@
#define __ETNAVIV_IOMMU_H__
struct etnaviv_gpu;
struct etnaviv_iommu_domain;
struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
struct etnaviv_iommu_domain *
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_H__ */
......@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
......@@ -40,10 +39,7 @@
#define MMUv2_MAX_STLB_ENTRIES 1024
struct etnaviv_iommuv2_domain {
struct iommu_domain domain;
struct device *dev;
void *bad_page_cpu;
dma_addr_t bad_page_dma;
struct etnaviv_iommu_domain base;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
......@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
dma_addr_t stlb_dma[1024];
};
static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
return container_of(domain, struct etnaviv_iommuv2_domain, domain);
return container_of(domain, struct etnaviv_iommuv2_domain, base);
}
static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
......@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
if (size != SZ_4K)
return -EINVAL;
if (prot & IOMMU_WRITE)
if (prot & ETNAVIV_PROT_WRITE)
entry |= MMUv2_PTE_WRITEABLE;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
......@@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
return 0;
}
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
unsigned long iova, size_t size)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
......@@ -97,38 +95,26 @@ static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
return SZ_4K;
}
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int mtlb_entry, stlb_entry;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
}
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
u32 *p;
int ret, i, j;
/* allocate scratch page */
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
SZ_4K,
&etnaviv_domain->bad_page_dma,
GFP_KERNEL);
if (!etnaviv_domain->bad_page_cpu) {
etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->base.bad_page_dma,
GFP_KERNEL);
if (!etnaviv_domain->base.bad_page_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
p = etnaviv_domain->bad_page_cpu;
p = etnaviv_domain->base.bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
GFP_KERNEL);
......@@ -140,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
/* pre-populate STLB pages (may want to switch to on-demand later) */
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
etnaviv_domain->stlb_cpu[i] =
dma_alloc_coherent(etnaviv_domain->dev,
dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->stlb_dma[i],
GFP_KERNEL);
......@@ -159,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
return 0;
fail_mem:
if (etnaviv_domain->bad_page_cpu)
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->bad_page_cpu,
etnaviv_domain->bad_page_dma);
if (etnaviv_domain->base.bad_page_cpu)
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
if (etnaviv_domain->mtlb_cpu)
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
......@@ -179,23 +165,23 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
return ret;
}
static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int i;
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->bad_page_cpu,
etnaviv_domain->bad_page_dma);
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
......@@ -203,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
vfree(etnaviv_domain);
}
static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
......@@ -217,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
return dump_size;
}
static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
......@@ -230,18 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.ops = {
.domain_free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
.pgsize_bitmap = SZ_4K,
},
.dump_size = etnaviv_iommuv2_dump_size,
.dump = etnaviv_iommuv2_dump,
};
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
......@@ -254,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->bad_page_dma);
(u32)etnaviv_domain->base.bad_page_dma);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
.free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
.dump_size = etnaviv_iommuv2_dump_size,
.dump = etnaviv_iommuv2_dump,
};
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain;
struct etnaviv_iommu_domain *domain;
int ret;
etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
if (!etnaviv_domain)
return NULL;
etnaviv_domain->dev = gpu->dev;
domain = &etnaviv_domain->base;
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
etnaviv_domain->domain.geometry.aperture_start = 0;
etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
domain->dev = gpu->dev;
domain->base = 0;
domain->size = (u64)SZ_1G * 4;
domain->ops = &etnaviv_iommuv2_ops;
ret = etnaviv_iommuv2_init(etnaviv_domain);
if (ret)
goto out_free;
return &etnaviv_domain->domain;
return &etnaviv_domain->base;
out_free:
vfree(etnaviv_domain);
......
......@@ -22,17 +22,64 @@
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
unsigned long iova, size_t size)
{
DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
return 0;
size_t unmapped_page, unmapped = 0;
size_t pgsize = SZ_4K;
if (!IS_ALIGNED(iova | size, pgsize)) {
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
iova, size, pgsize);
return;
}
while (unmapped < size) {
unmapped_page = domain->ops->unmap(domain, iova, pgsize);
if (!unmapped_page)
break;
iova += unmapped_page;
unmapped += unmapped_page;
}
}
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
struct iommu_domain *domain = iommu->domain;
unsigned long orig_iova = iova;
size_t pgsize = SZ_4K;
size_t orig_size = size;
int ret = 0;
if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
iova, &paddr, size, pgsize);
return -EINVAL;
}
while (size) {
ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
iova += pgsize;
paddr += pgsize;
size -= pgsize;
}
/* unroll mapping in case something went wrong */
if (ret)
etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
return ret;
}
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
......@@ -47,7 +94,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
......@@ -62,27 +109,24 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
iommu_unmap(domain, da, bytes);
etnaviv_domain_unmap(domain, da, bytes);
da += bytes;
}
return ret;
}
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
struct sg_table *sgt, unsigned len)
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
struct sg_table *sgt, unsigned len)
{
struct iommu_domain *domain = iommu->domain;
struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
size_t unmapped;
unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
return unmapped;
etnaviv_domain_unmap(domain, da, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
......@@ -90,8 +134,6 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
da += bytes;
}
return 0;
}
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
......@@ -237,7 +279,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
IOMMU_READ | IOMMU_WRITE);
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
drm_mm_remove_node(node);
......@@ -271,7 +313,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
drm_mm_takedown(&mmu->mm);
iommu_domain_free(mmu->domain);
mmu->domain->ops->free(mmu->domain);
kfree(mmu);
}
......@@ -303,11 +345,7 @@ struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
mutex_init(&mmu->lock);
INIT_LIST_HEAD(&mmu->mappings);
drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
mmu->domain->geometry.aperture_end -
mmu->domain->geometry.aperture_start + 1);
iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
return mmu;
}
......@@ -338,8 +376,8 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
mutex_unlock(&mmu->lock);
return ret;
}
ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
IOMMU_READ);
ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
size, ETNAVIV_PROT_READ);
if (ret < 0) {
drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
......@@ -362,25 +400,17 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
if (mmu->version == ETNAVIV_IOMMU_V2) {
mutex_lock(&mmu->lock);
iommu_unmap(mmu->domain,iova, size);
etnaviv_domain_unmap(mmu->domain, iova, size);
drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
}
}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
struct etnaviv_iommu_ops *ops;
ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
return ops->dump_size(iommu->domain);
return iommu->domain->ops->dump_size(iommu->domain);
}
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
struct etnaviv_iommu_ops *ops;
ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
ops->dump(iommu->domain, buf);
iommu->domain->ops->dump(iommu->domain, buf);
}
......@@ -17,7 +17,8 @@
#ifndef __ETNAVIV_MMU_H__
#define __ETNAVIV_MMU_H__
#include <linux/iommu.h>
#define ETNAVIV_PROT_READ (1 << 0)
#define ETNAVIV_PROT_WRITE (1 << 1)
enum etnaviv_iommu_version {
ETNAVIV_IOMMU_V1 = 0,
......@@ -26,16 +27,31 @@ enum etnaviv_iommu_version {
struct etnaviv_gpu;
struct etnaviv_vram_mapping;
struct etnaviv_iommu_domain;
struct etnaviv_iommu_ops {
struct iommu_ops ops;
size_t (*dump_size)(struct iommu_domain *);
void (*dump)(struct iommu_domain *, void *);
struct etnaviv_iommu_domain_ops {
void (*free)(struct etnaviv_iommu_domain *);
int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
size_t size);
size_t (*dump_size)(struct etnaviv_iommu_domain *);
void (*dump)(struct etnaviv_iommu_domain *, void *);
};
struct etnaviv_iommu_domain {
struct device *dev;
void *bad_page_cpu;
dma_addr_t bad_page_dma;
u64 base;
u64 size;
const struct etnaviv_iommu_domain_ops *ops;
};
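Each MMU-version backend now supplies this vtable and fills in base/size for the drm_mm allocator, replacing the iommu_domain geometry used before. A hypothetical backend wiring; all my_v1_* names are illustrative, not taken from the patch:

static void   my_v1_domain_free(struct etnaviv_iommu_domain *domain);
static int    my_v1_domain_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot);
static size_t my_v1_domain_unmap(struct etnaviv_iommu_domain *domain,
				 unsigned long iova, size_t size);
static size_t my_v1_domain_dump_size(struct etnaviv_iommu_domain *domain);
static void   my_v1_domain_dump(struct etnaviv_iommu_domain *domain, void *buf);

static const struct etnaviv_iommu_domain_ops my_v1_domain_ops = {
	.free      = my_v1_domain_free,
	.map       = my_v1_domain_map,
	.unmap     = my_v1_domain_unmap,
	.dump_size = my_v1_domain_dump_size,
	.dump      = my_v1_domain_dump,
};

static struct etnaviv_iommu_domain *my_v1_domain_alloc(struct device *dev)
{
	struct etnaviv_iommu_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->dev  = dev;
	domain->base = SZ_4K;		/* example aperture start */
	domain->size = SZ_2G - SZ_4K;	/* example aperture size */
	domain->ops  = &my_v1_domain_ops;

	return domain;	/* etnaviv_iommu_new() feeds base/size to drm_mm_init() */
}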
struct etnaviv_iommu {
struct etnaviv_gpu *gpu;
struct iommu_domain *domain;
struct etnaviv_iommu_domain *domain;
enum etnaviv_iommu_version version;
......@@ -49,18 +65,11 @@ struct etnaviv_iommu {
struct etnaviv_gem_object;
int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
int cnt);
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
struct sg_table *sgt, unsigned len, int prot);
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
struct sg_table *sgt, unsigned len);
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
struct drm_mm_node *vram_node, size_t size,
......@@ -73,6 +82,7 @@ size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_MMU_H__ */
This diff is collapsed.
/*
* Copyright (C) 2017 Etnaviv Project
* Copyright (C) 2017 Zodiac Inflight Innovations
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ETNAVIV_PERFMON_H__
#define __ETNAVIV_PERFMON_H__
struct etnaviv_gpu;
struct drm_etnaviv_pm_domain;
struct drm_etnaviv_pm_signal;
struct etnaviv_perfmon_request
{
u32 flags;
u8 domain;
u8 signal;
u32 sequence;
/* bo to store a value */
u32 *bo_vma;
u32 offset;
};
int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_domain *domain);
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_signal *signal);
int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
u32 exec_state);
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
const struct etnaviv_perfmon_request *pmr);
#endif /* __ETNAVIV_PERFMON_H__ */
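The header gives the submit path three stages: enumerate domains and signals for userspace, validate a submitted request against the execution state, and process it so the sampled value lands in the readback BO. A minimal sketch of that flow, assuming the caller has already mapped the BO into pmr->bo_vma; nothing here is verbatim driver code:

static int example_handle_pmr(struct etnaviv_gpu *gpu,
			      const struct drm_etnaviv_gem_submit_pmr *r,
			      u32 exec_state,
			      const struct etnaviv_perfmon_request *pmr)
{
	int ret;

	/* Reject unknown domain/signal indices for this exec state. */
	ret = etnaviv_pm_req_validate(r, exec_state);
	if (ret)
		return ret;

	/* Sample the counter into the readback BO described by pmr. */
	etnaviv_perfmon_process(gpu, pmr);

	return 0;
}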
......@@ -237,7 +237,7 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
if (!gct)
return -1;
return -ENOMEM;
gct_virtual = ioremap(addr + sizeof(vbt),
sizeof(*gct) * vbt.panel_count);
......
......@@ -37,6 +37,7 @@
#include "psb_drv.h"
#include "psb_intel_sdvo_regs.h"
#include "psb_intel_reg.h"
#include <linux/kernel.h>
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
......@@ -62,8 +63,6 @@ static const char *tv_format_names[] = {
"SECAM_60"
};
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
struct psb_intel_sdvo {
struct gma_encoder base;
......@@ -148,7 +147,7 @@ struct psb_intel_sdvo_connector {
int force_audio;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
u8 tv_format_supported[ARRAY_SIZE(tv_format_names)];
int format_supported_num;
struct drm_property *tv_format;
......@@ -1709,7 +1708,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == psb_intel_sdvo_connector->tv_format) {
if (val >= TV_FORMAT_NUM)
if (val >= ARRAY_SIZE(tv_format_names))
return -EINVAL;
if (psb_intel_sdvo->tv_format_index ==
......@@ -2269,7 +2268,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
return false;
psb_intel_sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
for (i = 0 ; i < ARRAY_SIZE(tv_format_names); i++)
if (format_map & (1 << i))
psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
......
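Replacing the open-coded TV_FORMAT_NUM with ARRAY_SIZE() (hence the new <linux/kernel.h> include) leaves one source of truth for the element count, and the kernel's ARRAY_SIZE additionally fails to compile when handed a pointer instead of an array. The idiom in isolation:

#include <linux/kernel.h>

static const char *example_names[] = { "NTSC_M", "PAL_B", "SECAM_60" };

static void example_list_formats(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_names); i++)
		pr_debug("format %zu: %s\n", i, example_names[i]);
}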
......@@ -36,7 +36,7 @@ static int hibmc_connector_mode_valid(struct drm_connector *connector,
static struct drm_encoder *
hibmc_connector_best_encoder(struct drm_connector *connector)
{
return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
static const struct drm_connector_helper_funcs
......
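The extra NULL argument reflects a DRM core change: the mode-object lookup helpers now take a struct drm_file * so lookups can be confined to one DRM file, and passing NULL keeps the old device-global behaviour. The assumed updated prototype:

/* Assumed prototype after the API change; NULL file_priv means an
 * unrestricted, device-global lookup as before. */
struct drm_encoder *drm_encoder_find(struct drm_device *dev,
				     struct drm_file *file_priv,
				     uint32_t id);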
......@@ -237,8 +237,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
}
remote = of_graph_get_remote_node(np, 0, 0);
if (IS_ERR(remote))
return PTR_ERR(remote);
if (!remote)
return -ENODEV;
drm_of_component_match_add(dev, &match, compare_of, remote);
of_node_put(remote);
......
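of_graph_get_remote_node() reports failure with NULL rather than an ERR_PTR, so the old IS_ERR()/PTR_ERR() pair never caught anything and a failed lookup sailed through as a NULL pointer. The corrected idiom, sketched around the call site above (compare_of as in the hunk):

static int example_add_remote(struct device *dev, struct device_node *np,
			      struct component_match **match)
{
	struct device_node *remote;

	remote = of_graph_get_remote_node(np, 0, 0);
	if (!remote)		/* NULL on failure, never an ERR_PTR */
		return -ENODEV;

	drm_of_component_match_add(dev, match, compare_of, remote);
	of_node_put(remote);	/* drop the reference the getter took */

	return 0;
}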
......@@ -12,6 +12,7 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
......
......@@ -139,7 +139,8 @@ i915-y += i915_perf.o \
i915_oa_bxt.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o
i915_oa_glk.o \
i915_oa_cflgt2.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
......
......@@ -101,7 +101,7 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
......@@ -110,13 +110,25 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
u64 first_gfn, first_mfn;
phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
u64 first_gfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
if (map) {
vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
MEMREMAP_WC);
if (!vgpu->gm.aperture_va)
return -ENOMEM;
} else {
memunmap(vgpu->gm.aperture_va);
vgpu->gm.aperture_va = NULL;
}
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
......@@ -124,14 +136,16 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
vgpu_aperture_sz(vgpu) >>
PAGE_SHIFT, map);
if (ret)
aperture_pa >> PAGE_SHIFT,
aperture_sz >> PAGE_SHIFT,
map);
if (ret) {
memunmap(vgpu->gm.aperture_va);
vgpu->gm.aperture_va = NULL;
return ret;
}
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
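The reworked map_aperture() takes a write-combined kernel mapping of the host aperture up front and hands it back on every subsequent failure, keeping memremap() and memunmap() balanced. That shape in isolation, with hypothetical names:

struct example_state {
	void *va;
};

static int example_notify_hypervisor(phys_addr_t pa, size_t sz);	/* fallible */

static int example_map_aperture(struct example_state *st,
				phys_addr_t pa, size_t sz)
{
	int ret;

	st->va = memremap(pa, sz, MEMREMAP_WC);	/* NULL on failure */
	if (!st->va)
		return -ENOMEM;

	ret = example_notify_hypervisor(pa, sz);
	if (ret) {
		memunmap(st->va);	/* unwind before reporting the error */
		st->va = NULL;
	}

	return ret;
}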
......@@ -275,7 +289,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
......
......@@ -1576,11 +1576,11 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
static uint32_t find_bb_size(struct parser_exec_state *s)
static int find_bb_size(struct parser_exec_state *s)
{
unsigned long gma = 0;
struct cmd_info *info;
uint32_t bb_size = 0;
int bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
......@@ -1637,6 +1637,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
/* get the size of the batch buffer */
bb_size = find_bb_size(s);
if (bb_size < 0)
return -EINVAL;
/* allocate shadow batch buffer */
entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
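Changing find_bb_size() from uint32_t to int is what makes the new bb_size < 0 check meaningful: a negative errno stored in an unsigned type reads back as an enormous, apparently valid size. The caller-side pattern, sketched:

static int example_checked_bb_size(struct parser_exec_state *s)
{
	int sz = find_bb_size(s);	/* may now carry a negative errno */

	if (sz < 0)			/* detectable only because signed */
		return -EINVAL;

	return sz;
}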
......@@ -2603,7 +2605,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
u32 *cs;
void *shadow_ring_buffer_va;
int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
......@@ -2616,34 +2619,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
if (IS_ERR(cs))
return PTR_ERR(cs);
if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
void *va = vgpu->reserve_ring_buffer_va[ring_id];
/* realloc the new ring buffer if needed */
vgpu->reserve_ring_buffer_va[ring_id] =
krealloc(va, workload->rb_len, GFP_KERNEL);
if (!vgpu->reserve_ring_buffer_va[ring_id]) {
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
return -ENOMEM;
}
vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
}
shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = cs;
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_top, cs);
gma_head, gma_top, shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
cs += ret / sizeof(u32);
shadow_ring_buffer_va += ret;
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
cs += ret / sizeof(u32);
intel_ring_advance(workload->req, cs);
return 0;
}
......
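Instead of borrowing shadow space from the request ring via intel_ring_begin(), each vGPU now keeps a per-ring scratch buffer that only grows, reallocated when a workload's ring exceeds anything seen before. One caution with this pattern: krealloc() returns NULL on failure while leaving the old allocation live, so routing the result through a temporary avoids losing the old pointer (the hunk above assigns directly, which drops it on failure). A standalone sketch:

static void *example_grow_only(void **buf, size_t *cap, size_t need)
{
	if (need > *cap) {
		void *n = krealloc(*buf, need, GFP_KERNEL);

		if (!n)			/* *buf is still valid here */
			return NULL;

		*buf = n;
		*cap = need;
	}

	return *buf;
}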
This diff is collapsed.
......@@ -1647,14 +1647,13 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
return 0;
atomic_inc(&mm->pincount);
if (!mm->shadowed) {
ret = shadow_mm(mm);
if (ret)
return ret;
}
atomic_inc(&mm->pincount);
list_del_init(&mm->lru_list);
list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
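Moving the atomic_inc() after shadow_mm() follows the usual rule of committing a reference only once the fallible step has succeeded; previously an error return from shadow_mm() left a stray pin behind. In isolation, with hypothetical names:

struct example_mm {
	bool shadowed;
	atomic_t pincount;
};

static int example_shadow(struct example_mm *mm);	/* fallible */

static int example_pin(struct example_mm *mm)
{
	int ret;

	if (!mm->shadowed) {
		ret = example_shadow(mm);
		if (ret)
			return ret;	/* no count taken yet */
	}

	atomic_inc(&mm->pincount);	/* commit only after success */

	return 0;
}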
......@@ -1972,7 +1971,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
*/
se.val64 |= _PAGE_PRESENT | _PAGE_RW;
if (type == GTT_TYPE_PPGTT_PDE_PT)
se.val64 |= PPAT_CACHED_INDEX;
se.val64 |= PPAT_CACHED;
for (i = 0; i < page_entry_num; i++)
ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
......
......@@ -111,7 +111,7 @@ static void init_device_info(struct intel_gvt *gvt)
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
......
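PCI_CFG_SPACE_EXP_SIZE comes from <linux/pci.h>; using it instead of a bare 256 widens the emulated config space to the 4 KiB PCI Express extended layout, which is also why the bounds checks in the cfg read/write handlers earlier in this diff were switched to cfg_space_size. For reference:

/* From include/linux/pci.h: */
#define PCI_CFG_SPACE_SIZE	256	/* conventional PCI config space */
#define PCI_CFG_SPACE_EXP_SIZE	4096	/* PCI Express extended config space */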
This diff is collapsed. (The remaining file diffs in this merge are likewise collapsed.)