Merge tag 'mediatek-drm-fixes-20260119' of https://git.kernel.org/pub/scm/linux/kernel/git/chunkuang.hu/linux into drm-fixes
Mediatek DRM Fixes - 20260119

1. Fix platform_get_irq() error checking
2. HDMI DDC v2 driver fixes
3. dpi: Find next bridge during probe
4. mtk_gem: Partial refactor and use drm_gem_dma_object
5. dt-bindings: Fix typo 'hardwares' to 'hardware'

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Chun-Kuang Hu <chunkuang.hu@kernel.org>
Link: https://patch.msgid.link/20260119154717.4735-1-chunkuang.hu@kernel.org
@@ -11,7 +11,7 @@ maintainers:
   - Jitao shi <jitao.shi@mediatek.com>
 
 description: |
-  MediaTek DP and eDP are different hardwares and there are some features
+  MediaTek DP and eDP are different hardware and there are some features
   which are not supported for eDP. For example, audio is not supported for
   eDP. Therefore, we need to use two different compatibles to describe them.
   In addition, We just need to enable the power domain of DP, so the clock
@@ -8,7 +8,7 @@ config DRM_MEDIATEK
 	depends on OF
 	depends on MTK_MMSYS
 	select DRM_CLIENT_SELECTION
-	select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
+	select DRM_GEM_DMA_HELPER
 	select DRM_KMS_HELPER
 	select DRM_DISPLAY_HELPER
 	select DRM_BRIDGE_CONNECTOR
@@ -836,20 +836,6 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
 				 enum drm_bridge_attach_flags flags)
 {
 	struct mtk_dpi *dpi = bridge_to_dpi(bridge);
-	int ret;
-
-	dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
-	if (IS_ERR(dpi->next_bridge)) {
-		ret = PTR_ERR(dpi->next_bridge);
-		if (ret == -EPROBE_DEFER)
-			return ret;
-
-		/* Old devicetree has only one endpoint */
-		dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
-		if (IS_ERR(dpi->next_bridge))
-			return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
-					     "Failed to get bridge\n");
-	}
 
 	return drm_bridge_attach(encoder, dpi->next_bridge,
 				 &dpi->bridge, flags);
@@ -1319,6 +1305,15 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 	if (dpi->irq < 0)
 		return dpi->irq;
 
+	dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
+	if (IS_ERR(dpi->next_bridge) && PTR_ERR(dpi->next_bridge) == -ENODEV) {
+		/* Old devicetree has only one endpoint */
+		dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
+	}
+	if (IS_ERR(dpi->next_bridge))
+		return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
+				     "Failed to get bridge\n");
+
 	platform_set_drvdata(pdev, dpi);
 
 	dpi->bridge.of_node = dev->of_node;
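Note: the probe-time lookup above follows the usual bridge-consumer pattern: try the endpoint described by the current binding first, and only fall back to the legacy single-endpoint layout when the endpoint is genuinely absent (-ENODEV), so a real -EPROBE_DEFER still propagates to the driver core. A minimal sketch of that pattern outside any particular driver (the helper name example_get_next_bridge is made up for illustration):

/* Illustrative only; assumes the same port/endpoint numbers as the hunk above. */
static struct drm_bridge *example_get_next_bridge(struct device *dev)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, -1);
	if (IS_ERR(bridge) && PTR_ERR(bridge) == -ENODEV) {
		/* Old devicetree has only one endpoint */
		bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	}

	return bridge;	/* may still be an ERR_PTR(), including -EPROBE_DEFER */
}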
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd.
+ *                    AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
  */
 
 #include <linux/dma-buf.h>
@@ -18,24 +20,64 @@
+static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
-static const struct vm_operations_struct vm_ops = {
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
+static void mtk_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+	struct mtk_drm_private *priv = obj->dev->dev_private;
+
+	if (dma_obj->sgt)
+		drm_prime_gem_destroy(obj, dma_obj->sgt);
+	else
+		dma_free_wc(priv->dma_dev, dma_obj->base.size,
+			    dma_obj->vaddr, dma_obj->dma_addr);
+
+	/* release file pointer to gem object. */
+	drm_gem_object_release(obj);
+
+	kfree(dma_obj);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents, and the sg_table itself must be freed by
+ * the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+static struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+	struct mtk_drm_private *priv = obj->dev->dev_private;
+	struct sg_table *sgt;
+	int ret;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr,
+			      dma_obj->dma_addr, obj->size);
+	if (ret) {
+		DRM_ERROR("failed to allocate sgt, %d\n", ret);
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
+
+	return sgt;
+}
+
 static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
 	.free = mtk_gem_free_object,
+	.print_info = drm_gem_dma_object_print_info,
 	.get_sg_table = mtk_gem_prime_get_sg_table,
-	.vmap = mtk_gem_prime_vmap,
-	.vunmap = mtk_gem_prime_vunmap,
+	.vmap = drm_gem_dma_object_vmap,
 	.mmap = mtk_gem_object_mmap,
-	.vm_ops = &vm_ops,
+	.vm_ops = &drm_gem_dma_vm_ops,
 };
 
-static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
-					unsigned long size)
+static struct drm_gem_dma_object *mtk_gem_init(struct drm_device *dev,
+					       unsigned long size, bool private)
 {
-	struct mtk_gem_obj *mtk_gem_obj;
+	struct drm_gem_dma_object *dma_obj;
 	int ret;
 
 	size = round_up(size, PAGE_SIZE);
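The local vm_ops table removed above duplicated what the DMA GEM helper library already exports, which is what makes switching .vm_ops and .vmap over to the helper versions safe. For reference, paraphrased from drm_gem_dma_helper.c rather than quoted, the helper's vm_ops is essentially the same open/close pair the driver used to define by hand:

/* Paraphrased from the DMA GEM helpers; shown only to illustrate the overlap. */
const struct vm_operations_struct drm_gem_dma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};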
@@ -43,86 +85,65 @@ static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
 
-	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
-	if (!mtk_gem_obj)
+	dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+	if (!dma_obj)
 		return ERR_PTR(-ENOMEM);
 
-	mtk_gem_obj->base.funcs = &mtk_gem_object_funcs;
+	dma_obj->base.funcs = &mtk_gem_object_funcs;
 
-	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
-	if (ret < 0) {
+	if (private) {
+		ret = 0;
+		drm_gem_private_object_init(dev, &dma_obj->base, size);
+	} else {
+		ret = drm_gem_object_init(dev, &dma_obj->base, size);
+	}
+	if (ret) {
 		DRM_ERROR("failed to initialize gem object\n");
-		kfree(mtk_gem_obj);
+		kfree(dma_obj);
 		return ERR_PTR(ret);
 	}
 
-	return mtk_gem_obj;
+	return dma_obj;
 }
 
-struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
-				   size_t size, bool alloc_kmap)
+static struct drm_gem_dma_object *mtk_gem_create(struct drm_device *dev, size_t size)
 {
 	struct mtk_drm_private *priv = dev->dev_private;
-	struct mtk_gem_obj *mtk_gem;
+	struct drm_gem_dma_object *dma_obj;
 	struct drm_gem_object *obj;
 	int ret;
 
-	mtk_gem = mtk_gem_init(dev, size);
-	if (IS_ERR(mtk_gem))
-		return ERR_CAST(mtk_gem);
+	dma_obj = mtk_gem_init(dev, size, false);
+	if (IS_ERR(dma_obj))
+		return ERR_CAST(dma_obj);
 
-	obj = &mtk_gem->base;
+	obj = &dma_obj->base;
 
-	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
-
-	if (!alloc_kmap)
-		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-
-	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
-					  &mtk_gem->dma_addr, GFP_KERNEL,
-					  mtk_gem->dma_attrs);
-	if (!mtk_gem->cookie) {
+	dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size,
+				      &dma_obj->dma_addr,
+				      GFP_KERNEL | __GFP_NOWARN);
+	if (!dma_obj->vaddr) {
 		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
 		ret = -ENOMEM;
 		goto err_gem_free;
 	}
 
-	if (alloc_kmap)
-		mtk_gem->kvaddr = mtk_gem->cookie;
-
-	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
-			 mtk_gem->cookie, &mtk_gem->dma_addr,
+	DRM_DEBUG_DRIVER("vaddr = %p dma_addr = %pad size = %zu\n",
+			 dma_obj->vaddr, &dma_obj->dma_addr,
 			 size);
 
-	return mtk_gem;
+	return dma_obj;
 
 err_gem_free:
 	drm_gem_object_release(obj);
-	kfree(mtk_gem);
+	kfree(dma_obj);
 	return ERR_PTR(ret);
 }
 
-void mtk_gem_free_object(struct drm_gem_object *obj)
-{
-	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
-	struct mtk_drm_private *priv = obj->dev->dev_private;
-
-	if (mtk_gem->sg)
-		drm_prime_gem_destroy(obj, mtk_gem->sg);
-	else
-		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
-			       mtk_gem->dma_addr, mtk_gem->dma_attrs);
-
-	/* release file pointer to gem object. */
-	drm_gem_object_release(obj);
-
-	kfree(mtk_gem);
-}
-
 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 			struct drm_mode_create_dumb *args)
 {
-	struct mtk_gem_obj *mtk_gem;
+	struct drm_gem_dma_object *dma_obj;
 	int ret;
 
 	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
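The dma_alloc_wc()/dma_free_wc() calls above are shorthand for the _attrs variants with DMA_ATTR_WRITE_COMBINE, which is why the explicit dma_attrs bookkeeping can be dropped. Roughly, as a sketch of the generic DMA API wrappers and not of driver code (see <linux/dma-mapping.h> for the real definitions):

/* Rough equivalence, for orientation only; the in-tree wrapper also folds
 * __GFP_NOWARN handling into the attrs. */
static inline void *sketch_dma_alloc_wc(struct device *dev, size_t size,
					 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}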
@@ -135,25 +156,25 @@ int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 	args->size = args->pitch;
 	args->size *= args->height;
 
-	mtk_gem = mtk_gem_create(dev, args->size, false);
-	if (IS_ERR(mtk_gem))
-		return PTR_ERR(mtk_gem);
+	dma_obj = mtk_gem_create(dev, args->size);
+	if (IS_ERR(dma_obj))
+		return PTR_ERR(dma_obj);
 
 	/*
 	 * allocate a id of idr table where the obj is registered
 	 * and handle has the id what user can see.
 	 */
-	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
+	ret = drm_gem_handle_create(file_priv, &dma_obj->base, &args->handle);
 	if (ret)
 		goto err_handle_create;
 
 	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_put(&mtk_gem->base);
+	drm_gem_object_put(&dma_obj->base);
 
 	return 0;
 
 err_handle_create:
-	mtk_gem_free_object(&mtk_gem->base);
+	mtk_gem_free_object(&dma_obj->base);
 	return ret;
 }
@@ -161,129 +182,50 @@ static int mtk_gem_object_mmap(struct drm_gem_object *obj,
 			       struct vm_area_struct *vma)
 {
-	int ret;
-	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
 	struct mtk_drm_private *priv = obj->dev->dev_private;
+	int ret;
 
 	/*
 	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
 	 * whole buffer from the start.
 	 */
-	vma->vm_pgoff = 0;
+	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
 
 	/*
 	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
-	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
 
 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
-	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
-			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
+	ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr,
+			  dma_obj->dma_addr, obj->size);
 	if (ret)
 		drm_gem_vm_close(vma);
 
 	return ret;
 }
 
-/*
- * Allocate a sg_table for this GEM object.
- * Note: Both the table's contents, and the sg_table itself must be freed by
- * the caller.
- * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
- */
-struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
-	struct mtk_drm_private *priv = obj->dev->dev_private;
-	struct sg_table *sgt;
-	int ret;
-
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		return ERR_PTR(-ENOMEM);
-
-	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
-				    mtk_gem->dma_addr, obj->size,
-				    mtk_gem->dma_attrs);
-	if (ret) {
-		DRM_ERROR("failed to allocate sgt, %d\n", ret);
-		kfree(sgt);
-		return ERR_PTR(ret);
-	}
-
-	return sgt;
-}
-
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
-			struct dma_buf_attachment *attach, struct sg_table *sg)
+			struct dma_buf_attachment *attach, struct sg_table *sgt)
 {
-	struct mtk_gem_obj *mtk_gem;
+	struct drm_gem_dma_object *dma_obj;
 
 	/* check if the entries in the sg_table are contiguous */
-	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
+	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
 		DRM_ERROR("sg_table is not contiguous");
 		return ERR_PTR(-EINVAL);
 	}
 
-	mtk_gem = mtk_gem_init(dev, attach->dmabuf->size);
-	if (IS_ERR(mtk_gem))
-		return ERR_CAST(mtk_gem);
+	dma_obj = mtk_gem_init(dev, attach->dmabuf->size, true);
+	if (IS_ERR(dma_obj))
+		return ERR_CAST(dma_obj);
 
-	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
-	mtk_gem->sg = sg;
+	dma_obj->dma_addr = sg_dma_address(sgt->sgl);
+	dma_obj->sgt = sgt;
 
-	return &mtk_gem->base;
-}
-
-int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
-{
-	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
-	struct sg_table *sgt = NULL;
-	unsigned int npages;
-
-	if (mtk_gem->kvaddr)
-		goto out;
-
-	sgt = mtk_gem_prime_get_sg_table(obj);
-	if (IS_ERR(sgt))
-		return PTR_ERR(sgt);
-
-	npages = obj->size >> PAGE_SHIFT;
-	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
-	if (!mtk_gem->pages) {
-		sg_free_table(sgt);
-		kfree(sgt);
-		return -ENOMEM;
-	}
-
-	drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
-
-	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
-			       pgprot_writecombine(PAGE_KERNEL));
-	if (!mtk_gem->kvaddr) {
-		sg_free_table(sgt);
-		kfree(sgt);
-		kfree(mtk_gem->pages);
-		return -ENOMEM;
-	}
-	sg_free_table(sgt);
-	kfree(sgt);
-
-out:
-	iosys_map_set_vaddr(map, mtk_gem->kvaddr);
-
-	return 0;
-}
-
-void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
-{
-	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
-	void *vaddr = map->vaddr;
-
-	if (!mtk_gem->pages)
-		return;
-
-	vunmap(vaddr);
-	mtk_gem->kvaddr = NULL;
-	kfree(mtk_gem->pages);
+	return &dma_obj->base;
 }
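Dropping the open-coded vmap/vunmap paths is possible because the buffers are now allocated with dma_alloc_wc(), which already provides a kernel mapping; the helper vmap simply publishes that address instead of rebuilding one from a page array. Roughly, as a paraphrase of the DMA GEM helper rather than this driver's code (imported buffers without a kernel mapping are not covered by this sketch):

/* Approximate shape of drm_gem_dma_object_vmap(). */
int sketch_gem_dma_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);

	iosys_map_set_vaddr(map, dma_obj->vaddr);
	return 0;
}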
@@ -7,42 +7,11 @@
 #define _MTK_GEM_H_
 
-#include <drm/drm_gem.h>
+#include <drm/drm_gem_dma_helper.h>
 
-/*
- * mtk drm buffer structure.
- *
- * @base: a gem object.
- *	- a new handle to this gem object would be created
- *	by drm_gem_handle_create().
- * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs()
- * @kvaddr: kernel virtual address of gem buffer.
- * @dma_addr: dma address of gem buffer.
- * @dma_attrs: dma attributes of gem buffer.
- *
- * P.S. this object would be transferred to user as kms_bo.handle so
- * user can access the buffer through kms_bo.handle.
- */
-struct mtk_gem_obj {
-	struct drm_gem_object base;
-	void *cookie;
-	void *kvaddr;
-	dma_addr_t dma_addr;
-	unsigned long dma_attrs;
-	struct sg_table *sg;
-	struct page **pages;
-};
-
-#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
-
-void mtk_gem_free_object(struct drm_gem_object *gem);
-struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
-				   bool alloc_kmap);
 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
-struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 			struct dma_buf_attachment *attach, struct sg_table *sg);
-int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
 
 #endif
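The removed mtk_gem_obj state maps directly onto fields the helper object already carries: cookie/kvaddr become vaddr, sg becomes sgt, and dma_attrs disappears along with the _wc wrappers. Abridged from memory of <drm/drm_gem_dma_helper.h>, so treat field order and completeness as approximate:

struct drm_gem_dma_object {
	struct drm_gem_object base;
	dma_addr_t dma_addr;
	struct sg_table *sgt;	/* set for imported (PRIME) buffers */
	void *vaddr;		/* kernel virtual address, NULL if unmapped */
};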
@@ -303,7 +303,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, struct platform_device
 		return dev_err_probe(dev, ret, "Failed to get clocks\n");
 
 	hdmi->irq = platform_get_irq(pdev, 0);
-	if (!hdmi->irq)
+	if (hdmi->irq < 0)
 		return hdmi->irq;
 
 	hdmi->regs = device_node_to_regmap(dev->of_node);
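platform_get_irq() returns a negative errno on failure and never returns 0, so the old `if (!hdmi->irq)` check missed every real error while treating success as failure would never trigger either; the next hunk makes the matching change of storing the value in a signed int. The generic pattern, not specific to this driver (example_request_irq is a made-up name):

static int example_request_irq(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);	/* negative errno on failure, never 0 */
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER, -ENXIO, ... */

	return irq;
}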
@@ -168,7 +168,7 @@ struct mtk_hdmi {
 	bool audio_enable;
 	bool powered;
 	bool enabled;
-	unsigned int irq;
+	int irq;
 	enum hdmi_hpd_state hpd;
 	hdmi_codec_plugged_cb plugged_cb;
 	struct device *codec_dev;
@@ -66,11 +66,19 @@ static int mtk_ddc_check_and_rise_low_bus(struct mtk_hdmi_ddc *ddc)
 	return 0;
 }
 
-static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
-			  u16 offset_id, u8 *wr_data)
+static int mtk_ddcm_write_hdmi(struct mtk_hdmi_ddc *ddc, u16 addr_id,
+			       u16 offset_id, u16 data_cnt, u8 *wr_data)
 {
 	u32 val;
-	int ret;
+	int ret, i;
+
+	/* Don't allow transfer with a size over than the transfer fifo size
+	 * (16 byte)
+	 */
+	if (data_cnt > 16) {
+		dev_err(ddc->dev, "Invalid DDCM write request\n");
+		return -EINVAL;
+	}
 
 	/* If down, rise bus for write operation */
 	mtk_ddc_check_and_rise_low_bus(ddc);
@@ -78,16 +86,21 @@ static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
 	regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT,
 			   FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT));
 
 	/* In case there is no payload data, just do a single write for the
 	 * address only
 	 */
 	if (wr_data) {
-		regmap_write(ddc->regs, SI2C_CTRL,
-			     FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
-			     FIELD_PREP(SI2C_WDATA, *wr_data) |
-			     SI2C_WR);
+		/* Fill transfer fifo with payload data */
+		for (i = 0; i < data_cnt; i++) {
+			regmap_write(ddc->regs, SI2C_CTRL,
+				     FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
+				     FIELD_PREP(SI2C_WDATA, wr_data[i]) |
+				     SI2C_WR);
+		}
 	}
 
 	regmap_write(ddc->regs, DDC_CTRL,
 		     FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) |
-		     FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : 1) |
+		     FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : data_cnt) |
 		     FIELD_PREP(DDC_CTRL_OFFSET, offset_id) |
 		     FIELD_PREP(DDC_CTRL_ADDR, addr_id));
 	usleep_range(1000, 1250);
@@ -96,6 +109,11 @@ static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
 				       !(val & DDC_I2C_IN_PROG), 500, 1000);
 	if (ret) {
 		dev_err(ddc->dev, "DDC I2C write timeout\n");
+
+		/* Abort transfer if it is still in progress */
+		regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD,
+				   FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER));
+
 		return ret;
 	}
@@ -179,6 +197,11 @@ static int mtk_ddcm_read_hdmi(struct mtk_hdmi_ddc *ddc, u16 uc_dev,
 				       500 * (temp_length + 5));
 	if (ret) {
 		dev_err(ddc->dev, "Timeout waiting for DDC I2C\n");
+
+		/* Abort transfer if it is still in progress */
+		regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD,
+				   FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER));
+
 		return ret;
 	}
@@ -250,24 +273,9 @@ static int mtk_hdmi_fg_ddc_data_read(struct mtk_hdmi_ddc *ddc, u16 b_dev,
 static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev,
 				      u8 data_addr, u16 data_cnt, u8 *pr_data)
 {
-	int i, ret;
-
 	regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN);
-	/*
-	 * In case there is no payload data, just do a single write for the
-	 * address only
-	 */
-	if (data_cnt == 0)
-		return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL);
-
-	i = 0;
-	do {
-		ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i);
-		if (ret)
-			return ret;
-	} while (++i < data_cnt);
-
-	return 0;
+	return mtk_ddcm_write_hdmi(ddc, b_dev, data_addr, data_cnt, pr_data);
 }
 
 static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
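Note that mtk_ddcm_write_hdmi() now rejects anything larger than the 16-byte transfer FIFO instead of looping byte-by-byte as the old code did, so a caller that ever needed a bigger write would have to split it itself. A purely hypothetical sketch of such a split (the helper name and FIFO macro are made up; whether any current caller needs this is not established by the diff):

#define EXAMPLE_DDC_FIFO_SIZE	16	/* assumption: matches the 16-byte check above */

static int example_ddc_write_chunked(struct mtk_hdmi_ddc *ddc, u16 addr,
				     u16 offset, u16 len, u8 *buf)
{
	u16 done = 0;

	while (done < len) {
		u16 chunk = min_t(u16, len - done, EXAMPLE_DDC_FIFO_SIZE);
		int ret;

		ret = mtk_ddcm_write_hdmi(ddc, addr, offset + done,
					  chunk, buf + done);
		if (ret)
			return ret;
		done += chunk;
	}

	return 0;
}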
@@ -1120,9 +1120,10 @@ static void mtk_hdmi_v2_hpd_disable(struct drm_bridge *bridge)
 	mtk_hdmi_v2_disable(hdmi);
 }
 
-static int mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
-						 const struct drm_display_mode *mode,
-						 unsigned long long tmds_rate)
+static enum drm_mode_status
+mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+				      const struct drm_display_mode *mode,
+				      unsigned long long tmds_rate)
 {
 	if (mode->clock < MTK_HDMI_V2_CLOCK_MIN)
 		return MODE_CLOCK_LOW;
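The new return type lines this function up with the bridge op it implements, which is declared to return enum drm_mode_status, so returning int was a type mismatch even though the MODE_* values themselves still compiled. Paraphrased from <drm/drm_bridge.h>, so treat it as approximate rather than a verbatim quote:

/* Approximate declaration of the op in struct drm_bridge_funcs. */
enum drm_mode_status
(*hdmi_tmds_char_rate_valid)(const struct drm_bridge *bridge,
			     const struct drm_display_mode *mode,
			     unsigned long long tmds_rate);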
@@ -11,13 +11,13 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_print.h>
 #include <linux/align.h>
 
 #include "mtk_crtc.h"
 #include "mtk_ddp_comp.h"
 #include "mtk_drm_drv.h"
-#include "mtk_gem.h"
 #include "mtk_plane.h"
 
 static const u64 modifiers[] = {
@@ -114,8 +114,8 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
 				       struct mtk_plane_state *mtk_plane_state)
 {
 	struct drm_framebuffer *fb = new_state->fb;
+	struct drm_gem_dma_object *dma_obj;
 	struct drm_gem_object *gem;
-	struct mtk_gem_obj *mtk_gem;
 	unsigned int pitch, format;
 	u64 modifier;
 	dma_addr_t addr;
@@ -124,8 +124,8 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
 	int offset;
 
 	gem = fb->obj[0];
-	mtk_gem = to_mtk_gem_obj(gem);
-	addr = mtk_gem->dma_addr;
+	dma_obj = to_drm_gem_dma_obj(gem);
+	addr = dma_obj->dma_addr;
 	pitch = fb->pitches[0];
 	format = fb->format->format;
 	modifier = fb->modifier;