drm/nouveau/core/memory: split info pointers from accessor pointers
The accessor functions can change as a result of acquire()/release() calls, and are protected by any refcounting done there. Other functions must remain constant, as they can be called any time.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent dde59b9c34
commit 07bbc1c5f4
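As a reading aid for the hunks that follow, here is a minimal, self-contained sketch of the idea from the commit message. It is not the nouveau code: the type names, the backing array and the slow/fast accessor pair are simplified stand-ins, and only the shape of the split is taken from the patch. Operations reached through "func" stay valid at any time, while the accessor table in "ptrs" may be swapped underneath acquire()/release() because callers hold the mapping across their accesses.

/*
 * Illustrative sketch only -- these types and helpers are hypothetical
 * stand-ins, not nouveau code.  "func" plays the role of the constant
 * info/operation table, "ptrs" the swappable rd32/wr32 accessor table.
 */
#include <stdio.h>
#include <stdint.h>

struct memory;

struct memory_func {			/* constant: callable at any time */
	uint64_t (*size)(struct memory *);
	void (*acquire)(struct memory *);
	void (*release)(struct memory *);
};

struct memory_ptrs {			/* swappable: protected by acquire()/release() */
	uint32_t (*rd32)(struct memory *, uint64_t offset);
};

struct memory {
	const struct memory_func *func;
	const struct memory_ptrs *ptrs;
	uint32_t backing[16];
};

static uint64_t mem_size(struct memory *m) { return sizeof(m->backing); }

static uint32_t rd32_slow(struct memory *m, uint64_t offset)
{
	printf("slow rd32(0x%llx)\n", (unsigned long long)offset);
	return m->backing[offset / 4];
}

static uint32_t rd32_fast(struct memory *m, uint64_t offset)
{
	return m->backing[offset / 4];	/* direct access, valid only while mapped */
}

static const struct memory_ptrs slow_ptrs = { .rd32 = rd32_slow };
static const struct memory_ptrs fast_ptrs = { .rd32 = rd32_fast };

/* acquire() swaps the accessors; the constant table is left untouched */
static void mem_acquire(struct memory *m) { m->ptrs = &fast_ptrs; }
static void mem_release(struct memory *m) { m->ptrs = &slow_ptrs; }

static const struct memory_func mem_func = {
	.size = mem_size, .acquire = mem_acquire, .release = mem_release,
};

/* dispatch macros, analogous in spirit to nvkm_ro32()/nvkm_kmap()/nvkm_done() */
#define ro32(o, a) (o)->ptrs->rd32((o), (a))
#define kmap(o)    (o)->func->acquire(o)
#define done(o)    (o)->func->release(o)

int main(void)
{
	struct memory m = {
		.func = &mem_func, .ptrs = &slow_ptrs, .backing = { 0xdeadbeef },
	};

	printf("size=%llu word0=0x%08x\n",
	       (unsigned long long)m.func->size(&m), ro32(&m, 0));
	kmap(&m);			/* accessor table swapped here */
	printf("word0=0x%08x via fast path\n", ro32(&m, 0));
	done(&m);
	return 0;
}

The point of keeping two tables is that the read/write macros always dispatch through ->ptrs, so an implementation can install faster accessors once a mapping exists, while everything reached through ->func keeps working whether or not the object is currently mapped.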
@@ -9,7 +9,10 @@ struct nvkm_vm;
#define NVOBJ_FLAG_HEAP 0x00000004

struct nvkm_gpuobj {
	const struct nvkm_gpuobj_func *func;
	union {
		const struct nvkm_gpuobj_func *func;
		const struct nvkm_gpuobj_func *ptrs;
	};
	struct nvkm_gpuobj *parent;
	struct nvkm_memory *memory;
	struct nvkm_mm_node *node;

@@ -14,6 +14,7 @@ enum nvkm_memory_target {

struct nvkm_memory {
	const struct nvkm_memory_func *func;
	const struct nvkm_memory_ptrs *ptrs;
};

struct nvkm_memory_func {

@@ -24,9 +25,12 @@ struct nvkm_memory_func {
	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
	void __iomem *(*acquire)(struct nvkm_memory *);
	void (*release)(struct nvkm_memory *);
	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};

struct nvkm_memory_ptrs {
	u32 (*rd32)(struct nvkm_memory *, u64 offset);
	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};

void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);

@@ -43,8 +47,8 @@ void nvkm_memory_del(struct nvkm_memory **);
 * macros to guarantee correct behaviour across all chipsets
 */
#define nvkm_kmap(o) (o)->func->acquire(o)
#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
#define nvkm_ro32(o,a) (o)->ptrs->rd32((o), (a))
#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
#define nvkm_mo32(o,a,m,d) ({ \
	u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
	nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
@@ -112,9 +112,13 @@ nvkm_instobj_func = {
	.size = nvkm_instobj_size,
	.acquire = nvkm_instobj_acquire,
	.release = nvkm_instobj_release,
	.map = nvkm_instobj_map,
};

static const struct nvkm_memory_ptrs
nvkm_instobj_ptrs = {
	.rd32 = nvkm_instobj_rd32,
	.wr32 = nvkm_instobj_wr32,
	.map = nvkm_instobj_map,
};

static void

@@ -137,8 +141,10 @@ nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	iobj->map = nvkm_kmap(iobj->parent);
	if (iobj->map)
	if (iobj->map) {
		memory->func = &nvkm_instobj_func;
		memory->ptrs = &nvkm_instobj_ptrs;
	}
	return iobj->map;
}

@@ -165,9 +171,13 @@ nvkm_instobj_func_slow = {
	.boot = nvkm_instobj_boot,
	.acquire = nvkm_instobj_acquire_slow,
	.release = nvkm_instobj_release_slow,
	.map = nvkm_instobj_map,
};

static const struct nvkm_memory_ptrs
nvkm_instobj_ptrs_slow = {
	.rd32 = nvkm_instobj_rd32_slow,
	.wr32 = nvkm_instobj_wr32_slow,
	.map = nvkm_instobj_map,
};

int

@@ -196,6 +206,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
	}

	nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
	iobj->memory.ptrs = &nvkm_instobj_ptrs_slow;
	iobj->parent = memory;
	iobj->imem = imem;
	spin_lock(&iobj->imem->lock);
@@ -346,8 +346,6 @@ gk20a_instobj_func_dma = {
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

@@ -359,9 +357,13 @@ gk20a_instobj_func_iommu = {
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

static int

@@ -377,6 +379,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,

@@ -424,6 +427,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
@@ -43,35 +43,12 @@ struct nv04_instobj {
	struct nvkm_mm_node *node;
};

static enum nvkm_memory_target
nv04_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

static u64
nv04_instobj_addr(struct nvkm_memory *memory)
{
	return nv04_instobj(memory)->node->offset;
}

static u64
nv04_instobj_size(struct nvkm_memory *memory)
{
	return nv04_instobj(memory)->node->length;
}

static void __iomem *
nv04_instobj_acquire(struct nvkm_memory *memory)
static void
nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv04_instobj *iobj = nv04_instobj(memory);
	struct nvkm_device *device = iobj->imem->base.subdev.device;
	return device->pri + 0x700000 + iobj->node->offset;
}

static void
nv04_instobj_release(struct nvkm_memory *memory)
{
	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}

static u32

@@ -82,12 +59,41 @@ nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}

static const struct nvkm_memory_ptrs
nv04_instobj_ptrs = {
	.rd32 = nv04_instobj_rd32,
	.wr32 = nv04_instobj_wr32,
};

static void
nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
nv04_instobj_release(struct nvkm_memory *memory)
{
}

static void __iomem *
nv04_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv04_instobj *iobj = nv04_instobj(memory);
	struct nvkm_device *device = iobj->imem->base.subdev.device;
	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
	return device->pri + 0x700000 + iobj->node->offset;
}

static u64
nv04_instobj_size(struct nvkm_memory *memory)
{
	return nv04_instobj(memory)->node->length;
}

static u64
nv04_instobj_addr(struct nvkm_memory *memory)
{
	return nv04_instobj(memory)->node->offset;
}

static enum nvkm_memory_target
nv04_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

static void *

@@ -108,8 +114,6 @@ nv04_instobj_func = {
	.addr = nv04_instobj_addr,
	.acquire = nv04_instobj_acquire,
	.release = nv04_instobj_release,
	.rd32 = nv04_instobj_rd32,
	.wr32 = nv04_instobj_wr32,
};

static int

@@ -125,6 +129,7 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
	*pmemory = &iobj->memory;

	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
	iobj->memory.ptrs = &nv04_instobj_ptrs;
	iobj->imem = imem;

	mutex_lock(&imem->base.subdev.mutex);
@@ -45,34 +45,11 @@ struct nv40_instobj {
	struct nvkm_mm_node *node;
};

static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->offset;
}

static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->length;
}

static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	return iobj->imem->iomem + iobj->node->offset;
}

static void
nv40_instobj_release(struct nvkm_memory *memory)
{
	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}

static u32

@@ -82,11 +59,40 @@ nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}

static const struct nvkm_memory_ptrs
nv40_instobj_ptrs = {
	.rd32 = nv40_instobj_rd32,
	.wr32 = nv40_instobj_wr32,
};

static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
nv40_instobj_release(struct nvkm_memory *memory)
{
}

static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
	return iobj->imem->iomem + iobj->node->offset;
}

static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->length;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->offset;
}

static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

static void *

@@ -107,8 +113,6 @@ nv40_instobj_func = {
	.addr = nv40_instobj_addr,
	.acquire = nv40_instobj_acquire,
	.release = nv40_instobj_release,
	.rd32 = nv40_instobj_rd32,
	.wr32 = nv40_instobj_wr32,
};

static int

@@ -124,6 +128,7 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
	*pmemory = &iobj->memory;

	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
	iobj->memory.ptrs = &nv40_instobj_ptrs;
	iobj->imem = imem;

	mutex_lock(&imem->base.subdev.mutex);
@@ -49,22 +49,76 @@ struct nv50_instobj {
	void *map;
};

static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
static void
nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
	return NVKM_MEM_TARGET_VRAM;
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;

	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}
	nvkm_wr32(device, 0x700000 + addr, data);
}

static u64
nv50_instobj_addr(struct nvkm_memory *memory)
static u32
nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
	return nv50_instobj(memory)->mem->offset;
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
	u32 data;

	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}
	data = nvkm_rd32(device, 0x700000 + addr);
	return data;
}

static u64
nv50_instobj_size(struct nvkm_memory *memory)
static const struct nvkm_memory_ptrs
nv50_instobj_slow = {
	.rd32 = nv50_instobj_rd32_slow,
	.wr32 = nv50_instobj_wr32_slow,
};

static void
nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
	struct nv50_instobj *iobj = nv50_instobj(memory);
	nvkm_vm_map_at(vma, offset, iobj->mem);
}

static void
nv50_instobj_release(struct nvkm_memory *memory)
{
	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
}

static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_vm *vm;
	unsigned long flags;

	if (!iobj->map && (vm = nvkm_bar_bar2_vmm(imem->base.subdev.device)))
		nvkm_memory_boot(memory, vm);
	if (!IS_ERR_OR_NULL(iobj->map))
		return iobj->map;

	spin_lock_irqsave(&imem->lock, flags);
	imem->lock_flags = flags;
	return NULL;
}

static void

@@ -95,70 +149,22 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
	}
}

static void
nv50_instobj_release(struct nvkm_memory *memory)
static u64
nv50_instobj_size(struct nvkm_memory *memory)
{
	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
}

static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
static u64
nv50_instobj_addr(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_vm *vm;
	unsigned long flags;

	if (!iobj->map && (vm = nvkm_bar_bar2_vmm(imem->base.subdev.device)))
		nvkm_memory_boot(memory, vm);
	if (!IS_ERR_OR_NULL(iobj->map))
		return iobj->map;

	spin_lock_irqsave(&imem->lock, flags);
	imem->lock_flags = flags;
	return NULL;
	return nv50_instobj(memory)->mem->offset;
}

static u32
nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
	u32 data;

	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}
	data = nvkm_rd32(device, 0x700000 + addr);
	return data;
}

static void
nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;

	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}
	nvkm_wr32(device, 0x700000 + addr, data);
}

static void
nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	nvkm_vm_map_at(vma, offset, iobj->mem);
	return NVKM_MEM_TARGET_VRAM;
}

static void *

@@ -183,8 +189,6 @@ nv50_instobj_func = {
	.boot = nv50_instobj_boot,
	.acquire = nv50_instobj_acquire,
	.release = nv50_instobj_release,
	.rd32 = nv50_instobj_rd32,
	.wr32 = nv50_instobj_wr32,
	.map = nv50_instobj_map,
};

@@ -202,6 +206,7 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
	*pmemory = &iobj->memory;

	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
	iobj->memory.ptrs = &nv50_instobj_slow;
	iobj->imem = imem;

	size = max((size + 4095) & ~4095, (u32)4096);