/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/kmem.h>
#include "ion_priv.h"
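
/*
 * High-order allocations are strictly opportunistic: don't warn, don't
 * retry, don't wake kswapd, and don't block, so a failure falls through
 * quickly to a smaller order.  Low-order allocations may block and reclaim.
 */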
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
				     __GFP_NOWARN | __GFP_NORETRY |
				     __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
				     __GFP_NOWARN);
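
/*
 * Chunk sizes tried for each buffer, largest first.  With 4 KiB pages these
 * are 1 MiB, 64 KiB and 4 KiB chunks; order 0 must stay last so any
 * remainder can always be satisfied page by page.
 */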
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
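
/* Map an allocation order to its slot in orders[]; unknown orders are a bug. */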
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};
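
/* Bookkeeping for one chunk while a buffer's scatterlist is being built. */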
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};
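
/*
 * Grab one chunk of 2^order pages.  Uncached buffers are served from the
 * per-order page pools; cached buffers are allocated fresh and synced for
 * the device so no dirty CPU cache lines alias the new allocation.
 */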
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		struct scatterlist sg;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		trace_alloc_pages_sys_start(gfp_flags, order);
		page = alloc_pages(gfp_flags, order);
		trace_alloc_pages_sys_end(gfp_flags, order);
		if (!page) {
			trace_alloc_pages_sys_fail(gfp_flags, order);
			return NULL;
		}
		sg_init_table(&sg, 1);
		sg_set_page(&sg, page, PAGE_SIZE << order, 0);
		sg_dma_address(&sg) = sg_phys(&sg);
		dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}
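
/*
 * Return one chunk.  Uncached pages go back to their pool for reuse;
 * cached pages go straight back to the buddy allocator, one page at a
 * time if the buffer was split for fault-time user mapping.
 */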
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}
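
/*
 * Try each order from largest to smallest and return the first chunk that
 * both fits in the remaining size and does not exceed max_order.
 */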
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			/* don't leak the chunk if we can't track it */
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}
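
/*
 * Assemble the buffer as a list of chunks, biggest first.  max_order never
 * grows: an order that has already failed, or is already larger than what
 * remains, is not tried again for this allocation.
 */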
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			/* split buffers get one single-page entry per page */
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/*
	 * Uncached pages come from the page pools; zero them before
	 * returning them for security purposes (other allocations are
	 * zeroed at alloc time).
	 */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}
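
/*
 * The sg_table built at allocation time is stashed in priv_virt and doubles
 * as the dma mapping, so map_dma just hands it back and unmap_dma has
 * nothing to do.
 */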
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}
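
/* Kernel and user mappings reuse the generic ion_heap helpers. */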
static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
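
/* Dump, per order, how many pages each pool holds and their total size. */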
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}
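
/*
 * One page pool per order backs the uncached allocations.  The heap is
 * created with ION_HEAP_FLAG_DEFER_FREE so buffers can be freed (and
 * pooled pages zeroed) outside the caller's context.
 */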
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
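
/*
 * The "system contig" heap below is kmalloc-backed: buffers are physically
 * contiguous, which is what makes the ->phys op possible, but allocations
 * are limited to sizes kmalloc can satisfy.
 */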
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}