/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include "ion_priv.h"

#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>

struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long allocated_bytes;
	unsigned long total_size;
};

ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
							size, ilog2(align));

	if (!offset) {
		if ((carveout_heap->total_size -
		     carveout_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				carveout_heap->total_size -
				carveout_heap->allocated_bytes, size);
		return ION_CARVEOUT_ALLOCATE_FAIL;
	}

	carveout_heap->allocated_bytes += size;
	return offset;
}

void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
	carveout_heap->allocated_bytes -= size;
}

static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}
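
/*
 * Illustrative only (hypothetical caller, not part of this file): a
 * client wanting 1 MiB of physically contiguous, 1 MiB-aligned memory
 * straight from the pool would pair the two helpers above like this:
 *
 *	ion_phys_addr_t pa = ion_carveout_allocate(heap, SZ_1M, SZ_1M);
 *	if (pa == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 *	...
 *	ion_carveout_free(heap, pa, SZ_1M);
 *
 * Note that the caller must remember the size it allocated: the heap
 * does not track per-allocation sizes, only the running total.
 */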

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
					   struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;

	return ion_create_chunked_sg_table(buffer->priv_phys, chunk_size,
					   buffer->size);
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	void *ret_value;

	if (ION_IS_CACHED(buffer->flags))
		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
	else
		ret_value = ioremap(buffer->priv_phys, buffer->size);
	return ret_value;
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	__arm_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

int ion_carveout_heap_map_user(struct ion_heap *heap,
			       struct ion_buffer *buffer,
			       struct vm_area_struct *vma)
{
	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       __phys_to_pfn(buffer->priv_phys) +
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
				    const struct rb_root *mem_map)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	seq_printf(s, "total bytes currently allocated: %lx\n",
		carveout_heap->allocated_bytes);
	seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);

	if (mem_map) {
		unsigned long base = carveout_heap->base;
		unsigned long size = carveout_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			/* Report any free gap before this allocation. */
			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		/* Report the free tail of the heap, if any. */
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}
	return 0;
}

static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_user = ion_carveout_heap_map_user,
	.map_kernel = ion_carveout_heap_map_kernel,
	.unmap_kernel = ion_carveout_heap_unmap_kernel,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.print_debug = ion_carveout_print_debug,
};

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

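	/*
	 * gen_pool_create() takes the minimum allocation order and a NUMA
	 * node id: order 12 rounds every allocation up to a 2^12 = 4 KiB
	 * granule (one page here), and -1 lets the pool bookkeeping be
	 * allocated on any node.
	 */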
	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
			heap_data->size, -1);
	if (ret < 0) {
		gen_pool_destroy(carveout_heap->pool);
		kfree(carveout_heap);
		return ERR_PTR(-EINVAL);
	}
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->allocated_bytes = 0;
	carveout_heap->total_size = heap_data->size;

	return &carveout_heap->heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
	     container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
}
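
/*
 * Illustrative sketch (hypothetical board code, not part of this driver):
 * a platform describes the carveout region in its ion_platform_heap data
 * and the ion core then calls ion_carveout_heap_create() for it. The
 * base and size values below are made up for the example.
 *
 *	static struct ion_platform_heap my_board_heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.name = "carveout",
 *			.base = 0x40000000,	// hypothetical physical base
 *			.size = SZ_16M,		// hypothetical carveout size
 *		},
 *	};
 *
 * Every buffer from this heap is then a physically contiguous slice of
 * [base, base + size), handed out through the gen_pool set up above.
 */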