drm/radeon: make the ib an inline object

No need to allocate it dynamically any more. Embedding the IB directly
in struct radeon_cs_parser drops a level of indirection and removes the
NULL checks from the fence and cleanup paths.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
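---
Note: only the radeon_cs.c hunks are shown below. The change they imply
in struct radeon_cs_parser (radeon.h, not included in this excerpt) is,
in essence, turning the two IB pointers into embedded objects:

	struct radeon_cs_parser {
		...
		/* was: struct radeon_ib *ib; */
		struct radeon_ib	ib;
		...
		/* was: struct radeon_ib *const_ib; */
		struct radeon_ib	const_ib;
		...
	};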
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index dcfe2a0..c7d64a7 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -138,12 +138,12 @@
 		return 0;
 	}
 
-	r = radeon_semaphore_create(p->rdev, &p->ib->semaphore);
+	r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
 	if (r) {
 		return r;
 	}
 
-	return radeon_semaphore_sync_rings(p->rdev, p->ib->semaphore,
+	return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
 					   sync_to_ring, p->ring);
 }
 
@@ -161,8 +161,10 @@
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
-	p->ib = NULL;
-	p->const_ib = NULL;
+	p->ib.sa_bo = NULL;
+	p->ib.semaphore = NULL;
+	p->const_ib.sa_bo = NULL;
+	p->const_ib.semaphore = NULL;
 	p->chunk_ib_idx = -1;
 	p->chunk_relocs_idx = -1;
 	p->chunk_flags_idx = -1;
@@ -301,10 +303,9 @@
 {
 	unsigned i;
 
-
-	if (!error && parser->ib)
+	if (!error)
 		ttm_eu_fence_buffer_objects(&parser->validated,
-					    parser->ib->fence);
+					    parser->ib.fence);
 	else
 		ttm_eu_backoff_reservation(&parser->validated);
 
@@ -327,9 +328,7 @@
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
-	if (parser->const_ib) {
-		radeon_ib_free(parser->rdev, &parser->const_ib);
-	}
+	radeon_ib_free(parser->rdev, &parser->const_ib);
 }
 
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -355,7 +354,7 @@
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
@@ -370,8 +369,8 @@
 	if (r) {
 		DRM_ERROR("Failed to synchronize rings !\n");
 	}
-	parser->ib->vm_id = 0;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.vm_id = 0;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -422,14 +421,14 @@
 			DRM_ERROR("Failed to get const ib !\n");
 			return r;
 		}
-		parser->const_ib->is_const_ib = true;
-		parser->const_ib->length_dw = ib_chunk->length_dw;
+		parser->const_ib.is_const_ib = true;
+		parser->const_ib.length_dw = ib_chunk->length_dw;
 		/* Copy the packet into the IB */
-		if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
 				       ib_chunk->length_dw * 4)) {
 			return -EFAULT;
 		}
-		r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 		if (r) {
 			return r;
 		}
@@ -446,13 +445,13 @@
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
 			       ib_chunk->length_dw * 4)) {
 		return -EFAULT;
 	}
-	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
 	}
@@ -473,29 +472,29 @@
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
-		parser->const_ib->vm_id = vm->id;
+		parser->const_ib.vm_id = vm->id;
 		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 		 * offset inside the pool bo
 		 */
-		parser->const_ib->gpu_addr = parser->const_ib->sa_bo->soffset;
-		r = radeon_ib_schedule(rdev, parser->const_ib);
+		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+		r = radeon_ib_schedule(rdev, &parser->const_ib);
 		if (r)
 			goto out;
 	}
 
-	parser->ib->vm_id = vm->id;
+	parser->ib.vm_id = vm->id;
 	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 	 * offset inside the pool bo
 	 */
-	parser->ib->gpu_addr = parser->ib->sa_bo->soffset;
-	parser->ib->is_const_ib = false;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+	parser->ib.is_const_ib = false;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 out:
 	if (!r) {
 		if (vm->fence) {
 			radeon_fence_unref(&vm->fence);
 		}
-		vm->fence = radeon_fence_ref(parser->ib->fence);
+		vm->fence = radeon_fence_ref(parser->ib.fence);
 	}
 	mutex_unlock(&fpriv->vm.mutex);
 	return r;
@@ -573,7 +572,7 @@
 				size = PAGE_SIZE;
 		}
 		
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       size))
 			return -EFAULT;
@@ -590,7 +589,7 @@
 	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
 
 	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       PAGE_SIZE)) {
 			p->parser_error = -EFAULT;
@@ -606,7 +605,7 @@
 
 	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
 	if (copy1)
-		ibc->kpage[new_page] = p->ib->ptr + (pg_idx * (PAGE_SIZE / 4));
+		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
 
 	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
 			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
@@ -617,7 +616,7 @@
 
 	/* copy to IB for non single case */
 	if (!copy1)
-		memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
 
 	ibc->last_copied_page = pg_idx;
 	ibc->kpage_idx[new_page] = pg_idx;
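
---
A consequence for the IB helpers (not part of this excerpt): since
&parser->ib is now a struct radeon_ib * rather than a struct
radeon_ib **, radeon_ib_free() must take the IB by single pointer and
only tear down caller-owned storage, approximately:

	/* before: void radeon_ib_free(struct radeon_device *rdev,
	 *                             struct radeon_ib **ib);      */
	void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);

with radeon_ib_get() adjusted the same way. This is also why
radeon_cs_parser_init() (second hunk) pre-clears ib.sa_bo and
ib.semaphore: radeon_cs_parser_fini() now calls radeon_ib_free()
unconditionally, so freeing an IB that was never acquired must be a
no-op.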