This change takes care of the following:
1. On a page reserve request: set the reserved bit to indicate the
page is reserved.
2. On a page unreserve request: clear the reserved bit.
Bug
1444151
Change-Id: I0920af9a8eb538a84c8b3520b383276c7a28d74b
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/377862
(cherry picked from commit
3308000d6a3a8133e00d41ca1532f1b1e2eee0bb)
Reviewed-on: http://git-master/r/405140
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
nvmap_zap_handle(handles[i], offsets[i], sizes[i]);
}
+int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
+ u32 nr, u32 op)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ u32 size = sizes[i] ? sizes[i] : handles[i]->size;
+ u32 offset = sizes[i] ? offsets[i] : 0;
+
+ if (op == NVMAP_PAGES_RESERVE)
+ nvmap_handle_mkreserved(handles[i], offset, size);
+ else
+ nvmap_handle_mkunreserved(handles[i],offset, size);
+ }
+ return 0;
+}
+
NVMAP_CACHE_OP_WB_INV,
};
/* Operation selector passed as @op to nvmap_reserve_pages(). */
enum {
	NVMAP_PAGES_UNRESERVE = 0,	/* clear the reserved bit */
	NVMAP_PAGES_RESERVE = 1		/* set the reserved bit */
};
+
struct nvmap_create_handle {
union {
__u32 id; /* FromId */