NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/btree_map.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* btree_map.c -- textbook implementation of a B-tree with preemptive splitting
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>	/* mprotect() and PROT_READ, used by setpage() below */
#include "btree_map.h"
#ifdef GET_NDP_PERFORMENCE
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
#endif
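/*
* setpage -- (internal) write-protects the whole 4 KiB page containing addr
*/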
static void setpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ);
return;
}
TOID_DECLARE(struct tree_map_node, BTREE_MAP_TYPE_OFFSET + 1);
#define BTREE_ORDER 8 /* can't be odd */
#define BTREE_MIN ((BTREE_ORDER / 2) - 1) /* min number of keys per node */
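/*
* Inserts split any full node encountered on the way down (preemptive
* splitting, see btree_map_find_dest_node), so they never need to
* back-track up the tree.
*/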
struct tree_map_node_item {
uint64_t key;
PMEMoid value;
};
struct tree_map_node {
int n; /* number of occupied slots */
struct tree_map_node_item items[BTREE_ORDER - 1];
TOID(struct tree_map_node) slots[BTREE_ORDER];
};
struct btree_map {
TOID(struct tree_map_node) root;
};
/*
* set_empty_item -- (internal) sets null to the item
*/
static void
set_empty_item(struct tree_map_node_item *item)
{
item->key = 0;
item->value = OID_NULL;
}
/*
* btree_map_create -- allocates a new btree instance
*/
int
btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_clear_node -- (internal) removes all elements from the node
*/
static void
btree_map_clear_node(TOID(struct tree_map_node) node)
{
for (int i = 0; i < D_RO(node)->n; ++i) {
btree_map_clear_node(D_RO(node)->slots[i]);
}
TX_FREE(node);
}
/*
* btree_map_clear -- removes all elements from the map
*/
int
btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_destroy -- cleanups and frees btree instance
*/
int
btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_insert_item_at -- (internal) inserts an item at position
*/
static void
btree_map_insert_item_at(TOID(struct tree_map_node) node, int pos,
struct tree_map_node_item item)
{
D_RW(node)->items[pos] = item;
D_RW(node)->n += 1;
}
/*
* btree_map_insert_empty -- (internal) inserts an item into an empty node
*/
static void
btree_map_insert_empty(TOID(struct btree_map) map,
struct tree_map_node_item item)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = TX_ZNEW(struct tree_map_node);
btree_map_insert_item_at(D_RO(map)->root, 0, item);
}
/*
* btree_map_insert_node -- (internal) inserts and makes space for new node
*/
static void
btree_map_insert_node(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item,
TOID(struct tree_map_node) left, TOID(struct tree_map_node) right)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) { /* move all existing data */
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
memmove(&D_RW(node)->slots[p + 1], &D_RW(node)->slots[p],
sizeof(TOID(struct tree_map_node)) * ((BTREE_ORDER - 1 - p)));
}
D_RW(node)->slots[p] = left;
D_RW(node)->slots[p + 1] = right;
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_create_split_node -- (internal) splits a node into two
*/
static TOID(struct tree_map_node)
btree_map_create_split_node(TOID(struct tree_map_node) node,
struct tree_map_node_item *m)
{
TOID(struct tree_map_node) right = TX_ZNEW(struct tree_map_node);
int c = (BTREE_ORDER / 2);
*m = D_RO(node)->items[c - 1]; /* select median item */
TX_ADD(node);
set_empty_item(&D_RW(node)->items[c - 1]);
/* move everything right side of median to the new node */
for (int i = c; i < BTREE_ORDER; ++i) {
if (i != BTREE_ORDER - 1) {
D_RW(right)->items[D_RW(right)->n++] =
D_RO(node)->items[i];
set_empty_item(&D_RW(node)->items[i]);
}
D_RW(right)->slots[i - c] = D_RO(node)->slots[i];
D_RW(node)->slots[i] = TOID_NULL(struct tree_map_node);
}
D_RW(node)->n = c - 1;
return right;
}
/*
* btree_map_find_dest_node -- (internal) finds a place to insert the new key at
*/
static TOID(struct tree_map_node)
btree_map_find_dest_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) parent,
uint64_t key, int *p)
{
if (D_RO(n)->n == BTREE_ORDER - 1) { /* node is full, perform a split */
struct tree_map_node_item m;
TOID(struct tree_map_node) right =
btree_map_create_split_node(n, &m);
if (!TOID_IS_NULL(parent)) {
btree_map_insert_node(parent, *p, m, n, right);
if (key > m.key) /* select node to continue search */
n = right;
} else { /* replacing root node, the tree grows in height */
TOID(struct tree_map_node) up =
TX_ZNEW(struct tree_map_node);
D_RW(up)->n = 1;
D_RW(up)->items[0] = m;
D_RW(up)->slots[0] = n;
D_RW(up)->slots[1] = right;
TX_ADD_FIELD(map, root);
D_RW(map)->root = up;
n = up;
}
}
int i;
for (i = 0; i < BTREE_ORDER - 1; ++i) {
*p = i;
/*
* The key either fits somewhere in the middle or at the
* right edge of the node.
*/
if (D_RO(n)->n == i || D_RO(n)->items[i].key > key) {
return TOID_IS_NULL(D_RO(n)->slots[i]) ? n :
btree_map_find_dest_node(map,
D_RO(n)->slots[i], n, key, p);
}
}
/*
* The key is bigger than the last node element, go one level deeper
* in the rightmost child.
*/
return btree_map_find_dest_node(map, D_RO(n)->slots[i], n, key, p);
}
/*
* btree_map_insert_item -- (internal) inserts and makes space for new item
*/
static void
btree_map_insert_item(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) {
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
}
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_is_empty -- checks whether the tree map is empty
*/
int
btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root) || D_RO(D_RO(map)->root)->n == 0;
}
/*
* btree_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
#endif
int
btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value)
{
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
struct tree_map_node_item item = {key, value};
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
if (btree_map_is_empty(pop, map)) {
btree_map_insert_empty(map, item);
} else {
int p; /* position at the dest node to insert */
TOID(struct tree_map_node) parent =
TOID_NULL(struct tree_map_node);
TOID(struct tree_map_node) dest =
btree_map_find_dest_node(map, D_RW(map)->root,
parent, key, &p);
btree_map_insert_item(dest, p, item);
}
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
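/* cycle counts are converted to seconds assuming a fixed 2 GHz TSC rate */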
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return 0;
}
/*
* btree_map_rotate_right -- (internal) takes one element from right sibling
*/
static void
btree_map_rotate_right(TOID(struct tree_map_node) rsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p];
btree_map_insert_item(node, D_RO(node)->n, sep);
/* the first element of the right sibling is the new separator */
TX_ADD_FIELD(parent, items[p]);
D_RW(parent)->items[p] = D_RO(rsb)->items[0];
/* the nodes are not necessarily leafs, so copy also the slot */
TX_ADD_FIELD(node, slots[D_RO(node)->n]);
D_RW(node)->slots[D_RO(node)->n] = D_RO(rsb)->slots[0];
TX_ADD(rsb);
D_RW(rsb)->n -= 1; /* it loses one element, but still > min */
/* move all existing elements back by one array slot */
memmove(D_RW(rsb)->items, D_RO(rsb)->items + 1,
sizeof(struct tree_map_node_item) * (D_RO(rsb)->n));
memmove(D_RW(rsb)->slots, D_RO(rsb)->slots + 1,
sizeof(TOID(struct tree_map_node)) * (D_RO(rsb)->n + 1));
}
/*
* btree_map_rotate_left -- (internal) takes one element from left sibling
*/
static void
btree_map_rotate_left(TOID(struct tree_map_node) lsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p - 1];
btree_map_insert_item(node, 0, sep);
/* the last element of the left sibling is the new separator */
TX_ADD_FIELD(parent, items[p - 1]);
D_RW(parent)->items[p - 1] = D_RO(lsb)->items[D_RO(lsb)->n - 1];
/* rotate the node children */
memmove(D_RW(node)->slots + 1, D_RO(node)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(node)->n));
/* the nodes are not necessarily leafs, so copy also the slot */
D_RW(node)->slots[0] = D_RO(lsb)->slots[D_RO(lsb)->n];
TX_ADD_FIELD(lsb, n);
D_RW(lsb)->n -= 1; /* it loses one element, but still > min */
}
/*
* btree_map_merge -- (internal) merges node and right sibling
*/
static void
btree_map_merge(TOID(struct btree_map) map, TOID(struct tree_map_node) rn,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
struct tree_map_node_item sep = D_RO(parent)->items[p];
TX_ADD(node);
/* add separator to the deficient node */
D_RW(node)->items[D_RW(node)->n++] = sep;
/* copy right sibling data to node */
memcpy(&D_RW(node)->items[D_RO(node)->n], D_RO(rn)->items,
sizeof(struct tree_map_node_item) * D_RO(rn)->n);
memcpy(&D_RW(node)->slots[D_RO(node)->n], D_RO(rn)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(rn)->n + 1));
D_RW(node)->n += D_RO(rn)->n;
TX_FREE(rn); /* right node is now empty */
TX_ADD(parent);
D_RW(parent)->n -= 1;
/* move everything to the right of the separator by one array slot */
memmove(D_RW(parent)->items + p, D_RW(parent)->items + p + 1,
sizeof(struct tree_map_node_item) * (D_RO(parent)->n - p));
memmove(D_RW(parent)->slots + p + 1, D_RW(parent)->slots + p + 2,
sizeof(TOID(struct tree_map_node)) * (D_RO(parent)->n - p + 1));
/* if the parent is empty then the tree shrinks in height */
if (D_RO(parent)->n == 0 && TOID_EQUALS(parent, D_RO(map)->root)) {
TX_ADD(map);
TX_FREE(D_RO(map)->root);
D_RW(map)->root = node;
}
}
/*
* btree_map_rebalance -- (internal) performs tree rebalance
*/
static void
btree_map_rebalance(TOID(struct btree_map) map, TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
TOID(struct tree_map_node) rsb = p >= D_RO(parent)->n ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p + 1];
TOID(struct tree_map_node) lsb = p == 0 ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p - 1];
if (!TOID_IS_NULL(rsb) && D_RO(rsb)->n > BTREE_MIN)
btree_map_rotate_right(rsb, node, parent, p);
else if (!TOID_IS_NULL(lsb) && D_RO(lsb)->n > BTREE_MIN)
btree_map_rotate_left(lsb, node, parent, p);
else if (TOID_IS_NULL(rsb)) /* always merge with rightmost node */
btree_map_merge(map, node, lsb, parent, p - 1);
else
btree_map_merge(map, rsb, node, parent, p);
}
/*
* btree_map_get_leftmost_leaf -- (internal) searches for the successor
*/
static TOID(struct tree_map_node)
btree_map_get_leftmost_leaf(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) *p)
{
if (TOID_IS_NULL(D_RO(n)->slots[0]))
return n;
*p = n;
return btree_map_get_leftmost_leaf(map, D_RO(n)->slots[0], p);
}
/*
* btree_map_remove_from_node -- (internal) removes element from node
*/
static void
btree_map_remove_from_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
if (TOID_IS_NULL(D_RO(node)->slots[0])) { /* leaf */
TX_ADD(node);
if (D_RO(node)->n == 1 || p == BTREE_ORDER - 2) {
set_empty_item(&D_RW(node)->items[p]);
} else if (D_RO(node)->n != 1) {
memmove(&D_RW(node)->items[p],
&D_RW(node)->items[p + 1],
sizeof(struct tree_map_node_item) *
(D_RO(node)->n - p));
}
D_RW(node)->n -= 1;
return;
}
/* can't delete from non-leaf nodes, remove successor */
TOID(struct tree_map_node) rchild = D_RW(node)->slots[p + 1];
TOID(struct tree_map_node) lp = node;
TOID(struct tree_map_node) lm =
btree_map_get_leftmost_leaf(map, rchild, &lp);
TX_ADD_FIELD(node, items[p]);
D_RW(node)->items[p] = D_RO(lm)->items[0];
btree_map_remove_from_node(map, lm, lp, 0);
if (D_RO(lm)->n < BTREE_MIN) /* right child can be deficient now */
btree_map_rebalance(map, lm, lp,
TOID_EQUALS(lp, node) ? p + 1 : 0);
}
#define NODE_CONTAINS_ITEM(_n, _i, _k)\
((_i) != D_RO(_n)->n && D_RO(_n)->items[_i].key == (_k))
#define NODE_CHILD_CAN_CONTAIN_ITEM(_n, _i, _k)\
((_i) == D_RO(_n)->n || D_RO(_n)->items[_i].key > (_k)) &&\
!TOID_IS_NULL(D_RO(_n)->slots[_i])
/*
* btree_map_remove_item -- (internal) removes item from node
*/
static PMEMoid
btree_map_remove_item(TOID(struct btree_map) map,
TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent,
uint64_t key, int p)
{
PMEMoid ret = OID_NULL;
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key)) {
ret = D_RO(node)->items[i].value;
btree_map_remove_from_node(map, node, parent, i);
break;
} else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) {
ret = btree_map_remove_item(map, D_RO(node)->slots[i],
node, key, i);
break;
}
}
/* check for deficient nodes walking up */
if (!TOID_IS_NULL(parent) && D_RO(node)->n < BTREE_MIN)
btree_map_rebalance(map, node, parent, p);
return ret;
}
/*
* btree_map_remove -- removes key-value pair from the map
*/
PMEMoid
btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
ret = btree_map_remove_item(map, D_RW(map)->root,
TOID_NULL(struct tree_map_node), key, 0);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* btree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
btree_map_get_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return D_RO(node)->items[i].value;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_get_in_node(D_RO(node)->slots[i], key);
}
return OID_NULL;
}
/*
* btree_map_get -- searches for a value of the key
*/
PMEMoid
btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return btree_map_get_in_node(D_RO(map)->root, key);
}
/*
* btree_map_lookup_in_node -- (internal) searches for key if exists
*/
static int
btree_map_lookup_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return 1;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_lookup_in_node(
D_RO(node)->slots[i], key);
}
return 0;
}
/*
* btree_map_lookup -- searches if key exists
*/
int
btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return btree_map_lookup_in_node(D_RO(map)->root, key);
}
/*
* btree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
btree_map_foreach_node(const TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid, void *arg), void *arg)
{
if (TOID_IS_NULL(p))
return 0;
for (int i = 0; i <= D_RO(p)->n; ++i) {
if (btree_map_foreach_node(D_RO(p)->slots[i], cb, arg) != 0)
return 1;
if (i != D_RO(p)->n && D_RO(p)->items[i].key != 0) {
if (cb(D_RO(p)->items[i].key, D_RO(p)->items[i].value,
arg) != 0)
return 1;
}
}
return 0;
}
/*
* btree_map_foreach -- initiates recursive traversal
*/
int
btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return btree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
* btree_map_check -- check if given persistent object is a tree map
*/
int
btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* btree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
btree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_remove_free -- removes and frees an object from the tree
*/
int
btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = btree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rtree_map.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rtree_map.h -- Radix TreeMap collection implementation
*/
#ifndef RTREE_MAP_H
#define RTREE_MAP_H
#include <libpmemobj.h>
#ifndef RTREE_MAP_TYPE_OFFSET
#define RTREE_MAP_TYPE_OFFSET 1020
#endif
struct rtree_map;
TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0);
int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map);
int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg);
int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map);
int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value);
int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map);
PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg);
int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map);
#endif /* RTREE_MAP_H */
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rbtree_map.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* rbtree_map.h -- TreeMap sorted collection implementation
*/
#ifndef RBTREE_MAP_H
#define RBTREE_MAP_H
#include <libpmemobj.h>
#ifndef RBTREE_MAP_TYPE_OFFSET
#define RBTREE_MAP_TYPE_OFFSET 1016
#endif
struct rbtree_map;
TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0);
int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map);
int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map,
void *arg);
int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map);
int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value);
int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map);
PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map);
#endif /* RBTREE_MAP_H */
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/btree_map.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* btree_map.h -- TreeMap sorted collection implementation
*/
#ifndef BTREE_MAP_H
#define BTREE_MAP_H
#include <libpmemobj.h>
#ifndef BTREE_MAP_TYPE_OFFSET
#define BTREE_MAP_TYPE_OFFSET 1012
#endif
struct btree_map;
TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0);
int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map);
int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg);
int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map);
int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value);
int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map);
PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map);
#endif /* BTREE_MAP_H */
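/*
* Illustrative usage sketch (not part of the original sources): a minimal
* program driving the btree_map API declared above. The layout name, root
* structure and command-line handling are assumptions made for this example.
*/
#include <stdio.h>
#include <libpmemobj.h>
#include "btree_map.h"

POBJ_LAYOUT_BEGIN(btree_example);
POBJ_LAYOUT_ROOT(btree_example, struct example_root);
POBJ_LAYOUT_END(btree_example);

struct example_root {
	TOID(struct btree_map) map;	/* persistent handle to the tree */
};

int
main(int argc, char *argv[])
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s pool-file\n", argv[0]);
		return 1;
	}

	/* create a fresh pool; an existing one would be pmemobj_open()-ed */
	PMEMobjpool *pop = pmemobj_create(argv[1],
			POBJ_LAYOUT_NAME(btree_example), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		fprintf(stderr, "pmemobj_create: %s\n", pmemobj_errormsg());
		return 1;
	}

	TOID(struct example_root) root = POBJ_ROOT(pop, struct example_root);

	if (btree_map_create(pop, &D_RW(root)->map, NULL)) {
		pmemobj_close(pop);
		return 1;
	}

	/* a real program would store PMEMoids of allocated value objects */
	btree_map_insert(pop, D_RO(root)->map, 42, OID_NULL);
	printf("lookup(42) = %d\n", btree_map_lookup(pop, D_RO(root)->map, 42));

	btree_map_remove(pop, D_RO(root)->map, 42);
	btree_map_destroy(pop, &D_RW(root)->map);
	pmemobj_close(pop);
	return 0;
}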
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rbtree_map.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* rbtree_map.c -- red-black tree implementation with sentinel nodes
*/
#include <assert.h>
#include <errno.h>
#include "rbtree_map.h"
#include <stdio.h>
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
TOID_DECLARE(struct tree_map_node, RBTREE_MAP_TYPE_OFFSET + 1);
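/*
* Helper macros for navigating the tree through parent links. NODE_LOCATION
* evaluates to RB_RIGHT when a node is its parent's right child and to
* RB_LEFT otherwise; NODE_IS_NULL compares a node against the sentinel,
* which must be in scope as 's'.
*/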
#define NODE_P(_n)\
D_RW(_n)->parent
#define NODE_GRANDP(_n)\
NODE_P(NODE_P(_n))
#define NODE_PARENT_AT(_n, _rbc)\
D_RW(NODE_P(_n))->slots[_rbc]
#define NODE_PARENT_RIGHT(_n)\
NODE_PARENT_AT(_n, RB_RIGHT)
#define NODE_IS(_n, _rbc)\
TOID_EQUALS(_n, NODE_PARENT_AT(_n, _rbc))
#define NODE_IS_RIGHT(_n)\
TOID_EQUALS(_n, NODE_PARENT_RIGHT(_n))
#define NODE_LOCATION(_n)\
NODE_IS_RIGHT(_n)
#define RB_FIRST(_m)\
D_RW(D_RW(_m)->root)->slots[RB_LEFT]
#define NODE_IS_NULL(_n)\
TOID_EQUALS(_n, s)
enum rb_color {
COLOR_BLACK,
COLOR_RED,
MAX_COLOR
};
enum rb_children {
RB_LEFT,
RB_RIGHT,
MAX_RB
};
struct tree_map_node {
uint64_t key;
PMEMoid value;
enum rb_color color;
TOID(struct tree_map_node) parent;
TOID(struct tree_map_node) slots[MAX_RB];
};
struct rbtree_map {
TOID(struct tree_map_node) sentinel;
TOID(struct tree_map_node) root;
};
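/*
* A single sentinel node stands in for every NULL pointer, and the root field
* holds a dummy node whose RB_LEFT slot is the real tree root (see RB_FIRST);
* this removes most NULL checks from the algorithms below.
*/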
/*
* rbtree_map_create -- allocates a new red-black tree instance
*/
int
rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct rbtree_map);
TOID(struct tree_map_node) s = TX_ZNEW(struct tree_map_node);
D_RW(s)->color = COLOR_BLACK;
D_RW(s)->parent = s;
D_RW(s)->slots[RB_LEFT] = s;
D_RW(s)->slots[RB_RIGHT] = s;
TOID(struct tree_map_node) r = TX_ZNEW(struct tree_map_node);
D_RW(r)->color = COLOR_BLACK;
D_RW(r)->parent = s;
D_RW(r)->slots[RB_LEFT] = s;
D_RW(r)->slots[RB_RIGHT] = s;
D_RW(*map)->sentinel = s;
D_RW(*map)->root = r;
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_clear_node -- (internal) clears this node and its children
*/
static void
rbtree_map_clear_node(TOID(struct rbtree_map) map, TOID(struct tree_map_node) p)
{
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!NODE_IS_NULL(D_RO(p)->slots[RB_LEFT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_LEFT]);
if (!NODE_IS_NULL(D_RO(p)->slots[RB_RIGHT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_RIGHT]);
TX_FREE(p);
}
/*
* rbtree_map_clear -- removes all elements from the map
*/
int
rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
TX_BEGIN(pop) {
rbtree_map_clear_node(map, D_RW(map)->root);
TX_ADD_FIELD(map, root);
TX_ADD_FIELD(map, sentinel);
TX_FREE(D_RW(map)->sentinel);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
D_RW(map)->sentinel = TOID_NULL(struct tree_map_node);
} TX_END
return 0;
}
/*
* rbtree_map_destroy -- cleanups and frees red-black tree instance
*/
int
rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rbtree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct rbtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_rotate -- (internal) performs a left/right rotation around a node
*/
static void
rbtree_map_rotate(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) node, enum rb_children c)
{
TOID(struct tree_map_node) child = D_RO(node)->slots[!c];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TX_ADD(node);
TX_ADD(child);
D_RW(node)->slots[!c] = D_RO(child)->slots[c];
if (!TOID_EQUALS(D_RO(child)->slots[c], s))
TX_SET(D_RW(child)->slots[c], parent, node);
NODE_P(child) = NODE_P(node);
TX_SET(NODE_P(node), slots[NODE_LOCATION(node)], child);
D_RW(child)->slots[c] = node;
D_RW(node)->parent = child;
}
/*
* rbtree_map_insert_bst -- (internal) inserts a node in regular BST fashion
*/
static void
rbtree_map_insert_bst(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) parent = D_RO(map)->root;
TOID(struct tree_map_node) *dst = &RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
D_RW(n)->slots[RB_LEFT] = s;
D_RW(n)->slots[RB_RIGHT] = s;
while (!NODE_IS_NULL(*dst)) {
parent = *dst;
dst = &D_RW(*dst)->slots[D_RO(n)->key > D_RO(*dst)->key];
}
TX_SET(n, parent, parent);
pmemobj_tx_add_range_direct(dst, sizeof(*dst));
*dst = n;
}
/*
* rbtree_map_recolor -- (internal) restores red-black tree properties
*/
static TOID(struct tree_map_node)
rbtree_map_recolor(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) uncle = D_RO(NODE_GRANDP(n))->slots[!c];
if (D_RO(uncle)->color == COLOR_RED) {
TX_SET(uncle, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
return NODE_GRANDP(n);
} else {
if (NODE_IS(n, !c)) {
n = NODE_P(n);
rbtree_map_rotate(map, n, c);
}
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_GRANDP(n), (enum rb_children)!c);
}
return n;
}
/*
* rbtree_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
uint64_t resetCycles;
#endif
int
rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value)
{
int ret = 0;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
TOID(struct tree_map_node) n = TX_ZNEW(struct tree_map_node);
D_RW(n)->key = key;
D_RW(n)->value = value;
rbtree_map_insert_bst(map, n);
D_RW(n)->color = COLOR_RED;
while (D_RO(NODE_P(n))->color == COLOR_RED)
n = rbtree_map_recolor(map, n, (enum rb_children)
NODE_LOCATION(NODE_P(n)));
TX_SET(RB_FIRST(map), color, COLOR_BLACK);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* rbtree_map_successor -- (internal) returns the successor of a node
*/
static TOID(struct tree_map_node)
rbtree_map_successor(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) dst = D_RO(n)->slots[RB_RIGHT];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!TOID_EQUALS(s, dst)) {
while (!NODE_IS_NULL(D_RO(dst)->slots[RB_LEFT]))
dst = D_RO(dst)->slots[RB_LEFT];
} else {
dst = D_RO(n)->parent;
while (TOID_EQUALS(n, D_RO(dst)->slots[RB_RIGHT])) {
n = dst;
dst = NODE_P(dst);
}
if (TOID_EQUALS(dst, D_RO(map)->root))
return s;
}
return dst;
}
/*
* rbtree_map_find_node -- (internal) returns the node that contains the key
*/
static TOID(struct tree_map_node)
rbtree_map_find_node(TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) dst = RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
while (!NODE_IS_NULL(dst)) {
if (D_RO(dst)->key == key)
return dst;
dst = D_RO(dst)->slots[key > D_RO(dst)->key];
}
return TOID_NULL(struct tree_map_node);
}
/*
* rbtree_map_repair_branch -- (internal) restores red-black tree in one branch
*/
static TOID(struct tree_map_node)
rbtree_map_repair_branch(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) sb = NODE_PARENT_AT(n, !c); /* sibling */
if (D_RO(sb)->color == COLOR_RED) {
TX_SET(sb, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_P(n), c);
sb = NODE_PARENT_AT(n, !c);
}
if (D_RO(D_RO(sb)->slots[RB_RIGHT])->color == COLOR_BLACK &&
D_RO(D_RO(sb)->slots[RB_LEFT])->color == COLOR_BLACK) {
TX_SET(sb, color, COLOR_RED);
return D_RO(n)->parent;
} else {
if (D_RO(D_RO(sb)->slots[!c])->color == COLOR_BLACK) {
TX_SET(D_RW(sb)->slots[c], color, COLOR_BLACK);
TX_SET(sb, color, COLOR_RED);
rbtree_map_rotate(map, sb, (enum rb_children)!c);
sb = NODE_PARENT_AT(n, !c);
}
TX_SET(sb, color, D_RO(NODE_P(n))->color);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(D_RW(sb)->slots[!c], color, COLOR_BLACK);
rbtree_map_rotate(map, NODE_P(n), c);
return RB_FIRST(map);
}
return n;
}
/*
* rbtree_map_repair -- (internal) restores red-black tree properties
* after remove
*/
static void
rbtree_map_repair(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
/* if left, repair right sibling, otherwise repair left sibling. */
while (!TOID_EQUALS(n, RB_FIRST(map)) && D_RO(n)->color == COLOR_BLACK)
n = rbtree_map_repair_branch(map, n, (enum rb_children)
NODE_LOCATION(n));
TX_SET(n, color, COLOR_BLACK);
}
/*
* rbtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
TOID(struct tree_map_node) n = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(n))
return ret;
ret = D_RO(n)->value;
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TOID(struct tree_map_node) r = D_RO(map)->root;
TOID(struct tree_map_node) y = (NODE_IS_NULL(D_RO(n)->slots[RB_LEFT]) ||
NODE_IS_NULL(D_RO(n)->slots[RB_RIGHT]))
? n : rbtree_map_successor(map, n);
TOID(struct tree_map_node) x = NODE_IS_NULL(D_RO(y)->slots[RB_LEFT]) ?
D_RO(y)->slots[RB_RIGHT] : D_RO(y)->slots[RB_LEFT];
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
TX_SET(x, parent, NODE_P(y));
if (TOID_EQUALS(NODE_P(x), r)) {
TX_SET(r, slots[RB_LEFT], x);
} else {
TX_SET(NODE_P(y), slots[NODE_LOCATION(y)], x);
}
if (D_RO(y)->color == COLOR_BLACK)
rbtree_map_repair(map, x);
if (!TOID_EQUALS(y, n)) {
TX_ADD(y);
D_RW(y)->slots[RB_LEFT] = D_RO(n)->slots[RB_LEFT];
D_RW(y)->slots[RB_RIGHT] = D_RO(n)->slots[RB_RIGHT];
D_RW(y)->parent = D_RO(n)->parent;
D_RW(y)->color = D_RO(n)->color;
TX_SET(D_RW(n)->slots[RB_LEFT], parent, y);
TX_SET(D_RW(n)->slots[RB_RIGHT], parent, y);
TX_SET(NODE_P(n), slots[NODE_LOCATION(n)], y);
}
TX_FREE(n);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* rbtree_map_get -- searches for a value of the key
*/
PMEMoid
rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return OID_NULL;
return D_RO(node)->value;
}
/*
* rbtree_map_lookup -- searches if key exists
*/
int
rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return 0;
return 1;
}
/*
* rbtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rbtree_map_foreach_node(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (TOID_EQUALS(p, D_RO(map)->sentinel))
return 0;
if ((ret = rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_LEFT], cb, arg)) == 0) {
if ((ret = cb(D_RO(p)->key, D_RO(p)->value, arg)) == 0)
rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_RIGHT], cb, arg);
}
return ret;
}
/*
* rbtree_map_foreach -- initiates recursive traversal
*/
int
rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return rbtree_map_foreach_node(map, RB_FIRST(map), cb, arg);
}
/*
* rbtree_map_is_empty -- checks whether the tree map is empty
*/
int
rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(RB_FIRST(map));
}
/*
* rbtree_map_check -- check if given persistent object is a tree map
*/
int
rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* rbtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rbtree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_remove_free -- removes and frees an object from the tree
*/
int
rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = rbtree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj++/README.md
This folder contained examples for libpmemobj C++ bindings.
They have been moved to https://github.com/pmem/libpmemobj-cpp/tree/master/examples
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmempool/manpage.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* manpage.c -- simple example for the libpmempool man page
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <libpmempool.h>
#define PATH "./pmem-fs/myfile"
#define CHECK_FLAGS (PMEMPOOL_CHECK_FORMAT_STR|PMEMPOOL_CHECK_REPAIR|\
PMEMPOOL_CHECK_VERBOSE)
int
main(int argc, char *argv[])
{
PMEMpoolcheck *ppc;
struct pmempool_check_status *status;
enum pmempool_check_result ret;
/* arguments for check */
struct pmempool_check_args args = {
.path = PATH,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
.flags = CHECK_FLAGS
};
/* initialize check context */
if ((ppc = pmempool_check_init(&args, sizeof(args))) == NULL) {
perror("pmempool_check_init");
exit(EXIT_FAILURE);
}
/* perform check and repair, answer 'yes' for each question */
while ((status = pmempool_check(ppc)) != NULL) {
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
printf("%s\n", status->str.msg);
status->str.answer = "yes";
break;
default:
pmempool_check_end(ppc);
exit(EXIT_FAILURE);
}
}
/* finalize the check and get the result */
ret = pmempool_check_end(ppc);
switch (ret) {
case PMEMPOOL_CHECK_RESULT_CONSISTENT:
case PMEMPOOL_CHECK_RESULT_REPAIRED:
return 0;
default:
return 1;
}
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem/simple_copy.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* simple_copy.c -- show how to use pmem_memcpy_persist()
*
* usage: simple_copy src-file dst-file
*
* Reads 4k from src-file and writes it to dst-file.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* just copying 4k to pmem for this example */
#define BUF_LEN 4096
int
main(int argc, char *argv[])
{
int srcfd;
char buf[BUF_LEN];
char *pmemaddr;
size_t mapped_len;
int is_pmem;
int cc;
if (argc != 3) {
fprintf(stderr, "usage: %s src-file dst-file\n", argv[0]);
exit(1);
}
/* open src-file */
if ((srcfd = open(argv[1], O_RDONLY)) < 0) {
perror(argv[1]);
exit(1);
}
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(argv[2], BUF_LEN,
PMEM_FILE_CREATE|PMEM_FILE_EXCL,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* read up to BUF_LEN from srcfd */
if ((cc = read(srcfd, buf, BUF_LEN)) < 0) {
pmem_unmap(pmemaddr, mapped_len);
perror("read");
exit(1);
}
/* write it to the pmem */
if (is_pmem) {
pmem_memcpy_persist(pmemaddr, buf, cc);
} else {
memcpy(pmemaddr, buf, cc);
pmem_msync(pmemaddr, cc);
}
close(srcfd);
pmem_unmap(pmemaddr, mapped_len);
exit(0);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem/full_copy.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* full_copy.c -- show how to use pmem_memcpy_nodrain()
*
* usage: full_copy src-file dst-file
*
* Copies src-file to dst-file in 4k chunks.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* copying 4k at a time to pmem for this example */
#define BUF_LEN 4096
/*
* do_copy_to_pmem -- copy to pmem, postponing drain step until the end
*/
static void
do_copy_to_pmem(char *pmemaddr, int srcfd, off_t len)
{
char buf[BUF_LEN];
int cc;
/* copy the file, saving the last flush step to the end */
while ((cc = read(srcfd, buf, BUF_LEN)) > 0) {
pmem_memcpy_nodrain(pmemaddr, buf, cc);
pmemaddr += cc;
}
if (cc < 0) {
perror("read");
exit(1);
}
/* perform final flush step */
pmem_drain();
}
/*
* do_copy_to_non_pmem -- copy to a non-pmem memory mapped file
*/
static void
do_copy_to_non_pmem(char *addr, int srcfd, off_t len)
{
char *startaddr = addr;
char buf[BUF_LEN];
int cc;
/* copy the file, saving the last flush step to the end */
while ((cc = read(srcfd, buf, BUF_LEN)) > 0) {
memcpy(addr, buf, cc);
addr += cc;
}
if (cc < 0) {
perror("read");
exit(1);
}
/* flush it */
if (pmem_msync(startaddr, len) < 0) {
perror("pmem_msync");
exit(1);
}
}
int
main(int argc, char *argv[])
{
int srcfd;
struct stat stbuf;
char *pmemaddr;
size_t mapped_len;
int is_pmem;
if (argc != 3) {
fprintf(stderr, "usage: %s src-file dst-file\n", argv[0]);
exit(1);
}
/* open src-file */
if ((srcfd = open(argv[1], O_RDONLY)) < 0) {
perror(argv[1]);
exit(1);
}
/* find the size of the src-file */
if (fstat(srcfd, &stbuf) < 0) {
perror("fstat");
exit(1);
}
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(argv[2], stbuf.st_size,
PMEM_FILE_CREATE|PMEM_FILE_EXCL,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* determine if range is true pmem, call appropriate copy routine */
if (is_pmem)
do_copy_to_pmem(pmemaddr, srcfd, stbuf.st_size);
else
do_copy_to_non_pmem(pmemaddr, srcfd, stbuf.st_size);
close(srcfd);
pmem_unmap(pmemaddr, mapped_len);
exit(0);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem/manpage.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* manpage.c -- simple example for the libpmem man page
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* using 4k of pmem for this example */
#define PMEM_LEN 4096
#define PATH "/pmem-fs/myfile"
int
main(int argc, char *argv[])
{
char *pmemaddr;
size_t mapped_len;
int is_pmem;
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(PATH, PMEM_LEN, PMEM_FILE_CREATE,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* store a string to the persistent memory */
strcpy(pmemaddr, "hello, persistent memory");
/* flush above strcpy to persistence */
if (is_pmem)
pmem_persist(pmemaddr, mapped_len);
else
pmem_msync(pmemaddr, mapped_len);
/*
* Delete the mappings. The region is also
* automatically unmapped when the process is
* terminated.
*/
pmem_unmap(pmemaddr, mapped_len);
return 0;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem2/redo/redo.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* redo.c -- implementation of a simple redo log kept in persistent memory;
* it is used here to implement a doubly linked list.
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <libpmem2.h>
#if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__)
#define CACHELINE 64ULL
#elif defined(__PPC64__)
#define CACHELINE 128ULL
#else
#error unable to recognize architecture at compile time
#endif
#define REDO_NENTRIES 1000
#define LIST_ENTRY_NONE UINT64_MAX
#define REDO_ENTRIES_IN_CL (CACHELINE / sizeof(struct redo_log_entry))
#define POOL_SIZE_MIN (sizeof(struct pool_hdr) + sizeof(struct node) * 100)
#define offset(pool, addr) ((uintptr_t)(addr) - (uintptr_t)(&(pool)->hdr.redo))
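/* offsets stored in the log are relative to the start of the redo log itself */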
static pmem2_persist_fn Persist;
static pmem2_flush_fn Flush;
static pmem2_drain_fn Drain;
static pmem2_memset_fn Memset;
struct pool_layout;
struct redo_log_entry {
uint64_t offset;
uint64_t data;
};
struct redo_log {
struct redo_state {
uint64_t last;
uint8_t apply;
uint8_t unused[CACHELINE - sizeof(uint64_t) - sizeof(uint8_t)];
} state;
struct redo_log_entry entries[REDO_ENTRIES_IN_CL];
};
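/*
* Usage protocol (see the functions below): entries are queued with
* redo_add(), made durable and marked valid by redo_commit(), and replayed
* by redo_apply(); redo_apply() is also called on every start-up, where an
* uncommitted log is simply discarded.
*/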
/*
* redo_apply -- process and apply the redo log
*/
static void
redo_apply(struct redo_log *redo)
{
if (!redo->state.apply) {
/*
* Redo log not committed.
* Just reset any potential leftovers.
*/
goto out;
}
uint8_t *start = (uint8_t *)redo;
for (uint64_t i = 0; i < redo->state.last; ++i) {
uint64_t *node = (uint64_t *)(start + redo->entries[i].offset);
*node = redo->entries[i].data;
Flush(node, sizeof(*node));
}
Drain();
out:
/*
* reset 'apply' and 'last' fields - if memset will be interrupted
* it will be applied anyway as redo_apply() is called on each restart
*/
Memset(redo, 0, sizeof(struct redo_state), PMEM2_F_MEM_WC);
}
/*
* redo_add -- add an entry to redo log
*/
static void
redo_add(struct redo_log *redo, uintptr_t offset, uint64_t data)
{
assert(redo->state.apply == 0);
assert(redo->state.last < REDO_ENTRIES_IN_CL);
struct redo_log_entry *entry = &redo->entries[redo->state.last++];
entry->offset = (uintptr_t)offset;
entry->data = data;
/* we will flush redo log at once when it's ready */
}
/*
* redo_commit -- commit redo log
*/
static void
redo_commit(struct redo_log *redo)
{
if (redo->state.last == 0)
return;
/* Persist entire redo log */
Persist(redo, sizeof(redo->state) +
sizeof(*redo->entries) * redo->state.last);
redo->state.apply = 1;
Persist(&redo->state.apply, sizeof(redo->state.apply));
}
struct node {
uint64_t id;
uint64_t prev;
uint64_t next;
uint64_t key;
uint64_t value;
};
struct pool_layout {
struct pool_hdr {
struct redo_log redo;
uint64_t list_head;
uint64_t list_nentries;
uint64_t used_entries;
} hdr;
struct node nodes[];
};
/*
* list_alloc_node -- alloc and initialize new node
*/
static struct node *
list_alloc_node(struct pool_layout *pool, uint64_t key, uint64_t value)
{
struct node *node = &pool->nodes[pool->hdr.used_entries];
/*
* Until used_entries is not updated allocated node is
* not persistent so we can update it without using redo log
*/
node->next = LIST_ENTRY_NONE;
node->prev = LIST_ENTRY_NONE;
node->key = key;
node->value = value;
node->id = pool->hdr.used_entries;
redo_add(&pool->hdr.redo, offset(pool, &pool->hdr.used_entries),
pool->hdr.used_entries + 1);
return node;
}
/*
* list_add -- add a new node to the list
*/
static int
list_add(struct pool_layout *pool, uint64_t key, uint64_t value)
{
struct node *node;
struct node *next = NULL;
struct node *prev = NULL;
if (pool->hdr.used_entries >= pool->hdr.list_nentries) {
fprintf(stderr, "pool is full\n");
return 1;
}
node = list_alloc_node(pool, key, value);
/*
* Find the appropriate location where
* an allocated node is to be inserted.
*/
if (pool->hdr.used_entries != 0) {
next = &pool->nodes[pool->hdr.list_head];
while (next->key < key) {
prev = next;
if (next->next == LIST_ENTRY_NONE) {
next = NULL;
break;
}
next = &pool->nodes[next->next];
}
}
struct redo_log *redo = &pool->hdr.redo;
if (next != NULL) {
node->next = next->id;
redo_add(redo, offset(pool, &next->prev), node->id);
}
if (prev != NULL)
node->prev = prev->id;
uint64_t next_offset =
offset(pool, prev ? &prev->next: &pool->hdr.list_head);
redo_add(redo, next_offset, node->id);
Flush(node, sizeof(*node));
redo_commit(redo);
redo_apply(redo);
return 0;
}
/*
* list_print -- dump content of a list
*/
static void
list_print(struct pool_layout *pool)
{
if (pool->hdr.used_entries == 0)
return;
struct node *node = &pool->nodes[pool->hdr.list_head];
printf("%" PRIu64 " = %" PRIu64 "\n", node->key, node->value);
while (node->next != LIST_ENTRY_NONE) {
node = &pool->nodes[node->next];
printf("%" PRIu64 " = %" PRIu64 "\n", node->key, node->value);
}
}
/*
* print_id -- prints the id of the node
*/
static inline void
print_id(uint64_t id)
{
if (id != LIST_ENTRY_NONE)
printf("%" PRIu64, id);
else
printf("NULL");
}
/*
* list_dump -- dumps all allocated nodes
*/
static void
list_dump(struct pool_layout *pool)
{
printf("allocated entries: %" PRIu64 "\n", pool->hdr.used_entries);
for (uint64_t i = 0; i < pool->hdr.used_entries; i++) {
struct node *node = &pool->nodes[i];
print_id(node->prev);
printf("<---%" PRIu64 "--->", node->id);
print_id(node->next);
printf("\t\t\tkey=%" PRIu64 " value=%" PRIu64 "\n",
node->key, node->value);
}
}
/*
* list_check -- check consistency of a list
*/
static int
list_check(struct pool_layout *pool)
{
char *c = NULL;
if (pool->hdr.used_entries == 0)
return 0;
if (pool->hdr.list_head >= pool->hdr.used_entries) {
goto failed; /* first list entry is not allocated */
}
struct node *node = &pool->nodes[pool->hdr.list_head];
if (node->prev != LIST_ENTRY_NONE) {
goto failed; /* first list entry has previous node */
}
c = malloc(pool->hdr.used_entries);
if (c == NULL) {
perror("malloc");
return 1;
}
memset(c, 0, pool->hdr.used_entries);
for (; node->next != LIST_ENTRY_NONE &&
node->next < pool->hdr.used_entries;
node = &pool->nodes[node->next]) {
c[node->id] = 1;
}
c[node->id] = 1;
for (uint64_t i = 0; i < pool->hdr.used_entries; i++) {
if (!c[i])
goto failed; /* allocated node is not on the list */
}
if (node->next != LIST_ENTRY_NONE)
goto failed; /* last list entry has next node */
free(c);
return 0;
failed:
free(c);
list_dump(pool);
fprintf(stderr,
"consistency check failed\n");
return 1;
}
/*
* pool_map -- create pmem2_map for a given file descriptor
*/
static struct pmem2_map *
pool_map(int fd, int map_private)
{
struct pmem2_config *cfg;
struct pmem2_map *map = NULL;
struct pmem2_source *src;
if (pmem2_config_new(&cfg)) {
pmem2_perror("pmem2_config_new");
goto err_cfg_new;
}
if (map_private && pmem2_config_set_sharing(cfg, PMEM2_PRIVATE)) {
pmem2_perror("pmem2_config_set_sharing");
goto err_cfg_set;
}
if (pmem2_config_set_required_store_granularity(cfg,
PMEM2_GRANULARITY_PAGE)) {
pmem2_perror("pmem2_config_set_required_store_granularity");
goto err_cfg_set;
}
if (pmem2_source_from_fd(&src, fd)) {
pmem2_perror("pmem2_source_from_fd");
goto err_src_new;
}
if (pmem2_map(cfg, src, &map)) {
pmem2_perror("pmem2_map");
goto err_map;
}
err_map:
pmem2_source_delete(&src);
err_src_new:
err_cfg_set:
pmem2_config_delete(&cfg);
err_cfg_new:
return map;
}
/*
* parse_uint64 -- parse uint64_t string
*/
static uint64_t
parse_uint64(const char *str)
{
char *end;
errno = 0;
uint64_t value = strtoull(str, &end, 0);
if (errno == ERANGE || *end != '\0') {
fprintf(stderr, "invalid argument %s\n", str);
exit(1);
}
return value;
}
/*
* print_help -- print help to the stderr
*/
static void
print_help(char *name)
{
fprintf(stderr, "usage: %s add pool key value\n", name);
fprintf(stderr, " %s print pool\n", name);
fprintf(stderr, " %s check pool\n", name);
fprintf(stderr, " %s dump pool\n", name);
}
int
main(int argc, char *argv[])
{
int fd;
int ret = 0;
if (argc < 3) {
print_help(argv[0]);
return 1;
}
const char *path = argv[2];
const char *cmd = argv[1];
uint64_t key = 0, value = 0;
int map_private = 0;
if (strcmp(cmd, "add") == 0) {
if ((argc - 3) % 2) {
print_help(argv[0]);
return 1;
}
} else {
map_private = 1;
if (argc != 3) {
print_help(argv[0]);
return 1;
}
}
fd = open(path, O_RDWR);
if (fd < 0) {
perror("open");
return 1;
}
struct pmem2_map *map = pool_map(fd, map_private);
if (map == NULL) {
ret = 1;
goto err_map;
}
size_t size = pmem2_map_get_size(map);
if (size < POOL_SIZE_MIN) {
fprintf(stderr,
"pool size(%" PRIu64") smaller than minimum size(%"
PRIu64 ")",
size, POOL_SIZE_MIN);
ret = 1;
goto out;
}
Persist = pmem2_get_persist_fn(map);
Flush = pmem2_get_flush_fn(map);
Drain = pmem2_get_drain_fn(map);
Memset = pmem2_get_memset_fn(map);
struct pool_layout *pool = pmem2_map_get_address(map);
redo_apply(&(pool->hdr.redo));
pool->hdr.list_nentries =
(size - sizeof(struct pool_hdr)) / sizeof(struct node);
Persist(&pool->hdr.list_nentries, sizeof(pool->hdr.list_nentries));
if (strcmp(cmd, "add") == 0) {
for (int i = 3; i < argc; i += 2) {
key = parse_uint64(argv[i]);
value = parse_uint64(argv[i + 1]);
ret = list_add(pool, key, value);
if (ret)
goto out;
}
} else if (strcmp(cmd, "print") == 0) {
list_print(pool);
} else if (strcmp(cmd, "check") == 0) {
ret = list_check(pool);
} else if (strcmp(cmd, "dump") == 0) {
list_dump(pool);
} else {
fprintf(stderr, "invalid command %s\n", cmd);
print_help(argv[0]);
ret = 1;
}
out:
pmem2_unmap(&map);
err_map:
close(fd);
return ret;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem2/map_multiple_files/map_multiple_files.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* map_multiple_files.c -- implementation of virtual address allocation example
*/
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <libpmem2.h>
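/*
* The example maps several files side by side into one virtual address
* reservation, so the whole range can then be written as if it were a single
* contiguous region (the final memset below spans all of the mappings).
*/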
/*
* file_dsc - a structure that keeps information about file mapping
*/
struct file_dsc {
int fd;
size_t size;
struct pmem2_source *src;
struct pmem2_map *map;
};
/*
* file_dsc_init - initialize file_dsc structure values
*/
static int
file_dsc_init(struct file_dsc *fdsc, char *path)
{
if ((fdsc->fd = open(path, O_RDWR)) < 0) {
perror("open");
goto fail;
}
if (pmem2_source_from_fd(&fdsc->src, fdsc->fd)) {
pmem2_perror("pmem2_source_from_fd");
goto file_close;
}
if (pmem2_source_size(fdsc->src, &fdsc->size)) {
pmem2_perror("pmem2_source_size");
goto source_delete;
}
return 0;
source_delete:
pmem2_source_delete(&fdsc->src);
file_close:
close(fdsc->fd);
fail:
return 1;
}
/*
* file_dsc_fini - deinitialize file_dsc structure values
*/
static void
file_dsc_fini(struct file_dsc *fdsc)
{
close(fdsc->fd);
pmem2_source_delete(&fdsc->src);
}
/*
* file_check_align - check if file is aligned
*/
static int
file_check_align(struct file_dsc *fdsc)
{
size_t alignment;
if (pmem2_source_alignment(fdsc->src, &alignment)) {
pmem2_perror("pmem2_source_alignment");
return 1;
}
if (fdsc->size % alignment != 0) {
fprintf(stderr,
"usage: files must be aligned to %zu bytes\n",
alignment);
return 1;
}
return 0;
}
/*
* files_check_same_align - check if files have the same alignment
*/
static int
files_check_same_align(struct file_dsc *fdsc, int nfiles)
{
size_t alignment, nalignment;
if (pmem2_source_alignment(fdsc[0].src, &alignment)) {
pmem2_perror("pmem2_source_alignment");
return 1;
}
for (int n = 1; n < nfiles; n++) {
if (pmem2_source_alignment(fdsc[n].src, &nalignment)) {
pmem2_perror("pmem2_source_alignment");
return 1;
}
if (alignment != nalignment) {
fprintf(stderr,
"usage: files must have the same alignment\n");
return 1;
}
}
return 0;
}
/*
* files_check_memset - check if mappings retrieve the same memset function
*/
static int
files_check_same_memset(struct file_dsc *fdsc, int nfiles,
pmem2_memset_fn memset_fn)
{
for (int n = 0; n < nfiles; n++) {
if (memset_fn != pmem2_get_memset_fn(fdsc[n].map)) {
fprintf(stderr,
"usage: filesystems must be compatible for a side by side mapping\n");
return 1;
}
}
return 0;
}
int
main(int argc, char *argv[])
{
int ret = 1;
if (argc < 2) {
fprintf(stderr,
"usage: ./map_multiple_files <file1> <file2> ...\n");
return ret;
}
int nfiles = argc - 1;
struct file_dsc *fdsc = malloc(sizeof(struct file_dsc) * nfiles);
if (!fdsc) {
perror("malloc");
return ret;
}
int ninit;
for (ninit = 0; ninit < nfiles; ninit++) {
if (file_dsc_init(&fdsc[ninit], argv[ninit + 1])) {
goto fdsc_fini;
}
}
for (int n = 0; n < nfiles; n++) {
if (file_check_align(&fdsc[n]))
goto fdsc_fini;
}
if (files_check_same_align(fdsc, nfiles))
goto fdsc_fini;
size_t reservation_size = 0;
for (int n = 0; n < nfiles; n++) {
reservation_size += fdsc[n].size;
}
struct pmem2_vm_reservation *rsv;
if (pmem2_vm_reservation_new(&rsv, reservation_size, NULL)) {
pmem2_perror("pmem2_vm_reservation_new");
goto fdsc_fini;
}
struct pmem2_config *cfg;
if (pmem2_config_new(&cfg)) {
pmem2_perror("pmem2_config_new");
goto delete_vm_reservation;
}
if (pmem2_config_set_required_store_granularity(
cfg, PMEM2_GRANULARITY_PAGE)) {
pmem2_perror("pmem2_config_set_required_store_granularity");
goto delete_config;
}
size_t offset = 0;
int nmap;
for (nmap = 0; nmap < nfiles; nmap++) {
if (pmem2_config_set_vm_reservation(
cfg, rsv, offset)) {
pmem2_perror("pmem2_config_set_vm_reservation");
goto unmap;
}
offset += fdsc[nmap].size;
if (pmem2_map(cfg, fdsc[nmap].src, &fdsc[nmap].map)) {
pmem2_perror("pmem2_map");
goto unmap;
}
}
char *addr = pmem2_map_get_address(fdsc[0].map);
if (addr == NULL) {
pmem2_perror("pmem2_map_get_address");
goto unmap;
}
pmem2_memset_fn memset_fn = pmem2_get_memset_fn(fdsc[0].map);
if (files_check_same_memset(fdsc, nfiles, memset_fn))
goto unmap;
memset_fn(addr, '-', reservation_size, PMEM2_F_MEM_NONTEMPORAL);
ret = 0;
unmap:
for (nmap--; nmap >= 0; nmap--) {
pmem2_unmap(&fdsc[nmap].map);
}
delete_config:
pmem2_config_delete(&cfg);
delete_vm_reservation:
pmem2_vm_reservation_delete(&rsv);
fdsc_fini:
for (ninit--; ninit >= 0; ninit--)
file_dsc_fini(&fdsc[ninit]);
free(fdsc);
return ret;
}
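/*
 * Usage sketch (added; paths are hypothetical): every file must be mappable
 * by pmem2 and pass the alignment checks above, e.g.:
 *
 *	./map_multiple_files /mnt/pmem/file1 /mnt/pmem/file2
 *
 * The mappings are placed back to back inside a single virtual address
 * reservation, so the one memset_fn call at the end covers all files:
 *
 *	reservation: [ file1 | file2 | ... ]  <- contiguous addresses
 */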
| 4,780 | 18.594262 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem2/advanced/advanced.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* advanced.c -- example for the libpmem2
*
* usage: advanced src-file offset length
*
*/
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <libpmem2.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
int
main(int argc, char *argv[])
{
int fd;
struct pmem2_config *cfg;
struct pmem2_map *map;
struct pmem2_source *src;
if (argc != 4) {
fprintf(stderr, "usage: %s src-file offset length\n", argv[0]);
exit(1);
}
size_t offset = atoi(argv[2]);
size_t user_length = atoi(argv[3]);
size_t length = user_length;
if ((fd = open(argv[1], O_RDWR)) < 0) {
perror("open");
exit(1);
}
if (pmem2_config_new(&cfg)) {
pmem2_perror("pmem2_config_new");
exit(1);
}
if (pmem2_source_from_fd(&src, fd)) {
pmem2_perror("pmem2_source_from_fd");
exit(1);
}
if (pmem2_config_set_required_store_granularity(cfg,
PMEM2_GRANULARITY_PAGE)) {
pmem2_perror("pmem2_config_set_required_store_granularity");
exit(1);
}
size_t alignment;
if (pmem2_source_alignment(src, &alignment)) {
pmem2_perror("pmem2_source_alignment");
exit(1);
}
size_t offset_align = offset % alignment;
if (offset_align != 0) {
offset = offset - offset_align;
length += offset_align;
}
size_t len_align = length % alignment;
if (len_align != 0)
length += (alignment - len_align);
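/*
 * Worked example (illustrative numbers, not from the source): with
 * alignment 4096, offset 5000 and length 100, offset_align is 904, so the
 * mapping starts at offset 4096 with length 904 + 100 = 1004, which is then
 * rounded up to 4096. The code below restores the user's view by advancing
 * the mapped address by offset_align and printing user_length bytes.
 */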
if (pmem2_config_set_offset(cfg, offset)) {
pmem2_perror("pmem2_config_set_offset");
exit(1);
}
if (pmem2_config_set_length(cfg, length)) {
pmem2_perror("pmem2_config_set_length");
exit(1);
}
if (pmem2_map(cfg, src, &map)) {
pmem2_perror("pmem2_map");
exit(1);
}
char *addr = pmem2_map_get_address(map);
addr += offset_align;
for (size_t i = 0; i < user_length; i++) {
printf("%02hhX ", addr[i]);
if ((i & 0x0F) == 0x0F)
printf("\n");
}
pmem2_unmap(&map);
pmem2_source_delete(&src);
pmem2_config_delete(&cfg);
close(fd);
return 0;
}
| 2,042 | 17.241071 | 65 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem2/basic/basic.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* basic.c -- simple example for the libpmem2
*/
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <libpmem2.h>
int
main(int argc, char *argv[])
{
int fd;
struct pmem2_config *cfg;
struct pmem2_map *map;
struct pmem2_source *src;
pmem2_persist_fn persist;
if (argc != 2) {
fprintf(stderr, "usage: %s file\n", argv[0]);
exit(1);
}
if ((fd = open(argv[1], O_RDWR)) < 0) {
perror("open");
exit(1);
}
if (pmem2_config_new(&cfg)) {
pmem2_perror("pmem2_config_new");
exit(1);
}
if (pmem2_source_from_fd(&src, fd)) {
pmem2_perror("pmem2_source_from_fd");
exit(1);
}
if (pmem2_config_set_required_store_granularity(cfg,
PMEM2_GRANULARITY_PAGE)) {
pmem2_perror("pmem2_config_set_required_store_granularity");
exit(1);
}
if (pmem2_map(cfg, src, &map)) {
pmem2_perror("pmem2_map");
exit(1);
}
char *addr = pmem2_map_get_address(map);
size_t size = pmem2_map_get_size(map);
strcpy(addr, "hello, persistent memory");
persist = pmem2_get_persist_fn(map);
persist(addr, size);
pmem2_unmap(&map);
pmem2_source_delete(&src);
pmem2_config_delete(&cfg);
close(fd);
return 0;
}
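/*
 * Usage sketch (added; the path is hypothetical): the file must already
 * exist and be non-empty, e.g.:
 *
 *	truncate -s 4096 /mnt/pmem/basicfile
 *	./basic /mnt/pmem/basicfile
 *
 * Afterwards the beginning of the file holds the persisted
 * "hello, persistent memory" string written above.
 */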
| 1,339 | 16.866667 | 62 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmem2/log/log.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* log.c -- "Persistently-correct" implementation of a log structure.
* The user may do single/multiple appends and flush them all.
* When an append/appendv command is done, the application persists all append
* data, updates the header (this includes information about the end of
* the valid data in the file), and persists the changes done to the header.
*/
#include <errno.h>
#include <ex_common.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libpmem2.h>
#define LOG_HDR_SIGNATURE "PMEM2_LOG"
#define LOG_HDR_SIGNATURE_LEN (sizeof(LOG_HDR_SIGNATURE))
#define MIN_SIZE (1U << 20) /* 1 MiB */
struct log_header {
uint8_t signature[LOG_HDR_SIGNATURE_LEN];
uint64_t used;
};
struct log {
struct log_header header;
uint8_t data[];
};
struct log_ctx {
struct log *log;
struct pmem2_map *map;
size_t capacity;
/*
* pmem2 on non-DAX Windows volumes requires an open fd to flush buffers
*/
int fd;
pmem2_drain_fn drain_fn;
pmem2_persist_fn persist_fn;
pmem2_memcpy_fn memcpy_fn;
};
struct log_vec {
uint8_t *addr;
size_t len;
};
/*
* log_is_initialized -- check if log header is initialized
*/
static int
log_is_initialized(struct log_header *header)
{
return memcmp(header->signature, LOG_HDR_SIGNATURE,
LOG_HDR_SIGNATURE_LEN) == 0;
}
/*
* log_header_update -- update LOG header
*/
static void
log_header_update(struct log_ctx *ctx, uint64_t off)
{
uint64_t *used = &ctx->log->header.used;
*used += off;
ctx->persist_fn(used, sizeof(*used));
}
/*
* log_rewind -- remove the LOG file content
*/
static void
log_rewind(struct log_ctx *ctx)
{
uint64_t *used = &ctx->log->header.used;
*used = 0;
ctx->persist_fn(used, sizeof(*used));
}
/*
* log_header_init -- init LOG header
*/
static void
log_header_init(struct log_ctx *ctx)
{
log_rewind(ctx);
struct log_header *header = &ctx->log->header;
memcpy(header->signature, LOG_HDR_SIGNATURE, LOG_HDR_SIGNATURE_LEN);
ctx->persist_fn(header->signature, LOG_HDR_SIGNATURE_LEN);
}
/*
* log_init -- prepare LOG application context
*/
static int
log_init(struct log_ctx *ctx, const char *path)
{
ctx->fd = open(path, O_RDWR);
if (ctx->fd < 0) {
perror("open");
return 1;
}
struct pmem2_config *cfg;
int ret = pmem2_config_new(&cfg);
if (ret) {
pmem2_perror("pmem2_config_new");
goto err_config_new;
}
ret = pmem2_config_set_required_store_granularity(
cfg, PMEM2_GRANULARITY_PAGE);
if (ret) {
pmem2_perror("pmem2_config_set_required_store_granularity");
goto err_cfg_set_req_granularity;
}
struct pmem2_source *src;
ret = pmem2_source_from_fd(&src, ctx->fd);
if (ret) {
pmem2_perror("pmem2_config_set_fd");
goto err_source;
}
ret = pmem2_map(cfg, src, &ctx->map);
if (ret) {
pmem2_perror("pmem2_map");
goto err_map;
}
size_t map_size = pmem2_map_get_size(ctx->map);
if (map_size < MIN_SIZE) {
fprintf(stderr, "log_init: not enough space in the file\n");
ret = 1;
goto err_map_size;
}
ret = pmem2_source_delete(&src);
if (ret) {
pmem2_perror("pmem2_source_delete");
goto err_source_delete;
}
ret = pmem2_config_delete(&cfg);
if (ret) {
pmem2_perror("pmem2_config_delete");
goto err_cfg_delete;
}
ctx->drain_fn = pmem2_get_drain_fn(ctx->map);
ctx->persist_fn = pmem2_get_persist_fn(ctx->map);
ctx->memcpy_fn = pmem2_get_memcpy_fn(ctx->map);
ctx->log = pmem2_map_get_address(ctx->map);
if (!log_is_initialized(&ctx->log->header))
log_header_init(ctx);
ctx->capacity = map_size - sizeof(struct log_header);
if (ctx->log->header.used == ctx->capacity) {
fprintf(stderr, "log_init: log is full\n");
ret = 1;
goto err_map_size;
}
if (ctx->log->header.used > ctx->capacity) {
fprintf(stderr, "log_init: file truncated?\n");
ret = 1;
goto err_map_size;
}
return ret;
err_cfg_delete:
err_source_delete:
err_map_size:
(void) pmem2_unmap(&ctx->map);
err_map:
(void) pmem2_source_delete(&src);
err_source:
err_cfg_set_req_granularity:
(void) pmem2_config_delete(&cfg);
err_config_new:
(void) close(ctx->fd);
return ret;
}
/*
* log_fini -- cleanup LOG application context
*/
static int
log_fini(struct log_ctx *ctx)
{
int ret = pmem2_unmap(&ctx->map);
if (ret) {
pmem2_perror("pmem2_unmap");
return ret;
}
ret = close(ctx->fd);
if (ret) {
perror("close");
return ret;
}
return ret;
}
/*
* log_append -- append a new string to LOG
*/
static int
log_append(struct log_ctx *ctx, uint8_t *data, size_t data_len)
{
struct log *log = ctx->log;
if (log->header.used + data_len > ctx->capacity) {
fprintf(stderr, "log_append: no space left in the file\n");
return -1;
}
ctx->memcpy_fn(&log->data[log->header.used], (char *)data, data_len, 0);
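/*
 * With flags == 0 the pmem2 memcpy function also flushes what it copied,
 * so the new entry is durable before the header below is extended to
 * cover it.
 */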
log_header_update(ctx, data_len);
return 1;
}
/*
* log_appendv -- append N new strings to LOG and return the number
* of consumed arguments
*/
static int
log_appendv(struct log_ctx *ctx, struct log_vec *logv, size_t logvcnt)
{
struct log *log = ctx->log;
uint8_t *start = &log->data[log->header.used];
uint8_t *addr = start;
size_t total_len = 0;
for (size_t i = 0; i < logvcnt; ++i) {
if (log->header.used + total_len + logv->len > ctx->capacity) {
fprintf(stderr,
"log_appendv: no space left in the file\n");
return -1;
}
ctx->memcpy_fn(addr, logv->addr, logv->len,
PMEM2_F_MEM_NOFLUSH);
total_len += logv->len;
addr += logv->len;
logv++;
}
ctx->persist_fn(start, total_len);
log_header_update(ctx, total_len);
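/*
 * Report the N count plus the N strings as consumed; main() adds one more
 * for the "appendv" command name itself.
 */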
return (int)logvcnt + 1;
}
/*
* log_dump -- dump the LOG file content
*/
static void
log_dump(struct log_ctx *ctx)
{
struct log *log = ctx->log;
fwrite(log->data, sizeof(log->data[0]), log->header.used, stdout);
printf("\n");
}
/*
* parse_log_vec -- allocate and fill log_vec structure
*/
static int
parse_log_vec(struct log_vec *vec, uint8_t *addr, size_t log_vcnt)
{
for (size_t i = 0; i < log_vcnt; ++i) {
vec[i].addr = addr;
vec[i].len = strlen((char *)addr);
addr += vec[i].len + 1;
}
return 0;
}
/*
* print_usage -- print short description of usage
*/
static void
print_usage(void)
{
printf("Usage:\n"
"\tlog <file> <COMMAND_1> [COMMAND_2 ...]\n"
"\tlog help\n"
"Available commands:\n"
"append DATA\t\t\t- add a new element to the LOG\n"
"appendv N DATA_1 ... DATA_N\t- add N new elements to the LOG\n"
"rewind\t\t\t\t- remove the LOG content\n"
"dump\t\t\t\t- dump the file content to the console\n"
"help\t\t\t\t- print this help info\n");
}
int
main(int argc, char *argv[])
{
if (argv[1] && strcmp(argv[1], "help") == 0) {
print_usage();
return 0;
}
if (argc < 3) {
print_usage();
return 1;
}
struct log_ctx ctx;
int ret = log_init(&ctx, argv[1]);
if (ret)
return ret;
/* skip executable name and the LOG file name */
argc -= 2;
argv += 2;
while (argc > 0) {
if (strcmp(argv[0], "append") == 0 && argc > 1) {
uint8_t *data = (uint8_t *)argv[1];
size_t data_len = strlen((char *)data);
ret = log_append(&ctx, data, data_len);
} else if (strcmp(argv[0], "appendv") == 0 && argc > 1) {
errno = 0;
char *end;
size_t log_vcnt = strtoul(argv[1], &end, 0);
if (log_vcnt == 0 || (errno != 0 && *end == '\0')) {
fprintf(stderr,
"log_appendv: invalid N argument: %s\n",
argv[1]);
ret = 1;
goto err_out;
}
/* number of needed arguments: 2 + log_vcnt */
if (argc < 2 + log_vcnt) {
fprintf(stderr,
"log_appendv: a too small number of strings provided\n");
ret = 1;
goto err_out;
}
struct log_vec *vec;
vec = malloc(sizeof(struct log_vec) * log_vcnt);
if (!vec) {
perror("malloc");
ret = 1;
goto err_out;
}
uint8_t *addr = (uint8_t *)argv[2];
ret = parse_log_vec(vec, addr, log_vcnt);
if (ret) {
free(vec);
goto err_out;
}
ret = log_appendv(&ctx, vec, log_vcnt);
free(vec);
} else if (strcmp(argv[0], "dump") == 0) {
log_dump(&ctx);
} else if (strcmp(argv[0], "rewind") == 0) {
log_rewind(&ctx);
} else {
fprintf(stderr,
"log: %s - unknown command or a too small number of arguments\n",
argv[0]);
print_usage();
ret = -1;
}
if (ret < 0) {
ret = 1;
goto err_out;
} else {
/*
* One argument consumed by the name of the performed
* operation.
*/
ret += 1;
argc -= ret;
argv += ret;
ret = 0;
}
}
ret = log_fini(&ctx);
return ret;
err_out:
(void) log_fini(&ctx);
return ret;
}
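/*
 * Usage sketch (added; the path is hypothetical, the commands are the ones
 * listed in print_usage above). The file must be at least MIN_SIZE bytes:
 *
 *	truncate -s 1M /mnt/pmem/logfile
 *	./log /mnt/pmem/logfile append "first entry" appendv 2 second third
 *	./log /mnt/pmem/logfile dump
 *	./log /mnt/pmem/logfile rewind
 *
 * Commands are consumed left to right by the while loop in main(), so
 * several of them can be chained in one invocation.
 */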
| 8,495 | 19.423077 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_ssh.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_ssh.h -- rpmem ssh transport layer header file
*/
#ifndef RPMEM_SSH_H
#define RPMEM_SSH_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_ssh;
struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info);
struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...);
struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info,
const char **argv);
int rpmem_ssh_close(struct rpmem_ssh *rps);
int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len);
int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len);
int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock);
const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno);
#ifdef __cplusplus
}
#endif
#endif
| 866 | 23.771429 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_fip.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip.h -- rpmem libfabric provider module header file
*/
#ifndef RPMEM_FIP_H
#define RPMEM_FIP_H
#include <stdint.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_fip;
struct rpmem_fip_attr {
enum rpmem_provider provider;
size_t max_wq_size;
enum rpmem_persist_method persist_method;
void *laddr;
size_t size;
size_t buff_size;
unsigned nlanes;
void *raddr;
uint64_t rkey;
};
struct rpmem_fip *rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes);
void rpmem_fip_fini(struct rpmem_fip *fip);
int rpmem_fip_connect(struct rpmem_fip *fip);
int rpmem_fip_close(struct rpmem_fip *fip);
int rpmem_fip_process_start(struct rpmem_fip *fip);
int rpmem_fip_process_stop(struct rpmem_fip *fip);
int rpmem_fip_flush(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_drain(struct rpmem_fip *fip, unsigned lane);
int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_read(struct rpmem_fip *fip, void *buff,
size_t len, size_t off, unsigned lane);
void rpmem_fip_probe_fork_safety(void);
size_t rpmem_fip_get_wq_size(struct rpmem_fip *fip);
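/*
 * Rough call order used by librpmem (added note; see rpmem.c for the
 * authoritative usage): rpmem_fip_init() -> rpmem_fip_connect() ->
 * rpmem_fip_flush()/rpmem_fip_drain()/rpmem_fip_persist()/rpmem_fip_read()
 * -> rpmem_fip_close() -> rpmem_fip_fini().
 */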
#ifdef __cplusplus
}
#endif
#endif
| 1,427 | 22.032258 | 71 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem.c -- main source file for librpmem
*/
#include <stdlib.h>
#include <netdb.h>
#include <stdio.h>
#include <errno.h>
#include <limits.h>
#include <inttypes.h>
#include "librpmem.h"
#include "out.h"
#include "os.h"
#include "os_thread.h"
#include "util.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmem_fip.h"
#include "rpmem_fip_common.h"
#include "rpmem_ssh.h"
#include "rpmem_proto.h"
#define RPMEM_REMOVE_FLAGS_ALL (\
RPMEM_REMOVE_FORCE | \
RPMEM_REMOVE_POOL_SET \
)
#define RPMEM_CHECK_FORK() do {\
if (Rpmem_fork_unsafe) {\
ERR("libfabric is initialized without fork() support");\
return NULL;\
}\
} while (0)
static os_once_t Rpmem_fork_unsafe_key_once = OS_ONCE_INIT;
/*
* rpmem_pool -- remote pool context
*/
struct rpmem_pool {
struct rpmem_obc *obc; /* out-of-band connection handle */
struct rpmem_fip *fip; /* fabric provider handle */
struct rpmem_target_info *info;
char fip_service[NI_MAXSERV];
enum rpmem_provider provider;
size_t max_wq_size; /* max WQ size supported by provider */
os_thread_t monitor;
int closing;
int no_headers;
/*
* Last error code, need to be volatile because it can
* be accessed by multiple threads.
*/
volatile int error;
};
/*
* env_get_bool -- parse value of specified environment variable as a bool
*
* Return values:
* 0 - defined, valp has value
* 1 - not defined
* -1 - parsing error
*/
static int
env_get_bool(const char *name, int *valp)
{
LOG(3, "name %s, valp %p", name, valp);
const char *env = os_getenv(name);
if (!env)
return 1;
char *endptr;
errno = 0;
long val = strtol(env, &endptr, 10);
if (*endptr != '\0' || errno)
goto err;
if (val < INT_MIN || val > INT_MAX)
goto err;
*valp = (int)val;
return 0;
err:
RPMEM_LOG(ERR, "!parsing '%s' environment variable failed", name);
return -1;
}
/*
* rpmem_set_provider -- set provider based on node address and environment
*/
static int
rpmem_set_provider(RPMEMpool *rpp, const char *node)
{
LOG(3, "rpp %p, node %s", rpp, node);
struct rpmem_fip_probe probe;
enum rpmem_provider prov = RPMEM_PROV_UNKNOWN;
int ret = rpmem_fip_probe_get(node, &probe);
if (ret)
return -1;
/*
* The sockets provider can be used only if specified environment
* variable is set to 1.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_SOCKET_ENV, &enable);
if (!ret && enable) {
prov = RPMEM_PROV_LIBFABRIC_SOCKETS;
}
}
/*
* The verbs provider is enabled by default. If appropriate
* environment variable is set to 0, the verbs provider is disabled.
*
* The verbs provider has higher priority than sockets provider.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_VERBS_ENV, &enable);
if (ret == 1 || (!ret && enable))
prov = RPMEM_PROV_LIBFABRIC_VERBS;
}
if (prov == RPMEM_PROV_UNKNOWN)
return -1;
RPMEM_ASSERT(prov < MAX_RPMEM_PROV);
rpp->max_wq_size = probe.max_wq_size[prov];
rpp->provider = prov;
return 0;
}
/*
* rpmem_monitor_thread -- connection monitor background thread
*/
static void *
rpmem_monitor_thread(void *arg)
{
LOG(3, "arg %p", arg);
RPMEMpool *rpp = arg;
int ret = rpmem_obc_monitor(rpp->obc, 0);
if (ret && !rpp->closing) {
RPMEM_LOG(ERR, "unexpected data received");
rpp->error = errno;
}
return NULL;
}
/*
* rpmem_common_init -- common routine for initialization
*/
static RPMEMpool *
rpmem_common_init(const char *target)
{
LOG(3, "target %s", target);
int ret;
RPMEMpool *rpp = calloc(1, sizeof(*rpp));
if (!rpp) {
ERR("!calloc");
goto err_malloc_rpmem;
}
rpp->info = rpmem_target_parse(target);
if (!rpp->info) {
ERR("!parsing target node address failed");
goto err_target_split;
}
ret = rpmem_set_provider(rpp, rpp->info->node);
if (ret) {
errno = ENOMEDIUM;
ERR("cannot find provider");
goto err_provider;
}
RPMEM_LOG(NOTICE, "provider: %s", rpmem_provider_to_str(rpp->provider));
if (rpp->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) {
/* libfabric's sockets provider does not support IPv6 */
RPMEM_LOG(NOTICE, "forcing using IPv4");
rpp->info->flags |= RPMEM_FLAGS_USE_IPV4;
}
rpp->obc = rpmem_obc_init();
if (!rpp->obc) {
ERR("!out-of-band connection initialization failed");
goto err_obc_init;
}
RPMEM_LOG(INFO, "establishing out-of-band connection");
ret = rpmem_obc_connect(rpp->obc, rpp->info);
if (ret) {
ERR("!out-of-band connection failed");
goto err_obc_connect;
}
RPMEM_LOG(NOTICE, "out-of-band connection established");
return rpp;
err_obc_connect:
rpmem_obc_fini(rpp->obc);
err_obc_init:
err_provider:
rpmem_target_free(rpp->info);
err_target_split:
free(rpp);
err_malloc_rpmem:
return NULL;
}
/*
* rpmem_common_fini -- common routine for deinitialization
*/
static void
rpmem_common_fini(RPMEMpool *rpp, int join)
{
LOG(3, "rpp %p, join %d", rpp, join);
rpmem_obc_disconnect(rpp->obc);
if (join) {
int ret = os_thread_join(&rpp->monitor, NULL);
if (ret) {
errno = ret;
ERR("joining monitor thread failed");
}
}
rpmem_obc_fini(rpp->obc);
rpmem_target_free(rpp->info);
free(rpp);
}
/*
* rpmem_common_fip_init -- common routine for initializing fabric provider
*/
static int
rpmem_common_fip_init(RPMEMpool *rpp, struct rpmem_req_attr *req,
struct rpmem_resp_attr *resp, void *pool_addr, size_t pool_size,
unsigned *nlanes, size_t buff_size)
{
LOG(3, "rpp %p, req %p, resp %p, pool_addr %p, pool_size %zu, nlanes "
"%p", rpp, req, resp, pool_addr, pool_size, nlanes);
int ret;
struct rpmem_fip_attr fip_attr = {
.provider = req->provider,
.max_wq_size = rpp->max_wq_size,
.persist_method = resp->persist_method,
.laddr = pool_addr,
.size = pool_size,
.buff_size = buff_size,
.nlanes = min(*nlanes, resp->nlanes),
.raddr = (void *)resp->raddr,
.rkey = resp->rkey,
};
ret = util_snprintf(rpp->fip_service, sizeof(rpp->fip_service),
"%u", resp->port);
if (ret < 0) {
ERR("!snprintf");
goto err_port;
}
rpp->fip = rpmem_fip_init(rpp->info->node, rpp->fip_service,
&fip_attr, nlanes);
if (!rpp->fip) {
ERR("!in-band connection initialization failed");
ret = -1;
goto err_fip_init;
}
RPMEM_LOG(NOTICE, "final nlanes: %u", *nlanes);
RPMEM_LOG(INFO, "establishing in-band connection");
ret = rpmem_fip_connect(rpp->fip);
if (ret) {
ERR("!establishing in-band connection failed");
goto err_fip_connect;
}
RPMEM_LOG(NOTICE, "in-band connection established");
return 0;
err_fip_connect:
rpmem_fip_fini(rpp->fip);
err_fip_init:
err_port:
return ret;
}
/*
* rpmem_common_fip_fini -- common routine for deinitializing fabric provider
*/
static void
rpmem_common_fip_fini(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing in-band connection");
rpmem_fip_fini(rpp->fip);
RPMEM_LOG(NOTICE, "in-band connection closed");
}
/*
* rpmem_log_args -- log input arguments for rpmem_create and rpmem_open
*/
static void
rpmem_log_args(const char *req, const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned nlanes)
{
LOG(3, "req %s, target %s, pool_set_name %s, pool_addr %p, pool_size "
"%zu, nlanes %d", req, target, pool_set_name, pool_addr,
pool_size, nlanes);
RPMEM_LOG(NOTICE, "%s request:", req);
RPMEM_LOG(NOTICE, "\ttarget: %s", target);
RPMEM_LOG(NOTICE, "\tpool set: %s", pool_set_name);
RPMEM_LOG(INFO, "\tpool addr: %p", pool_addr);
RPMEM_LOG(INFO, "\tpool size: %lu", pool_size);
RPMEM_LOG(NOTICE, "\tnlanes: %u", nlanes);
}
/*
* rpmem_log_resp -- log response attributes
*/
static void
rpmem_log_resp(const char *req, const struct rpmem_resp_attr *resp)
{
LOG(3, "req %s, resp %p", req, resp);
RPMEM_LOG(NOTICE, "%s request response:", req);
RPMEM_LOG(NOTICE, "\tnlanes: %u", resp->nlanes);
RPMEM_LOG(NOTICE, "\tport: %u", resp->port);
RPMEM_LOG(NOTICE, "\tpersist method: %s",
rpmem_persist_method_to_str(resp->persist_method));
RPMEM_LOG(NOTICE, "\tremote addr: 0x%" PRIx64, resp->raddr);
}
/*
* rpmem_check_args -- validate user's arguments
*/
static int
rpmem_check_args(void *pool_addr, size_t pool_size, unsigned *nlanes)
{
LOG(3, "pool_addr %p, pool_size %zu, nlanes %p", pool_addr, pool_size,
nlanes);
if (!pool_addr) {
errno = EINVAL;
ERR("invalid pool address");
return -1;
}
if (!IS_PAGE_ALIGNED((uintptr_t)pool_addr)) {
errno = EINVAL;
ERR("Pool address must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!IS_PAGE_ALIGNED(pool_size)) {
errno = EINVAL;
ERR("Pool size must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!pool_size) {
errno = EINVAL;
ERR("invalid pool size");
return -1;
}
if (!nlanes) {
errno = EINVAL;
ERR("lanes pointer cannot be NULL");
return -1;
}
if (!(*nlanes)) {
errno = EINVAL;
ERR("number of lanes must be positive");
return -1;
}
return 0;
}
/*
* rpmem_create -- create remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* create_attr -- pool attributes used for creating the pool on remote node
*/
RPMEMpool *
rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, create_attr);
os_once(&Rpmem_fork_unsafe_key_once, &rpmem_fip_probe_fork_safety);
RPMEM_CHECK_FORK();
rpmem_log_args("create", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_create(rpp->obc, &req, &resp, create_attr);
if (ret) {
RPMEM_LOG(ERR, "!create request failed");
goto err_obc_create;
}
if (create_attr == NULL ||
util_is_zeroed(create_attr, sizeof(*create_attr)))
rpp->no_headers = 1;
rpmem_log_resp("create", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, RPMEM_CLOSE_FLAGS_REMOVE);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_open -- open remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* open_attr -- pool attributes, received from remote host
*/
RPMEMpool *
rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, open_attr);
os_once(&Rpmem_fork_unsafe_key_once, &rpmem_fip_probe_fork_safety);
RPMEM_CHECK_FORK();
rpmem_log_args("open", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_open(rpp->obc, &req, &resp, open_attr);
if (ret) {
RPMEM_LOG(ERR, "!open request failed");
goto err_obc_create;
}
if (open_attr == NULL || util_is_zeroed(open_attr, sizeof(*open_attr)))
rpp->no_headers = 1;
rpmem_log_resp("open", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, 0);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_close -- close remote pool on target node
*/
int
rpmem_close(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing out-of-band connection");
util_fetch_and_or32(&rpp->closing, 1);
rpmem_fip_close(rpp->fip);
int ret = rpmem_obc_close(rpp->obc, 0);
if (ret)
ERR("!close request failed");
RPMEM_LOG(NOTICE, "out-of-band connection closed");
rpmem_common_fip_fini(rpp);
rpmem_common_fini(rpp, 1);
return ret;
}
/*
* rpmem_flush -- flush to target node operation
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of flush operation
* lane -- lane number
* flags -- additional flags
*/
int
rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x",
rpp, offset, length, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags & RPMEM_FLUSH_FLAGS_MASK) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
/*
* By default use RDMA SEND flush mode which has atomicity
* guarantees. For relaxed flush use RDMA WRITE.
*/
unsigned mode = RPMEM_PERSIST_SEND;
if (flags & RPMEM_FLUSH_RELAXED)
mode = RPMEM_FLUSH_WRITE;
int ret = rpmem_fip_flush(rpp->fip, offset, length, lane, mode);
if (unlikely(ret)) {
LOG(2, "flush operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
* rpmem_drain -- drain on target node operation
*
* rpp -- remote pool handle
* lane -- lane number
* flags -- additional flags
*/
int
rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, lane %d, flags 0x%x", rpp, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags != 0) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
int ret = rpmem_fip_drain(rpp->fip, lane);
if (unlikely(ret)) {
LOG(2, "drain operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
* rpmem_persist -- persist operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of persist operation
* lane -- lane number
*/
int
rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x",
rpp, offset, length, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags & RPMEM_PERSIST_FLAGS_MASK) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
/*
* By default use RDMA SEND persist mode which has atomicity
* guarantees. For relaxed persist use RDMA WRITE.
*/
unsigned mode = RPMEM_PERSIST_SEND;
if (flags & RPMEM_PERSIST_RELAXED)
mode = RPMEM_FLUSH_WRITE;
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, mode);
if (unlikely(ret)) {
LOG(2, "persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
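/*
 * Typical client call sequence (sketch added for context; target and pool
 * set names are placeholders, error handling omitted):
 *
 *	unsigned nlanes = 4;
 *	RPMEMpool *rpp = rpmem_create("user@host", "pool.set",
 *			pool_addr, pool_size, &nlanes, &attr);
 *	... local stores to pool_addr + off ...
 *	rpmem_persist(rpp, off, len, 0, 0);
 *	rpmem_close(rpp);
 *
 * Each concurrently-used lane index must be smaller than the *nlanes value
 * returned by rpmem_create()/rpmem_open().
 */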
/*
* rpmem_deep_persist -- deep flush operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of deep flush operation
* lane -- lane number
*/
int
rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d", rpp, offset, length,
lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, RPMEM_DEEP_PERSIST);
if (unlikely(ret)) {
ERR("persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
* rpmem_read -- read data from remote pool:
*
* rpp -- remote pool handle
* buff -- output buffer
* offset -- offset in pool
* length -- length of read operation
*/
int
rpmem_read(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane)
{
LOG(3, "rpp %p, buff %p, offset %zu, length %zu, lane %d", rpp, buff,
offset, length, lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE)
LOG(1, "reading from pool at offset (%zu) less than %d bytes",
offset, RPMEM_HDR_SIZE);
int ret = rpmem_fip_read(rpp->fip, buff, length, offset, lane);
if (unlikely(ret)) {
errno = ret;
ERR("!read operation failed");
rpp->error = ret;
return -1;
}
return 0;
}
/*
* rpmem_set_attr -- overwrite pool attributes on the remote node
*
* rpp -- remote pool handle
* attr -- new pool attributes for the pool on remote node
*/
int
rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr)
{
LOG(3, "rpp %p, attr %p", rpp, attr);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
int ret = rpmem_obc_set_attr(rpp->obc, attr);
if (ret) {
RPMEM_LOG(ERR, "!set attributes request failed");
}
return ret;
}
/*
* rpmem_remove -- remove pool from remote node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* flags -- bitwise OR of one or more of the following flags:
* - RPMEM_REMOVE_FORCE
* - RPMEM_REMOVE_POOL_SET
*/
int
rpmem_remove(const char *target, const char *pool_set, int flags)
{
LOG(3, "target %s, pool_set %s, flags %d", target, pool_set, flags);
if (flags & ~(RPMEM_REMOVE_FLAGS_ALL)) {
ERR("invalid flags specified");
errno = EINVAL;
return -1;
}
struct rpmem_target_info *info = rpmem_target_parse(target);
if (!info) {
ERR("!parsing target node address failed");
goto err_target;
}
const char *argv[5];
argv[0] = "--remove";
argv[1] = pool_set;
const char **cur = &argv[2];
if (flags & RPMEM_REMOVE_FORCE)
*cur++ = "--force";
if (flags & RPMEM_REMOVE_POOL_SET)
*cur++ = "--pool-set";
*cur = NULL;
struct rpmem_ssh *ssh = rpmem_ssh_execv(info, argv);
if (!ssh) {
ERR("!executing ssh command failed");
goto err_ssh_exec;
}
int ret;
ret = rpmem_ssh_monitor(ssh, 0);
if (ret) {
ERR("!waiting for remote command failed");
goto err_ssh_monitor;
}
ret = rpmem_ssh_close(ssh);
if (ret) {
errno = ret;
ERR("remote command failed");
goto err_ssh_close;
}
rpmem_target_free(info);
return 0;
err_ssh_monitor:
rpmem_ssh_close(ssh);
err_ssh_close:
err_ssh_exec:
rpmem_target_free(info);
err_target:
return -1;
}
#if FAULT_INJECTION
void
rpmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
return core_inject_fault_at(type, nth, at);
}
int
rpmem_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 20,542 | 21.451366 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_fip.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_fip.c -- rpmem libfabric provider module source file
*/
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h>
#include <rdma/fabric.h>
#include <rdma/fi_domain.h>
#include <rdma/fi_endpoint.h>
#include <rdma/fi_cm.h>
#include <rdma/fi_errno.h>
#include <rdma/fi_rma.h>
#include "out.h"
#include "util.h"
#include "os_thread.h"
#include "os.h"
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_fip_msg.h"
#include "rpmem_fip.h"
#include "valgrind_internal.h"
#define RPMEM_FI_ERR(e, fmt, args...)\
ERR(fmt ": %s", ## args, fi_strerror((e)))
#define RPMEM_FI_CLOSE(f, fmt, args...) (\
{\
int oerrno = errno;\
int ret = fi_close(&(f)->fid);\
if (ret)\
RPMEM_FI_ERR(ret, fmt, ## args);\
errno = oerrno;\
ret;\
})
#define LANE_ALIGN_SIZE 64
#define LANE_ALIGN __attribute__((aligned(LANE_ALIGN_SIZE)))
#define RPMEM_RAW_BUFF_SIZE 4096
#define RPMEM_RAW_SIZE 8
typedef ssize_t (*rpmem_fip_flush_fn)(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags);
typedef int (*rpmem_fip_drain_fn)(struct rpmem_fip *fip, unsigned lane);
typedef ssize_t (*rpmem_fip_persist_fn)(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags);
typedef int (*rpmem_fip_init_fn)(struct rpmem_fip *fip);
typedef void (*rpmem_fip_fini_fn)(struct rpmem_fip *fip);
typedef ssize_t (*cq_read_fn)(struct fid_cq *cq, void *buf, size_t count);
static ssize_t
cq_read_infinite(struct fid_cq *cq, void *buf, size_t count)
{
return fi_cq_sread(cq, buf, count, NULL, -1);
}
/*
* rpmem_fip_ops -- operations specific for persistency method
*/
struct rpmem_fip_ops {
rpmem_fip_flush_fn flush;
rpmem_fip_drain_fn drain;
rpmem_fip_persist_fn persist;
rpmem_fip_init_fn lanes_init;
rpmem_fip_init_fn lanes_init_mem;
rpmem_fip_fini_fn lanes_fini;
rpmem_fip_init_fn lanes_post;
};
/*
* rpmem_fip_lane -- base lane structure
*/
struct rpmem_fip_lane {
struct fid_ep *ep; /* endpoint */
struct fid_cq *cq; /* completion queue */
uint64_t event;
size_t wq_elems; /* # of elements in work queue */
int wq_is_flushing; /* work queue is during flush */
};
/*
* rpmem_fip_plane -- persist operation's lane
*/
struct rpmem_fip_plane {
struct rpmem_fip_lane base; /* base lane structure */
struct rpmem_fip_rma write; /* WRITE message */
struct rpmem_fip_rma write_cq; /* WRITE message with completion */
struct rpmem_fip_rma read; /* READ message */
struct rpmem_fip_msg send; /* SEND message */
struct rpmem_fip_msg recv; /* RECV message */
} LANE_ALIGN;
/*
* rpmem_fip_rlane -- read operation's lane
*/
struct rpmem_fip_rlane {
struct rpmem_fip_lane base; /* base lane structure */
struct rpmem_fip_rma read; /* READ message */
};
struct rpmem_fip {
struct fi_info *fi; /* fabric interface information */
struct fid_fabric *fabric; /* fabric domain */
struct fid_domain *domain; /* fabric protection domain */
struct fid_eq *eq; /* event queue */
int closing; /* closing connections in progress */
size_t cq_size; /* completion queue size */
uint64_t raddr; /* remote memory base address */
uint64_t rkey; /* remote memory protection key */
void *laddr; /* local memory base address */
size_t size; /* memory size */
struct fid_mr *mr; /* local memory region */
void *mr_desc; /* local memory descriptor */
enum rpmem_persist_method persist_method;
const struct rpmem_fip_ops *ops;
unsigned nlanes;
size_t buff_size;
struct rpmem_fip_plane *lanes;
os_thread_t monitor;
void *pmsg; /* persist message buffer */
size_t pmsg_size;
struct fid_mr *pmsg_mr; /* persist message memory region */
void *pmsg_mr_desc; /* persist message memory descriptor */
struct rpmem_msg_persist_resp *pres; /* persist response buffer */
struct fid_mr *pres_mr; /* persist response memory region */
void *pres_mr_desc; /* persist response memory descriptor */
void *raw_buff; /* READ-after-WRITE buffer */
struct fid_mr *raw_mr; /* RAW memory region */
void *raw_mr_desc; /* RAW memory descriptor */
cq_read_fn cq_read; /* CQ read function */
};
/*
* rpmem_fip_is_closing -- (internal) atomically reads and returns the
* closing flag
*/
static inline int
rpmem_fip_is_closing(struct rpmem_fip *fip)
{
int ret;
util_atomic_load_explicit32(&fip->closing, &ret, memory_order_acquire);
return ret;
}
/*
* rpmem_fip_set_closing -- (internal) atomically set the closing flag
*/
static inline void
rpmem_fip_set_closing(struct rpmem_fip *fip)
{
/*
* load and store without barriers should be good enough here.
* fetch_and_or are used as workaround for helgrind issue.
*/
util_fetch_and_or32(&fip->closing, 1);
}
/*
* rpmem_fip_lane_begin -- (internal) initialize list of events for lane
*/
static inline void
rpmem_fip_lane_begin(struct rpmem_fip_lane *lanep, uint64_t event)
{
lanep->event = event;
}
/*
* rpmem_fip_lane_init -- (internal) initialize single lane
*/
static int
rpmem_fip_lane_init(struct rpmem_fip *fip, struct rpmem_fip_lane *lanep)
{
int ret;
struct fi_cq_attr cq_attr = {
.size = fip->cq_size,
.flags = 0,
.format = FI_CQ_FORMAT_MSG,
.wait_obj = FI_WAIT_UNSPEC,
.signaling_vector = 0,
.wait_cond = FI_CQ_COND_NONE,
.wait_set = NULL,
};
/* create a completion queue */
ret = fi_cq_open(fip->domain, &cq_attr, &lanep->cq, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening completion queue");
goto err_cq_open;
}
/* create an endpoint */
ret = fi_endpoint(fip->domain, fip->fi, &lanep->ep, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "allocating endpoint");
goto err_endpoint;
}
/*
* Bind an event queue to an endpoint to get
* connection-related events for the endpoint.
*/
ret = fi_ep_bind(lanep->ep, &fip->eq->fid, 0);
if (ret) {
RPMEM_FI_ERR(ret, "binding event queue to endpoint");
goto err_ep_bind_eq;
}
/*
* Bind a completion queue to an endpoint to get completion
* events of specified inbound/outbound operations.
*
* FI_SELECTIVE_COMPLETION means all inbound/outbound operations
* must explicitly specify if the completion event should be
* generated or not using FI_COMPLETION flag.
*
* The completion events received are highly related to the
* persistency method used and are configured in lanes
* initialization specified for persistency method utilized.
*/
ret = fi_ep_bind(lanep->ep, &lanep->cq->fid,
FI_RECV | FI_TRANSMIT | FI_SELECTIVE_COMPLETION);
if (ret) {
RPMEM_FI_ERR(ret, "binding completion queue to endpoint");
goto err_ep_bind_cq;
}
/*
* Enable endpoint so it is possible to post inbound/outbound
* operations if required.
*/
ret = fi_enable(lanep->ep);
if (ret) {
RPMEM_FI_ERR(ret, "activating endpoint");
goto err_fi_enable;
}
return 0;
err_fi_enable:
err_ep_bind_cq:
err_ep_bind_eq:
err_endpoint:
RPMEM_FI_CLOSE(lanep->cq, "closing completion queue");
err_cq_open:
return -1;
}
/*
* rpmem_fip_lane_fini -- (internal) deinitialize single lane
*/
static int
rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep)
{
int ret;
int lret = 0;
ret = RPMEM_FI_CLOSE(lanep->ep, "closing endpoint");
if (ret)
lret = ret;
ret = RPMEM_FI_CLOSE(lanep->cq, "closing completion queue");
if (ret)
lret = ret;
return lret;
}
/*
* rpmem_fip_lane_wait -- (internal) wait for specific event on completion queue
*/
static int
rpmem_fip_lane_wait(struct rpmem_fip *fip, struct rpmem_fip_lane *lanep,
uint64_t e)
{
ssize_t sret = 0;
struct fi_cq_err_entry err;
const char *str_err;
int ret = 0;
struct fi_cq_msg_entry cq_entry;
while (lanep->event & e) {
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET;
sret = fip->cq_read(lanep->cq, &cq_entry, 1);
if (unlikely(sret == -FI_EAGAIN) || sret == 0)
continue;
if (unlikely(sret < 0)) {
ret = (int)sret;
goto err_cq_read;
}
lanep->event &= ~cq_entry.flags;
}
return 0;
err_cq_read:
sret = fi_cq_readerr(lanep->cq, &err, 0);
if (sret < 0) {
RPMEM_FI_ERR((int)sret, "error reading from completion queue: "
"cannot read error from event queue");
goto err;
}
str_err = fi_cq_strerror(lanep->cq, err.prov_errno, NULL, NULL, 0);
RPMEM_LOG(ERR, "error reading from completion queue: %s", str_err);
err:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
/*
* rpmem_fip_set_nlanes -- (internal) set maximum number of lanes supported
*/
static void
rpmem_fip_set_nlanes(struct rpmem_fip *fip, unsigned nlanes)
{
size_t max_nlanes = rpmem_fip_max_nlanes(fip->fi);
RPMEM_ASSERT(max_nlanes < UINT_MAX);
fip->nlanes = min((unsigned)max_nlanes, nlanes);
}
/*
* rpmem_fip_getinfo -- (internal) get fabric interface information
*/
static int
rpmem_fip_getinfo(struct rpmem_fip *fip, const char *node, const char *service,
enum rpmem_provider provider, size_t max_wq_size,
enum rpmem_persist_method pm)
{
int ret = -1;
struct fi_info *hints = rpmem_fip_get_hints(provider);
if (!hints) {
RPMEM_LOG(ERR, "!getting fabric interface information hints");
goto err_hints;
}
/*
* WQ size is:
* - >= size required by persist method (pm_wq_size)
* - >= size forced by environment variable (Rpmem_wq_size)
* - but it has to be <= max_wq_size reported by provider
*/
size_t pm_wq_size = rpmem_fip_wq_size(pm, RPMEM_FIP_NODE_CLIENT);
hints->tx_attr->size =
min(
max(pm_wq_size, Rpmem_wq_size),
max_wq_size);
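/*
 * Illustrative numbers (added, not from the source): with pm_wq_size = 64,
 * Rpmem_wq_size = 128 and max_wq_size = 1024 the requested TX queue size is
 * min(max(64, 128), 1024) = 128; with max_wq_size = 100 it would be capped
 * at 100.
 */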
hints->rx_attr->size = rpmem_fip_rx_size(pm, RPMEM_FIP_NODE_CLIENT);
/* get maximum available */
ret = fi_getinfo(RPMEM_FIVERSION, node, service, 0, hints, &fip->fi);
if (ret) {
RPMEM_FI_ERR(ret, "getting fabric interface information");
goto err_fi_getinfo;
}
rpmem_fip_print_info(fip->fi);
/* fall through to free the hints */
err_fi_getinfo:
fi_freeinfo(hints);
err_hints:
return ret;
}
/*
* rpmem_fip_init_fabric_res -- (internal) initialize common fabric resources
*/
static int
rpmem_fip_init_fabric_res(struct rpmem_fip *fip)
{
int ret;
ret = fi_fabric(fip->fi->fabric_attr, &fip->fabric, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening fabric domain");
goto err_fi_fabric;
}
ret = fi_domain(fip->fabric, fip->fi, &fip->domain, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening fabric access domain");
goto err_fi_domain;
}
struct fi_eq_attr eq_attr = {
.size = 0, /* use default value */
.flags = 0,
.wait_obj = FI_WAIT_UNSPEC,
.signaling_vector = 0,
.wait_set = NULL,
};
ret = fi_eq_open(fip->fabric, &eq_attr, &fip->eq, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening event queue");
goto err_eq_open;
}
return 0;
err_eq_open:
RPMEM_FI_CLOSE(fip->domain, "closing fabric access domain");
err_fi_domain:
RPMEM_FI_CLOSE(fip->fabric, "closing fabric domain");
err_fi_fabric:
return ret;
}
/*
* rpmem_fip_fini_fabric_res -- (internal) deinitialize common fabric resources
*/
static void
rpmem_fip_fini_fabric_res(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->eq, "closing event queue");
RPMEM_FI_CLOSE(fip->domain, "closing fabric access domain");
RPMEM_FI_CLOSE(fip->fabric, "closing fabric domain");
}
/*
* rpmem_fip_init_memory -- (internal) initialize common memory resources
*/
static int
rpmem_fip_init_memory(struct rpmem_fip *fip)
{
ASSERTne(Pagesize, 0);
int ret;
/*
* Register local memory space. The local memory will be used
* with WRITE operation in rpmem_fip_persist function thus
* the FI_WRITE access flag.
*/
ret = fi_mr_reg(fip->domain, fip->laddr, fip->size,
FI_WRITE, 0, 0, 0, &fip->mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registrating memory");
return ret;
}
/* get local memory descriptor */
fip->mr_desc = fi_mr_desc(fip->mr);
return 0;
}
/*
* rpmem_fip_fini_memory -- (internal) deinitialize common memory resources
*/
static void
rpmem_fip_fini_memory(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
}
/*
* rpmem_fip_lanes_init_common -- (internal) initialize common lanes resources
*/
static int
rpmem_fip_lanes_init_common(struct rpmem_fip *fip)
{
int ret;
ret = posix_memalign((void **)&fip->lanes, LANE_ALIGN_SIZE,
fip->nlanes * sizeof(*fip->lanes));
if (ret) {
RPMEM_LOG(ERR, "!allocating lanes");
goto err_alloc_lanes;
}
memset(fip->lanes, 0, fip->nlanes * sizeof(*fip->lanes));
unsigned i;
for (i = 0; i < fip->nlanes; i++) {
ret = rpmem_fip_lane_init(fip, &fip->lanes[i].base);
if (ret)
goto err_lane_init;
}
return 0;
err_lane_init:
for (unsigned j = 0; j < i; j++)
rpmem_fip_lane_fini(&fip->lanes[j].base);
free(fip->lanes);
err_alloc_lanes:
return -1;
}
/*
* rpmem_fip_lanes_fini_common -- (internal) deinitialize common lanes
* resources
*/
static int
rpmem_fip_lanes_fini_common(struct rpmem_fip *fip)
{
int lret = 0;
int ret;
for (unsigned i = 0; i < fip->nlanes; i++) {
ret = rpmem_fip_lane_fini(&fip->lanes[i].base);
if (ret)
lret = ret;
}
free(fip->lanes);
return lret;
}
/*
* rpmem_fip_lanes_init -- (internal) initialize lanes
*/
static int
rpmem_fip_lanes_init(struct rpmem_fip *fip)
{
int ret;
ret = rpmem_fip_lanes_init_common(fip);
if (ret)
return ret;
ret = fip->ops->lanes_init(fip);
if (ret)
goto err_init_lanes;
return 0;
err_init_lanes:
rpmem_fip_lanes_fini_common(fip);
return ret;
}
/*
* rpmem_fip_lane_connect -- (internal) connect on a single lane
*/
static int
rpmem_fip_lane_connect(struct rpmem_fip *fip, struct rpmem_fip_lane *lanep)
{
struct fi_eq_cm_entry entry;
int ret;
ret = fi_connect(lanep->ep, fip->fi->dest_addr, NULL, 0);
if (ret) {
RPMEM_FI_ERR(ret, "initiating connection request");
return ret;
}
return rpmem_fip_read_eq_check(fip->eq, &entry, FI_CONNECTED,
&lanep->ep->fid,
RPMEM_CONNECT_TIMEOUT);
}
/*
* rpmem_fip_lanes_connect -- (internal) establish connections on all lanes
*/
static int
rpmem_fip_lanes_connect(struct rpmem_fip *fip)
{
int ret;
for (unsigned i = 0; i < fip->nlanes; i++) {
struct rpmem_fip_lane *lanep = &fip->lanes[i].base;
ret = rpmem_fip_lane_connect(fip, lanep);
if (ret)
return ret;
}
return 0;
}
/*
* rpmem_fip_lanes_shutdown -- shutdown all endpoints
*/
static int
rpmem_fip_lanes_shutdown(struct rpmem_fip *fip)
{
int ret;
int lret = 0;
for (unsigned i = 0; i < fip->nlanes; i++) {
ret = fi_shutdown(fip->lanes[i].base.ep, 0);
if (ret) {
RPMEM_FI_ERR(ret, "disconnecting endpoint");
lret = ret;
}
}
return lret;
}
/*
* rpmem_fip_lane_prep_write -- (internal) choose right WRITE structure
* according to flags and prepare for collecting its completion
*/
static inline struct rpmem_fip_rma *
rpmem_fip_lane_prep_write(struct rpmem_fip_plane *lanep, unsigned flags)
{
if (flags & RPMEM_COMPLETION) {
rpmem_fip_lane_begin(&lanep->base, FI_WRITE);
return &lanep->write_cq;
}
return &lanep->write;
}
/*
* rpmem_fip_monitor_thread -- (internal) monitor in-band connection
*/
static void *
rpmem_fip_monitor_thread(void *arg)
{
struct rpmem_fip *fip = (struct rpmem_fip *)arg;
struct fi_eq_cm_entry entry;
uint32_t event;
int ret;
while (!rpmem_fip_is_closing(fip)) {
ret = rpmem_fip_read_eq(fip->eq, &entry, &event,
RPMEM_MONITOR_TIMEOUT);
if (unlikely(ret == 0) && event == FI_SHUTDOWN) {
RPMEM_LOG(ERR, "event queue got FI_SHUTDOWN");
/* mark in-band connection as closing */
rpmem_fip_set_closing(fip);
for (unsigned i = 0; i < fip->nlanes; i++) {
fi_cq_signal(fip->lanes[i].base.cq);
}
}
}
return NULL;
}
/*
* rpmem_fip_monitor_init -- (internal) initialize in-band monitor
*/
static int
rpmem_fip_monitor_init(struct rpmem_fip *fip)
{
errno = os_thread_create(&fip->monitor, NULL, rpmem_fip_monitor_thread,
fip);
if (errno) {
RPMEM_LOG(ERR, "!connenction monitor thread");
return -1;
}
return 0;
}
/*
* rpmem_fip_monitor_fini -- (internal) finalize in-band monitor
*/
static int
rpmem_fip_monitor_fini(struct rpmem_fip *fip)
{
rpmem_fip_set_closing(fip);
int ret = os_thread_join(&fip->monitor, NULL);
if (ret) {
RPMEM_LOG(ERR, "joining monitor thread failed");
}
return ret;
}
/*
* rpmem_fip_init_lanes_common -- (internal) initialize lanes
*/
static int
rpmem_fip_init_lanes_common(struct rpmem_fip *fip)
{
ASSERTne(Pagesize, 0);
int ret = 0;
/* allocate persist messages buffer */
fip->pmsg_size = roundup(sizeof(struct rpmem_msg_persist) +
fip->buff_size, (size_t)64);
size_t msg_size = fip->nlanes * fip->pmsg_size;
msg_size = PAGE_ALIGNED_UP_SIZE(msg_size);
errno = posix_memalign((void **)&fip->pmsg, Pagesize, msg_size);
if (errno) {
RPMEM_LOG(ERR, "!allocating messages buffer");
ret = -1;
goto err_malloc_pmsg;
}
/*
* Register persist messages buffer. The persist messages
* are sent to daemon thus the FI_SEND access flag.
*/
ret = fi_mr_reg(fip->domain, fip->pmsg, msg_size, FI_SEND,
0, 0, 0, &fip->pmsg_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registering messages buffer");
goto err_fi_mr_reg_pmsg;
}
/* get persist messages buffer local descriptor */
fip->pmsg_mr_desc = fi_mr_desc(fip->pmsg_mr);
/* allocate persist response messages buffer */
size_t msg_resp_size = fip->nlanes *
sizeof(struct rpmem_msg_persist_resp);
msg_resp_size = PAGE_ALIGNED_UP_SIZE(msg_resp_size);
errno = posix_memalign((void **)&fip->pres, Pagesize, msg_resp_size);
if (errno) {
RPMEM_LOG(ERR, "!allocating messages response buffer");
ret = -1;
goto err_malloc_pres;
}
/*
* Register persist messages response buffer. The persist response
* messages are received from daemon thus the FI_RECV access flag.
*/
ret = fi_mr_reg(fip->domain, fip->pres, msg_resp_size, FI_RECV,
0, 0, 0, &fip->pres_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registering messages response buffer");
goto err_fi_mr_reg_pres;
}
/* get persist response messages buffer local descriptor */
fip->pres_mr_desc = fi_mr_desc(fip->pres_mr);
return 0;
err_fi_mr_reg_pres:
free(fip->pres);
err_malloc_pres:
RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
err_fi_mr_reg_pmsg:
free(fip->pmsg);
err_malloc_pmsg:
return ret;
}
/*
* rpmem_fip_get_pmsg -- return persist message buffer
*/
static inline struct rpmem_msg_persist *
rpmem_fip_get_pmsg(struct rpmem_fip *fip, size_t idx)
{
return (struct rpmem_msg_persist *)
((uintptr_t)fip->pmsg + idx * fip->pmsg_size);
}
/*
* rpmem_fip_init_mem_lanes_gpspm -- initialize lanes rma structures
*/
static int
rpmem_fip_init_mem_lanes_gpspm(struct rpmem_fip *fip)
{
/*
* Initialize all required structures for:
* WRITE, SEND and RECV operations.
*
* If the completion is required the FI_COMPLETION flag and
* appropriate context should be used.
*
* In GPSPM only the RECV and SEND completions are required.
*
* For RECV the context is RECV operation structure used for
* fi_recvmsg(3) function call.
*
* For SEND the context is lane structure.
*
* The received buffer contains a lane id which is used
* to obtain a lane which must be signaled that operation
* has been completed.
*/
unsigned i;
for (i = 0; i < fip->nlanes; i++) {
/* WRITE */
rpmem_fip_rma_init(&fip->lanes[i].write,
fip->mr_desc, 0,
fip->rkey,
&fip->lanes[i],
0);
/* SEND */
rpmem_fip_msg_init(&fip->lanes[i].send,
fip->pmsg_mr_desc, 0,
&fip->lanes[i],
rpmem_fip_get_pmsg(fip, i),
0 /* size must be provided when sending msg */,
FI_COMPLETION);
/* RECV */
rpmem_fip_msg_init(&fip->lanes[i].recv,
fip->pres_mr_desc, 0,
&fip->lanes[i].recv,
&fip->pres[i],
sizeof(fip->pres[i]),
FI_COMPLETION);
}
return 0;
}
/*
* rpmem_fip_fini_lanes_common -- (internal) deinitialize lanes for GPSPM
*/
static void
rpmem_fip_fini_lanes_common(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
RPMEM_FI_CLOSE(fip->pres_mr, "unregistering messages "
"response buffer");
free(fip->pmsg);
free(fip->pres);
}
/*
* rpmem_fip_init_lanes_apm -- (internal) initialize lanes for APM
*/
static int
rpmem_fip_init_lanes_apm(struct rpmem_fip *fip)
{
ASSERTne(Pagesize, 0);
int ret;
ret = rpmem_fip_init_lanes_common(fip);
if (ret)
goto err_init_lanes_common;
ASSERT(IS_PAGE_ALIGNED(RPMEM_RAW_BUFF_SIZE));
errno = posix_memalign((void **)&fip->raw_buff, Pagesize,
RPMEM_RAW_BUFF_SIZE);
if (errno) {
RPMEM_LOG(ERR, "!allocating APM RAW buffer");
goto err_malloc_raw;
}
/* register read-after-write buffer */
ret = fi_mr_reg(fip->domain, fip->raw_buff, RPMEM_RAW_BUFF_SIZE,
FI_REMOTE_WRITE, 0, 0, 0, &fip->raw_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registering APM read buffer");
goto err_fi_raw_mr;
}
/* get read-after-write buffer local descriptor */
fip->raw_mr_desc = fi_mr_desc(fip->raw_mr);
return 0;
err_fi_raw_mr:
free(fip->raw_buff);
err_malloc_raw:
rpmem_fip_fini_lanes_common(fip);
err_init_lanes_common:
return -1;
}
/*
* rpmem_fip_init_mem_lanes_apm -- initialize lanes rma structures
*/
static int
rpmem_fip_init_mem_lanes_apm(struct rpmem_fip *fip)
{
/*
* Initialize all required structures for:
* WRITE and READ operations.
*
* If the completion is required the FI_COMPLETION flag and
* appropriate context should be used.
*
* In APM only the READ completion is required.
* The context is a lane structure.
*/
for (unsigned i = 0; i < fip->nlanes; i++) {
/* WRITE */
rpmem_fip_rma_init(&fip->lanes[i].write,
fip->mr_desc, 0,
fip->rkey,
&fip->lanes[i],
0);
/* WRITE + FI_COMPLETION */
rpmem_fip_rma_init(&fip->lanes[i].write_cq,
fip->mr_desc, 0,
fip->rkey,
&fip->lanes[i],
FI_COMPLETION);
/* READ */
rpmem_fip_rma_init(&fip->lanes[i].read,
fip->raw_mr_desc, 0,
fip->rkey,
&fip->lanes[i],
FI_COMPLETION);
/* SEND */
rpmem_fip_msg_init(&fip->lanes[i].send,
fip->pmsg_mr_desc, 0,
&fip->lanes[i],
rpmem_fip_get_pmsg(fip, i),
fip->pmsg_size,
FI_COMPLETION);
/* RECV */
rpmem_fip_msg_init(&fip->lanes[i].recv,
fip->pres_mr_desc, 0,
&fip->lanes[i].recv,
&fip->pres[i],
sizeof(fip->pres[i]),
FI_COMPLETION);
}
return 0;
}
/*
* rpmem_fip_fini_lanes_apm -- (internal) deinitialize lanes for APM
*/
static void
rpmem_fip_fini_lanes_apm(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->raw_mr, "unregistering APM read buffer");
free(fip->raw_buff);
rpmem_fip_fini_lanes_common(fip);
}
/*
* rpmem_fip_wq_inc -- (internal) increment number of elements in WQ
*/
static inline void
rpmem_fip_wq_inc(struct rpmem_fip_plane *lanep)
{
++lanep->base.wq_elems;
}
/*
* rpmem_fip_wq_set_empty -- (internal) zero number of elements in WQ
*/
static inline void
rpmem_fip_wq_set_empty(struct rpmem_fip_plane *lanep)
{
RPMEM_ASSERT(!lanep->base.wq_is_flushing);
lanep->base.wq_elems = 0;
}
/*
* rpmem_fip_wq_require_flush -- (internal) is WQ almost full
*/
static inline int
rpmem_fip_wq_require_flush(struct rpmem_fip *fip, struct rpmem_fip_plane *lanep)
{
RPMEM_ASSERT(lanep->base.wq_elems < fip->fi->tx_attr->size);
return lanep->base.wq_elems + 1 == fip->fi->tx_attr->size;
}
/*
* rpmem_fip_wq_is_flushing -- (internal) is WQ flush started
*/
static inline int
rpmem_fip_wq_is_flushing(struct rpmem_fip_plane *lanep)
{
return lanep->base.wq_is_flushing;
}
/*
* rpmem_fip_wq_set_flushing -- (internal) mark WQ flush start
*/
static inline void
rpmem_fip_wq_set_flushing(struct rpmem_fip_plane *lanep)
{
lanep->base.wq_is_flushing = 1;
}
/*
 * If the WQ is almost full, the last WRITE has to report its completion;
 * otherwise it is unknown when subsequent commands can be posted.
*/
#define RPMEM_FIP_WQ_FLUSH_REQ RPMEM_COMPLETION
/*
* rpmem_fip_wq_flush_wait -- (internal) wait for WRITE completion
* to make sure WQ can accept subsequent commands
*/
static inline int
rpmem_fip_wq_flush_wait(struct rpmem_fip *fip, struct rpmem_fip_plane *lanep)
{
RPMEM_ASSERT(lanep->base.wq_elems == fip->fi->tx_attr->size);
RPMEM_ASSERT(lanep->base.wq_is_flushing);
/* wait for WRITE completion */
int ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_WRITE);
if (unlikely(ret)) {
LOG(2, "waiting for WRITE completion failed");
return ret;
}
/* when WRITE completion is reaped WQ is empty */
lanep->base.wq_is_flushing = 0;
rpmem_fip_wq_set_empty(lanep);
return 0;
}
/*
* rpmem_fip_wq_inc_and_flush -- (internal) increment number of elements in WQ
* and flush it
*/
static inline int
rpmem_fip_wq_inc_and_flush(struct rpmem_fip *fip, struct rpmem_fip_plane *lanep)
{
rpmem_fip_wq_inc(lanep);
rpmem_fip_wq_set_flushing(lanep);
return rpmem_fip_wq_flush_wait(fip, lanep);
}
/*
* rpmem_fip_wq_flush_check -- (internal) check if WQ requires flush or it is
* during flushing and handle each case
*/
static inline int
rpmem_fip_wq_flush_check(struct rpmem_fip *fip, struct rpmem_fip_plane *lanep,
unsigned *flags)
{
if (rpmem_fip_wq_is_flushing(lanep))
return rpmem_fip_wq_flush_wait(fip, lanep);
if (rpmem_fip_wq_require_flush(fip, lanep))
*flags |= RPMEM_FIP_WQ_FLUSH_REQ;
return 0;
}
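/*
 * Illustrative sketch (comment only, not part of the library): the typical
 * WQ accounting pattern followed by the flush/persist paths below; fip,
 * lanep and flags are the names used throughout this file.
 *
 *	unsigned flags = 0;
 *	if (rpmem_fip_wq_flush_check(fip, lanep, &flags))
 *		return -1;	// waiting for a pending flush failed
 *	// ... post WRITE/SEND, passing flags so that RPMEM_FIP_WQ_FLUSH_REQ
 *	// requests a completion for the last WRITE when the WQ is almost full
 *	rpmem_fip_wq_inc(lanep);	// flush-only path: one more WQ entry
 *	// ...or, once the whole operation is confirmed complete:
 *	rpmem_fip_wq_set_empty(lanep);	// persist/drain path: WQ drained
 */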
/*
* rpmem_fip_get_wq_size -- get WQ size (for validation purposes only)
*/
inline size_t
rpmem_fip_get_wq_size(struct rpmem_fip *fip)
{
RPMEM_ASSERT(fip);
RPMEM_ASSERT(fip->fi);
RPMEM_ASSERT(fip->fi->tx_attr);
return fip->fi->tx_attr->size;
}
/*
* rpmem_fip_flush_raw -- (internal) perform flush operation using rma WRITE
*/
static int
rpmem_fip_flush_raw(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
int ret;
void *laddr = (void *)((uintptr_t)fip->laddr + offset);
uint64_t raddr = fip->raddr + offset;
struct rpmem_fip_rma *write = rpmem_fip_lane_prep_write(lanep, flags);
/* WRITE for requested memory region */
ret = rpmem_fip_writemsg(lanep->base.ep, write, laddr, len, raddr);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "RMA write");
return ret;
}
if (flags & RPMEM_FIP_WQ_FLUSH_REQ)
rpmem_fip_wq_set_flushing(lanep);
return 0;
}
/*
* rpmem_fip_drain_raw -- (internal) perform drain operation using rma READ
*/
static int
rpmem_fip_drain_raw(struct rpmem_fip *fip, unsigned lane)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
int ret;
rpmem_fip_lane_begin(&lanep->base, FI_READ);
/* READ to read-after-write buffer */
ret = rpmem_fip_readmsg(lanep->base.ep, &lanep->read, fip->raw_buff,
RPMEM_RAW_SIZE, fip->raddr);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "RMA read");
return ret;
}
/* wait for READ completion */
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_READ);
if (unlikely(ret)) {
ERR("waiting for READ completion failed");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_raw -- (internal) perform persist operation using
* READ after WRITE mechanism
*/
static int
rpmem_fip_persist_raw(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
int ret;
ret = rpmem_fip_flush_raw(fip, offset, len, lane, flags);
if (unlikely(ret))
return ret;
/* flush WQ prior to posting subsequent message */
if (flags & RPMEM_FIP_WQ_FLUSH_REQ) {
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
ret = rpmem_fip_wq_inc_and_flush(fip, lanep);
if (unlikely(ret))
return ret;
}
return rpmem_fip_drain_raw(fip, lane);
}
/*
* rpmem_fip_post_resp -- (internal) post persist response message buffer
*/
static inline int
rpmem_fip_post_resp(struct rpmem_fip *fip,
struct rpmem_fip_plane *lanep)
{
int ret = rpmem_fip_recvmsg(lanep->base.ep, &lanep->recv);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "posting recv buffer");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_saw -- (internal) perform persist operation using
* SEND after WRITE mechanism
*/
static int
rpmem_fip_persist_saw(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
void *laddr = (void *)((uintptr_t)fip->laddr + offset);
uint64_t raddr = fip->raddr + offset;
struct rpmem_msg_persist *msg;
int ret;
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_SEND);
if (unlikely(ret)) {
ERR("waiting for SEND completion failed");
return ret;
}
struct rpmem_fip_rma *write = rpmem_fip_lane_prep_write(lanep, flags);
/* WRITE for requested memory region */
ret = rpmem_fip_writemsg(lanep->base.ep, write, laddr, len, raddr);
if (unlikely(ret)) {
RPMEM_FI_ERR((int)ret, "RMA write");
return ret;
}
/* flush WQ prior to posting subsequent message */
if (flags & RPMEM_FIP_WQ_FLUSH_REQ) {
ret = rpmem_fip_wq_inc_and_flush(fip, lanep);
if (unlikely(ret))
return ret;
}
rpmem_fip_lane_begin(&lanep->base, FI_RECV | FI_SEND);
/* SEND persist message */
msg = rpmem_fip_msg_get_pmsg(&lanep->send);
msg->flags = (flags & RPMEM_FLUSH_PERSIST_MASK);
msg->lane = lane;
msg->addr = raddr;
msg->size = len;
ret = rpmem_fip_sendmsg(lanep->base.ep, &lanep->send, sizeof(*msg));
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "MSG send");
return ret;
}
/* wait for persist operation completion */
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_RECV);
if (unlikely(ret)) {
ERR("waiting for RECV completion failed");
return ret;
}
ret = rpmem_fip_post_resp(fip, lanep);
if (unlikely(ret)) {
ERR("posting RECV buffer failed");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_send -- (internal) perform persist operation using
* RDMA SEND operation with data inlined in the message buffer.
*/
static int
rpmem_fip_persist_send(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
RPMEM_ASSERT(len <= fip->buff_size);
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
void *laddr = (void *)((uintptr_t)fip->laddr + offset);
uint64_t raddr = fip->raddr + offset;
struct rpmem_msg_persist *msg;
int ret;
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_SEND);
if (unlikely(ret)) {
ERR("waiting for SEND completion failed");
return ret;
}
rpmem_fip_lane_begin(&lanep->base, FI_RECV | FI_SEND);
/* SEND persist message */
msg = rpmem_fip_msg_get_pmsg(&lanep->send);
msg->flags = flags;
msg->lane = lane;
msg->addr = raddr;
msg->size = len;
memcpy(msg->data, laddr, len);
ret = rpmem_fip_sendmsg(lanep->base.ep, &lanep->send,
sizeof(*msg) + len);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "MSG send");
return ret;
}
/* wait for persist operation completion */
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_RECV);
if (unlikely(ret)) {
ERR("waiting for RECV completion failed");
return ret;
}
ret = rpmem_fip_post_resp(fip, lanep);
if (unlikely(ret)) {
ERR("posting RECV buffer failed");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_gpspm_sockets -- (internal) perform persist operation
* for GPSPM - sockets provider implementation which doesn't use the
* inline persist operation
*/
static ssize_t
rpmem_fip_persist_gpspm_sockets(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
unsigned mode = flags & RPMEM_FLUSH_PERSIST_MASK;
if (mode == RPMEM_PERSIST_SEND)
flags = (flags & ~RPMEM_FLUSH_PERSIST_MASK) | RPMEM_FLUSH_WRITE;
int ret = rpmem_fip_wq_flush_check(fip, &fip->lanes[lane], &flags);
if (unlikely(ret))
return -abs(ret);
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
ret = rpmem_fip_persist_saw(fip, offset, len, lane, flags);
if (ret)
return -abs(ret);
rpmem_fip_wq_set_empty(&fip->lanes[lane]);
return (ssize_t)len;
}
/*
* rpmem_fip_persist_apm_sockets -- (internal) perform persist operation
* for APM - sockets provider implementation which doesn't use the
* inline persist operation
*/
static ssize_t
rpmem_fip_persist_apm_sockets(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
int ret = rpmem_fip_wq_flush_check(fip, &fip->lanes[lane], &flags);
if (unlikely(ret))
return -abs(ret);
ret = rpmem_fip_persist_raw(fip, offset, len, lane, flags);
if (unlikely(ret))
return -abs(ret);
rpmem_fip_wq_set_empty(&fip->lanes[lane]);
return (ssize_t)len;
}
/*
* rpmem_fip_persist_gpspm -- (internal) perform persist operation for GPSPM
*/
static ssize_t
rpmem_fip_persist_gpspm(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
unsigned mode = flags & RPMEM_FLUSH_PERSIST_MASK;
int ret = rpmem_fip_wq_flush_check(fip, &fip->lanes[lane], &flags);
if (unlikely(ret))
return -abs(ret);
if (mode == RPMEM_PERSIST_SEND) {
len = min(len, fip->buff_size);
ret = rpmem_fip_persist_send(fip, offset, len, lane, flags);
} else {
ret = rpmem_fip_persist_saw(fip, offset, len, lane, flags);
}
if (ret)
return -abs(ret);
rpmem_fip_wq_set_empty(&fip->lanes[lane]);
return (ssize_t)len;
}
/*
* rpmem_fip_drain_nop -- (internal) perform drain operation as NOP
*/
static int
rpmem_fip_drain_nop(struct rpmem_fip *fip, unsigned lane)
{
(void) fip;
(void) lane;
return 0;
}
/*
* rpmem_fip_flush_apm -- (internal) perform flush operation for APM
*/
static ssize_t
rpmem_fip_flush_apm(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
int ret;
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
unsigned mode = flags & RPMEM_FLUSH_PERSIST_MASK;
ret = rpmem_fip_wq_flush_check(fip, lanep, &flags);
if (unlikely(ret))
return ret;
if (mode == RPMEM_PERSIST_SEND) {
/*
* XXX: Probably posting Send in the flush and waiting for the
* response in the drain will give some performance gains.
*/
len = min(len, fip->buff_size);
ret = rpmem_fip_persist_send(fip, offset, len, lane, flags);
} else {
ret = rpmem_fip_flush_raw(fip, offset, len, lane, flags);
}
if (ret)
return -abs(ret);
rpmem_fip_wq_inc(lanep);
return (ssize_t)len;
}
/*
* rpmem_fip_drain_apm -- (internal) perform drain operation for APM
*/
static int
rpmem_fip_drain_apm(struct rpmem_fip *fip, unsigned lane)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
int ret;
if (unlikely(rpmem_fip_wq_is_flushing(lanep))) {
ret = rpmem_fip_wq_flush_wait(fip, lanep);
if (unlikely(ret))
return ret;
}
ret = rpmem_fip_drain_raw(fip, lane);
/* successful drain means WQ is empty */
if (likely(!ret))
rpmem_fip_wq_set_empty(lanep);
return ret;
}
/*
* rpmem_fip_persist_apm -- (internal) perform persist operation for APM
*/
static ssize_t
rpmem_fip_persist_apm(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
unsigned mode = flags & RPMEM_FLUSH_PERSIST_MASK;
int ret = rpmem_fip_wq_flush_check(fip, &fip->lanes[lane], &flags);
if (unlikely(ret))
return -abs(ret);
if (unlikely(mode == RPMEM_DEEP_PERSIST))
ret = rpmem_fip_persist_saw(fip, offset, len, lane, flags);
else if (mode == RPMEM_PERSIST_SEND) {
len = min(len, fip->buff_size);
ret = rpmem_fip_persist_send(fip, offset, len, lane, flags);
} else {
ret = rpmem_fip_persist_raw(fip, offset, len, lane, flags);
}
if (unlikely(ret))
return -abs(ret);
rpmem_fip_wq_set_empty(&fip->lanes[lane]);
return (ssize_t)len;
}
/*
* rpmem_fip_post_lanes_common -- (internal) post all persist response message
* buffers
*/
static int
rpmem_fip_post_lanes_common(struct rpmem_fip *fip)
{
int ret = 0;
for (unsigned i = 0; i < fip->nlanes; i++) {
ret = rpmem_fip_post_resp(fip, &fip->lanes[i]);
if (ret)
break;
}
return ret;
}
/*
 * rpmem_fip_ops -- operations specific to the persistency method used
*
* Note: GPSPM flush is emulated by persist whereas drain is a nop.
*
* Probably splitting Send-after-Write into two stages (flush + drain)
* will give some performance gains for GPSPM mode.
*/
static const struct rpmem_fip_ops
rpmem_fip_ops[MAX_RPMEM_PROV][MAX_RPMEM_PM] = {
[RPMEM_PROV_LIBFABRIC_VERBS] = {
[RPMEM_PM_GPSPM] = {
.flush = rpmem_fip_persist_gpspm,
.drain = rpmem_fip_drain_nop,
.persist = rpmem_fip_persist_gpspm,
.lanes_init = rpmem_fip_init_lanes_common,
.lanes_init_mem = rpmem_fip_init_mem_lanes_gpspm,
.lanes_fini = rpmem_fip_fini_lanes_common,
.lanes_post = rpmem_fip_post_lanes_common,
},
[RPMEM_PM_APM] = {
.flush = rpmem_fip_flush_apm,
.drain = rpmem_fip_drain_apm,
.persist = rpmem_fip_persist_apm,
.lanes_init = rpmem_fip_init_lanes_apm,
.lanes_init_mem = rpmem_fip_init_mem_lanes_apm,
.lanes_fini = rpmem_fip_fini_lanes_apm,
.lanes_post = rpmem_fip_post_lanes_common,
},
},
[RPMEM_PROV_LIBFABRIC_SOCKETS] = {
[RPMEM_PM_GPSPM] = {
.flush = rpmem_fip_persist_gpspm_sockets,
.drain = rpmem_fip_drain_nop,
.persist = rpmem_fip_persist_gpspm_sockets,
.lanes_init = rpmem_fip_init_lanes_common,
.lanes_init_mem = rpmem_fip_init_mem_lanes_gpspm,
.lanes_fini = rpmem_fip_fini_lanes_common,
.lanes_post = rpmem_fip_post_lanes_common,
},
[RPMEM_PM_APM] = {
.flush = rpmem_fip_flush_apm,
.drain = rpmem_fip_drain_apm,
.persist = rpmem_fip_persist_apm_sockets,
.lanes_init = rpmem_fip_init_lanes_apm,
.lanes_init_mem = rpmem_fip_init_mem_lanes_apm,
.lanes_fini = rpmem_fip_fini_lanes_apm,
.lanes_post = rpmem_fip_post_lanes_common,
},
}
};
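/*
 * Illustrative sketch (comment only): the table above is indexed by provider
 * and persistency method; the selected entry is called through fip->ops,
 * e.g. (see rpmem_fip_set_attr() and rpmem_fip_persist() below):
 *
 *	fip->ops = &rpmem_fip_ops[attr->provider][fip->persist_method];
 *	ssize_t r = fip->ops->persist(fip, offset, len, lane, flags);
 */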
/*
* rpmem_fip_set_attr -- (internal) set required attributes
*/
static void
rpmem_fip_set_attr(struct rpmem_fip *fip, struct rpmem_fip_attr *attr)
{
fip->raddr = (uint64_t)attr->raddr;
fip->rkey = attr->rkey;
fip->laddr = attr->laddr;
fip->size = attr->size;
fip->buff_size = attr->buff_size;
fip->persist_method = attr->persist_method;
rpmem_fip_set_nlanes(fip, attr->nlanes);
/* one for read operation */
fip->cq_size = rpmem_fip_cq_size(fip->persist_method,
RPMEM_FIP_NODE_CLIENT);
fip->ops = &rpmem_fip_ops[attr->provider][fip->persist_method];
}
/*
* rpmem_fip_init -- initialize fabric provider
*/
struct rpmem_fip *
rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes)
{
int ret;
struct rpmem_fip *fip = calloc(1, sizeof(*fip));
if (!fip) {
RPMEM_LOG(ERR, "!allocating fabric handle");
return NULL;
}
ret = rpmem_fip_getinfo(fip, node, service,
attr->provider, attr->max_wq_size, attr->persist_method);
if (ret)
goto err_getinfo;
fip->cq_read = attr->provider == RPMEM_PROV_LIBFABRIC_VERBS ?
fi_cq_read : cq_read_infinite;
rpmem_fip_set_attr(fip, attr);
*nlanes = fip->nlanes;
ret = rpmem_fip_init_fabric_res(fip);
if (ret)
goto err_init_fabric_res;
ret = rpmem_fip_lanes_init(fip);
if (ret)
goto err_init_lanes;
return fip;
err_init_lanes:
rpmem_fip_fini_fabric_res(fip);
err_init_fabric_res:
fi_freeinfo(fip->fi);
err_getinfo:
free(fip);
return NULL;
}
/*
* rpmem_fip_fini -- deinitialize fabric provider
*/
void
rpmem_fip_fini(struct rpmem_fip *fip)
{
fip->ops->lanes_fini(fip);
rpmem_fip_lanes_fini_common(fip);
rpmem_fip_fini_fabric_res(fip);
fi_freeinfo(fip->fi);
free(fip);
}
/*
* rpmem_fip_connect -- connect to remote peer
*/
int
rpmem_fip_connect(struct rpmem_fip *fip)
{
int ret;
ret = rpmem_fip_lanes_connect(fip);
if (ret)
goto err_lanes_connect;
ret = rpmem_fip_monitor_init(fip);
if (ret)
goto err_monitor;
ret = rpmem_fip_init_memory(fip);
if (ret)
goto err_init_memory;
ret = fip->ops->lanes_init_mem(fip);
if (ret)
goto err_init_lanes_mem;
ret = fip->ops->lanes_post(fip);
if (ret)
goto err_lanes_post;
return 0;
err_lanes_post:
err_init_lanes_mem:
rpmem_fip_fini_memory(fip);
err_init_memory:
rpmem_fip_monitor_fini(fip);
err_monitor:
rpmem_fip_lanes_shutdown(fip);
err_lanes_connect:
return ret;
}
/*
* rpmem_fip_close -- close connection to remote peer
*/
int
rpmem_fip_close(struct rpmem_fip *fip)
{
int ret;
int lret = 0;
if (unlikely(rpmem_fip_is_closing(fip)))
goto close_monitor;
rpmem_fip_fini_memory(fip);
ret = rpmem_fip_lanes_shutdown(fip);
if (ret)
lret = ret;
close_monitor:
/* close fip monitor */
ret = rpmem_fip_monitor_fini(fip);
if (ret)
lret = ret;
return lret;
}
/*
* rpmem_fip_flush -- perform remote flush operation
*/
int
rpmem_fip_flush(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags)
{
RPMEM_ASSERT((flags & RPMEM_FLUSH_PERSIST_MASK) <= RPMEM_PERSIST_MAX);
RPMEM_ASSERT(flags != RPMEM_DEEP_PERSIST);
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
RPMEM_ASSERT(lane < fip->nlanes);
if (unlikely(lane >= fip->nlanes))
return EINVAL; /* it will be passed to errno */
if (unlikely(offset >= fip->size || offset + len > fip->size))
return EINVAL; /* it will be passed to errno */
if (unlikely(len == 0))
return 0;
int ret = 0;
while (len > 0) {
size_t tmplen = min(len, fip->fi->ep_attr->max_msg_size);
ssize_t r = fip->ops->flush(fip, offset, tmplen, lane, flags);
if (r < 0) {
RPMEM_LOG(ERR, "flush operation failed");
ret = (int)r;
goto err;
}
tmplen = (size_t)r;
offset += tmplen;
len -= tmplen;
}
err:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
/*
* rpmem_fip_drain -- perform remote drain operation
*/
int
rpmem_fip_drain(struct rpmem_fip *fip, unsigned lane)
{
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
RPMEM_ASSERT(lane < fip->nlanes);
if (unlikely(lane >= fip->nlanes))
return EINVAL; /* it will be passed to errno */
int ret = fip->ops->drain(fip, lane);
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
/*
* rpmem_fip_persist -- perform remote persist operation
*/
int
rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags)
{
RPMEM_ASSERT((flags & RPMEM_FLUSH_PERSIST_MASK) <= RPMEM_PERSIST_MAX);
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
RPMEM_ASSERT(lane < fip->nlanes);
if (unlikely(lane >= fip->nlanes))
return EINVAL; /* it will be passed to errno */
if (unlikely(offset >= fip->size || offset + len > fip->size))
return EINVAL; /* it will be passed to errno */
if (unlikely(len == 0))
return 0;
int ret = 0;
while (len > 0) {
size_t tmplen = min(len, fip->fi->ep_attr->max_msg_size);
ssize_t r = fip->ops->persist(fip, offset, tmplen, lane, flags);
if (r < 0) {
RPMEM_LOG(ERR, "persist operation failed");
ret = (int)r;
goto err;
}
tmplen = (size_t)r;
offset += tmplen;
len -= tmplen;
}
err:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
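/*
 * Illustrative caller sketch (comment only; assumes a connected fip handle
 * and a valid lane number): both calls return 0 on success or an errno value.
 *
 *	// deferred completion: flush several ranges, then drain once
 *	int ret = rpmem_fip_flush(fip, off1, len1, lane, 0);
 *	if (!ret)
 *		ret = rpmem_fip_flush(fip, off2, len2, lane, 0);
 *	if (!ret)
 *		ret = rpmem_fip_drain(fip, lane);
 *
 *	// or a single persist == flush + drain
 *	ret = rpmem_fip_persist(fip, off1, len1, lane, 0);
 *	if (ret)
 *		errno = ret;
 */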
/*
* rpmem_fip_read -- perform read operation
*/
int
rpmem_fip_read(struct rpmem_fip *fip, void *buff, size_t len,
size_t off, unsigned lane)
{
int ret;
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
RPMEM_ASSERT(lane < fip->nlanes);
if (unlikely(lane >= fip->nlanes))
return EINVAL; /* it will be passed to errno */
if (unlikely(len == 0)) {
return 0;
}
size_t rd_buff_len = len < fip->fi->ep_attr->max_msg_size ?
len : fip->fi->ep_attr->max_msg_size;
void *rd_buff; /* buffer for read operation */
struct fid_mr *rd_mr; /* read buffer memory region */
void *rd_mr_desc; /* read buffer memory descriptor */
struct rpmem_fip_rlane rd_lane;
/* allocate buffer for read operation */
errno = posix_memalign((void **)&rd_buff, Pagesize,
rd_buff_len);
if (errno) {
RPMEM_LOG(ERR, "!allocating read buffer");
ret = errno;
goto err_malloc_rd_buff;
}
/*
* Register buffer for read operation.
* The read operation utilizes READ operation thus
* the FI_REMOTE_WRITE flag.
*/
ret = fi_mr_reg(fip->domain, rd_buff,
rd_buff_len, FI_REMOTE_WRITE,
0, 0, 0, &rd_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registrating read buffer");
goto err_rd_mr;
}
/* get read buffer local memory descriptor */
rd_mr_desc = fi_mr_desc(rd_mr);
/*
* Initialize READ message. The completion is required in order
* to signal thread that READ operation has been completed.
*/
rpmem_fip_rma_init(&rd_lane.read, rd_mr_desc, 0,
fip->rkey, &rd_lane, FI_COMPLETION);
size_t rd = 0;
uint8_t *cbuff = buff;
struct rpmem_fip_lane *lanep = &fip->lanes[lane].base;
while (rd < len) {
size_t rd_len = len - rd < rd_buff_len ?
len - rd : rd_buff_len;
size_t rd_off = off + rd;
uint64_t raddr = fip->raddr + rd_off;
rpmem_fip_lane_begin(lanep, FI_READ);
ret = rpmem_fip_readmsg(lanep->ep, &rd_lane.read,
rd_buff, rd_len, raddr);
if (ret) {
RPMEM_FI_ERR(ret, "RMA read");
goto err_readmsg;
}
VALGRIND_DO_MAKE_MEM_DEFINED(rd_buff, rd_len);
ret = rpmem_fip_lane_wait(fip, lanep, FI_READ);
if (ret) {
ERR("error when processing read request");
goto err_lane_wait;
}
memcpy(&cbuff[rd], rd_buff, rd_len);
rd += rd_len;
}
ret = 0;
err_lane_wait:
err_readmsg:
RPMEM_FI_CLOSE(rd_mr, "unregistering memory");
err_rd_mr:
free(rd_buff);
err_malloc_rd_buff:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
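/*
 * Illustrative sketch (comment only): reading remote pool content back into
 * a local buffer; the transfer is internally split into chunks not larger
 * than the endpoint's max_msg_size.
 *
 *	char buff[4096];
 *	int ret = rpmem_fip_read(fip, buff, sizeof(buff), offset, lane);
 *	if (ret)
 *		errno = ret;	// e.g. ECONNRESET or EINVAL
 */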
/*
* parse_bool -- convert string value to boolean
*/
static int
parse_bool(const char *str_value)
{
if (strcmp(str_value, "0") == 0 ||
strcasecmp(str_value, "false") == 0 ||
strcasecmp(str_value, "no") == 0 ||
strcasecmp(str_value, "off") == 0) {
return 0;
}
if (strcmp(str_value, "1") == 0 ||
strcasecmp(str_value, "true") == 0 ||
strcasecmp(str_value, "yes") == 0 ||
strcasecmp(str_value, "on") == 0) {
return 1;
}
return -1;
}
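/*
 * Illustrative examples (comment only):
 *	parse_bool("yes") == 1, parse_bool("ON") == 1,
 *	parse_bool("0") == 0, parse_bool("False") == 0,
 *	parse_bool("maybe") == -1 (unrecognized value)
 */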
/*
* rpmem_fip_param_get -- read environment variable in the libfabric way
*
* - If parameter does not exist the output value is not changed.
* - If the environment variable is not set the output value is not changed.
* - If the environment variable is set and its value is not correct the output
* value is set to error value.
* - If the environment variable is set and its value is correct the output
* value is set according to the environment variable value.
*/
static void
rpmem_fip_param_get(const char *var_name, int *value)
{
struct fi_param *params;
int count;
int ret = fi_getparams(¶ms, &count);
if (ret != FI_SUCCESS) {
RPMEM_FI_ERR(ret, "getting fabric parameters list");
return;
}
for (int i = 0; i < count; ++i) {
if (strcmp(params[i].name, var_name) != 0)
continue;
if (!params[i].value) {
break;
}
*value = parse_bool(params[i].value);
break;
}
fi_freeparams(params);
}
#define LIBFABRIC_FORK_UNSAFE_VAR "FI_FORK_UNSAFE"
/*
* rpmem_fip_probe_fork_safety -- probe if libfabric is fork safe
*/
void
rpmem_fip_probe_fork_safety(void)
{
int *fork_unsafe = &Rpmem_fork_unsafe; /* false by default */
rpmem_fip_param_get(LIBFABRIC_FORK_UNSAFE_VAR, fork_unsafe);
}
| 47,114 | 22.699698 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem.h -- internal definitions for librpmem
*/
#include "alloc.h"
#include "fault_injection.h"
#define RPMEM_LOG_PREFIX "librpmem"
#define RPMEM_LOG_LEVEL_VAR "RPMEM_LOG_LEVEL"
#define RPMEM_LOG_FILE_VAR "RPMEM_LOG_FILE"
#if FAULT_INJECTION
void
rpmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
rpmem_fault_injection_enabled(void);
#else
static inline void
rpmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
rpmem_fault_injection_enabled(void)
{
return 0;
}
#endif
| 672 | 18.228571 | 62 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_util.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_util.h -- util functions for librpmem header file
*/
#ifndef RPMEM_UTIL_H
#define RPMEM_UTIL_H 1
#ifdef __cplusplus
extern "C" {
#endif
enum {
LERR = 1,
LWARN = 2,
LNOTICE = 3,
LINFO = 4,
_LDBG = 10,
};
#define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args)
#define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args)
#define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args)
#define RPMEM_ASSERT(cond) ASSERT(cond)
#define RPMEM_PERSIST_FLAGS_ALL RPMEM_PERSIST_RELAXED
#define RPMEM_PERSIST_FLAGS_MASK ((unsigned)(~RPMEM_PERSIST_FLAGS_ALL))
#define RPMEM_FLUSH_FLAGS_ALL RPMEM_FLUSH_RELAXED
#define RPMEM_FLUSH_FLAGS_MASK ((unsigned)(~RPMEM_FLUSH_FLAGS_ALL))
const char *rpmem_util_proto_errstr(enum rpmem_err err);
int rpmem_util_proto_errno(enum rpmem_err err);
void rpmem_util_cmds_init(void);
void rpmem_util_cmds_fini(void);
const char *rpmem_util_cmd_get(void);
void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes);
void rpmem_util_get_env_wq_size(unsigned *wq_size);
#ifdef __cplusplus
}
#endif
#endif
| 1,137 | 22.708333 | 71 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_util.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_util.c -- util functions for librpmem source file
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include "out.h"
#include "os.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
static const struct rpmem_err_str_errno {
int err;
const char *str;
} rpmem_err_str_errno[MAX_RPMEM_ERR] = {
[RPMEM_SUCCESS] = {
.err = 0,
.str = "Success",
},
[RPMEM_ERR_BADPROTO] = {
.err = EPROTONOSUPPORT,
.str = "Protocol version number mismatch",
},
[RPMEM_ERR_BADNAME] = {
.err = EINVAL,
.str = "Invalid pool descriptor",
},
[RPMEM_ERR_BADSIZE] = {
.err = EFBIG,
.str = "Invalid pool size",
},
[RPMEM_ERR_BADNLANES] = {
.err = EINVAL,
.str = "Invalid number of lanes",
},
[RPMEM_ERR_BADPROVIDER] = {
.err = EINVAL,
.str = "Invalid provider",
},
[RPMEM_ERR_FATAL] = {
.err = EREMOTEIO,
.str = "Fatal error",
},
[RPMEM_ERR_FATAL_CONN] = {
.err = ECONNABORTED,
.str = "Fatal in-band connection error",
},
[RPMEM_ERR_BUSY] = {
.err = EBUSY,
.str = "Pool already in use",
},
[RPMEM_ERR_EXISTS] = {
.err = EEXIST,
.str = "Pool already exists",
},
[RPMEM_ERR_PROVNOSUP] = {
.err = EMEDIUMTYPE,
.str = "Provider not supported",
},
[RPMEM_ERR_NOEXIST] = {
.err = ENOENT,
.str = "Pool set or its part doesn't exist or it is "
"unavailable",
},
[RPMEM_ERR_NOACCESS] = {
.err = EACCES,
.str = "Pool set permission denied",
},
[RPMEM_ERR_POOL_CFG] = {
.err = EINVAL,
.str = "Invalid pool set configuration",
},
};
static char *Rpmem_cmds;
static char **Rpmem_cmd_arr;
static size_t Rpmem_current_cmd;
static size_t Rpmem_ncmds;
#define RPMEM_CMD_SEPARATOR '|'
/*
* rpmem_util_proto_errstr -- return error string for error code
*/
const char *
rpmem_util_proto_errstr(enum rpmem_err err)
{
RPMEM_ASSERT(err < MAX_RPMEM_ERR);
const char *ret = rpmem_err_str_errno[err].str;
RPMEM_ASSERT(ret);
return ret;
}
/*
* rpmem_util_proto_errno -- return appropriate errno value for error code
*/
int
rpmem_util_proto_errno(enum rpmem_err err)
{
RPMEM_ASSERT(err < MAX_RPMEM_ERR);
return rpmem_err_str_errno[err].err;
}
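/*
 * Illustrative example (comment only): translating a protocol error received
 * from the remote side into errno and a log message:
 *
 *	enum rpmem_err err = RPMEM_ERR_BUSY;
 *	errno = rpmem_util_proto_errno(err);		// EBUSY
 *	ERR("%s", rpmem_util_proto_errstr(err));	// "Pool already in use"
 */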
/*
* rpmem_util_cmds_inc -- increase size of array for rpmem commands
*/
static void
rpmem_util_cmds_inc(void)
{
Rpmem_ncmds++;
Rpmem_cmd_arr = realloc(Rpmem_cmd_arr,
Rpmem_ncmds * sizeof(*Rpmem_cmd_arr));
if (!Rpmem_cmd_arr)
RPMEM_FATAL("!realloc");
}
/*
* rpmem_util_cmds_init -- read a RPMEM_CMD from the environment variable
*/
void
rpmem_util_cmds_init(void)
{
char *cmd = os_getenv(RPMEM_CMD_ENV);
if (!cmd)
cmd = RPMEM_DEF_CMD;
Rpmem_cmds = strdup(cmd);
if (!Rpmem_cmds)
RPMEM_FATAL("!strdup");
char *next = Rpmem_cmds;
while (next) {
rpmem_util_cmds_inc();
Rpmem_cmd_arr[Rpmem_ncmds - 1] = next;
next = strchr(next, RPMEM_CMD_SEPARATOR);
if (next) {
*next = '\0';
next++;
}
}
}
/*
 * rpmem_util_cmds_fini -- release RPMEM_CMD copy
*/
void
rpmem_util_cmds_fini(void)
{
RPMEM_ASSERT(Rpmem_cmds);
RPMEM_ASSERT(Rpmem_cmd_arr);
RPMEM_ASSERT(Rpmem_current_cmd < Rpmem_ncmds);
free(Rpmem_cmds);
Rpmem_cmds = NULL;
free(Rpmem_cmd_arr);
Rpmem_cmd_arr = NULL;
Rpmem_ncmds = 0;
Rpmem_current_cmd = 0;
}
/*
 * rpmem_util_cmd_get -- get the next command from RPMEM_CMD
*
* RPMEM_CMD can contain multiple commands separated by RPMEM_CMD_SEPARATOR.
* Commands from RPMEM_CMD are read sequentially and used to establish out of
* band connections to remote nodes in the order read from a poolset file.
*
*/
const char *
rpmem_util_cmd_get(void)
{
RPMEM_ASSERT(Rpmem_cmds);
RPMEM_ASSERT(Rpmem_cmd_arr);
RPMEM_ASSERT(Rpmem_current_cmd < Rpmem_ncmds);
char *ret = Rpmem_cmd_arr[Rpmem_current_cmd];
Rpmem_current_cmd = (Rpmem_current_cmd + 1) % Rpmem_ncmds;
return ret;
}
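/*
 * Illustrative example (comment only): with the hypothetical environment
 * setting RPMEM_CMD="run-rpmemd-a|run-rpmemd-b", rpmem_util_cmds_init()
 * splits the value on '|' and consecutive rpmem_util_cmd_get() calls return
 * "run-rpmemd-a", "run-rpmemd-b", "run-rpmemd-a", ... (round robin).
 */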
/*
* rpmem_util_get_env_uint -- read the unsigned value from environment
*/
static void
rpmem_util_get_env_uint(const char *env, unsigned *pval)
{
char *env_val = os_getenv(env);
if (env_val && env_val[0] != '\0') {
char *endptr;
errno = 0;
long val = strtol(env_val, &endptr, 10);
if (endptr[0] != '\0' || val <= 0 ||
(errno == ERANGE &&
(val == LONG_MAX || val == LONG_MIN))) {
RPMEM_LOG(ERR, "%s variable must be a positive integer",
env);
} else {
*pval = val < UINT_MAX ? (unsigned)val : UINT_MAX;
}
}
}
/*
* rpmem_util_get_env_max_nlanes -- read the maximum number of lanes from
* RPMEM_MAX_NLANES
*/
void
rpmem_util_get_env_max_nlanes(unsigned *max_nlanes)
{
rpmem_util_get_env_uint(RPMEM_MAX_NLANES_ENV, max_nlanes);
}
/*
* rpmem_util_get_env_wq_size -- read the required WQ size from env
*/
void
rpmem_util_get_env_wq_size(unsigned *wq_size)
{
rpmem_util_get_env_uint(RPMEM_WQ_SIZE_ENV, wq_size);
}
| 4,857 | 19.241667 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_obc.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc.h -- rpmem out-of-band connection client header file
*/
#ifndef RPMEM_OBC_H
#define RPMEM_OBC_H 1
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_obc;
struct rpmem_obc *rpmem_obc_init(void);
void rpmem_obc_fini(struct rpmem_obc *rpc);
int rpmem_obc_connect(struct rpmem_obc *rpc,
const struct rpmem_target_info *info);
int rpmem_obc_disconnect(struct rpmem_obc *rpc);
int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock);
int rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr);
int rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_close(struct rpmem_obc *rpc, int flags);
#ifdef __cplusplus
}
#endif
#endif
| 1,100 | 21.9375 | 65 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/librpmem.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* librpmem.c -- entry points for librpmem
*/
#include <stdio.h>
#include <stdint.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_fip.h"
#include "util.h"
#include "out.h"
/*
* librpmem_init -- load-time initialization for librpmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
librpmem_init(void)
{
util_init();
out_init(RPMEM_LOG_PREFIX, RPMEM_LOG_LEVEL_VAR, RPMEM_LOG_FILE_VAR,
RPMEM_MAJOR_VERSION, RPMEM_MINOR_VERSION);
LOG(3, NULL);
rpmem_util_cmds_init();
rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes);
rpmem_util_get_env_wq_size(&Rpmem_wq_size);
}
/*
* librpmem_fini -- librpmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
librpmem_fini(void)
{
LOG(3, NULL);
rpmem_util_cmds_fini();
out_fini();
}
/*
* rpmem_check_version -- see if library meets application version requirements
*/
const char *
rpmem_check_version(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != RPMEM_MAJOR_VERSION) {
ERR("librpmem major version mismatch (need %u, found %u)",
major_required, RPMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > RPMEM_MINOR_VERSION) {
ERR("librpmem minor version mismatch (need %u, found %u)",
minor_required, RPMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
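/*
 * Illustrative example (comment only): a caller verifying the library version
 * before use; a non-NULL return value is the error message.
 *
 *	const char *msg = rpmem_check_version(RPMEM_MAJOR_VERSION,
 *			RPMEM_MINOR_VERSION);
 *	if (msg != NULL)
 *		fprintf(stderr, "librpmem: %s\n", msg);
 */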
/*
* rpmem_errormsg -- return the last error message
*/
const char *
rpmem_errormsg(void)
{
return out_get_errormsg();
}
| 1,723 | 19.282353 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_obc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc.c -- rpmem out-of-band connection client source file
*/
#include <stdlib.h>
#include <netdb.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_obc.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_ssh.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
/*
* rpmem_obc -- rpmem out-of-band client connection handle
*/
struct rpmem_obc {
struct rpmem_ssh *ssh;
};
/*
* rpmem_obc_is_connected -- (internal) return non-zero value if client is
* connected
*/
static inline int
rpmem_obc_is_connected(struct rpmem_obc *rpc)
{
return rpc->ssh != NULL;
}
/*
* rpmem_obc_check_ibc_attr -- (internal) check in-band connection
* attributes
*/
static int
rpmem_obc_check_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
if (ibc->port == 0 || ibc->port > UINT16_MAX) {
ERR("invalid port number received -- %u", ibc->port);
errno = EPROTO;
return -1;
}
if (ibc->persist_method != RPMEM_PM_GPSPM &&
ibc->persist_method != RPMEM_PM_APM) {
ERR("invalid persistency method received -- %u",
ibc->persist_method);
errno = EPROTO;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_port -- (internal) verify target node port number
*/
static int
rpmem_obc_check_port(const struct rpmem_target_info *info)
{
if (!(info->flags & RPMEM_HAS_SERVICE))
return 0;
if (*info->service == '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
errno = 0;
char *endptr;
long port = strtol(info->service, &endptr, 10);
if (errno || *endptr != '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
if (port < 1) {
ERR("port number must be positive -- '%s'", info->service);
goto err;
}
if (port > UINT16_MAX) {
ERR("port number too large -- '%s'", info->service);
goto err;
}
return 0;
err:
errno = EINVAL;
return -1;
}
/*
* rpmem_obc_close_conn -- (internal) close connection
*/
static void
rpmem_obc_close_conn(struct rpmem_obc *rpc)
{
rpmem_ssh_close(rpc->ssh);
(void) util_fetch_and_and64(&rpc->ssh, 0);
}
/*
* rpmem_obc_init_msg_hdr -- (internal) initialize message header
*/
static void
rpmem_obc_set_msg_hdr(struct rpmem_msg_hdr *hdrp,
enum rpmem_msg_type type, size_t size)
{
hdrp->type = type;
hdrp->size = size;
}
/*
* rpmem_obc_set_pool_desc -- (internal) fill the pool descriptor field
*/
static void
rpmem_obc_set_pool_desc(struct rpmem_msg_pool_desc *pool_desc,
const char *desc, size_t size)
{
RPMEM_ASSERT(size <= UINT32_MAX);
RPMEM_ASSERT(size > 0);
pool_desc->size = (uint32_t)size;
memcpy(pool_desc->desc, desc, size);
pool_desc->desc[size - 1] = '\0';
}
/*
* rpmem_obc_alloc_create_msg -- (internal) allocate and fill create request
* message
*/
static struct rpmem_msg_create *
rpmem_obc_alloc_create_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_msg_create *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate create request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_CREATE, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
if (pool_attr) {
pack_rpmem_pool_attr(pool_attr, &msg->pool_attr);
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg->pool_attr, 0, sizeof(msg->pool_attr));
}
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_req -- (internal) check request attributes
*/
static int
rpmem_obc_check_req(const struct rpmem_req_attr *req)
{
if (req->provider >= MAX_RPMEM_PROV) {
ERR("invalid provider specified -- %u", req->provider);
errno = EINVAL;
return -1;
}
return 0;
}
/*
* rpmem_obj_check_hdr_resp -- (internal) check response message header
*/
static int
rpmem_obc_check_hdr_resp(struct rpmem_msg_hdr_resp *resp,
enum rpmem_msg_type type, size_t size)
{
if (resp->type != type) {
ERR("invalid message type received -- %u", resp->type);
errno = EPROTO;
return -1;
}
if (resp->size != size) {
ERR("invalid message size received -- %lu", resp->size);
errno = EPROTO;
return -1;
}
if (resp->status >= MAX_RPMEM_ERR) {
ERR("invalid status received -- %u", resp->status);
errno = EPROTO;
return -1;
}
if (resp->status) {
enum rpmem_err status = (enum rpmem_err)resp->status;
ERR("%s", rpmem_util_proto_errstr(status));
errno = rpmem_util_proto_errno(status);
return -1;
}
return 0;
}
/*
* rpmem_obc_check_create_resp -- (internal) check create response message
*/
static int
rpmem_obc_check_create_resp(struct rpmem_msg_create_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CREATE_RESP,
sizeof(struct rpmem_msg_create_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_get_res -- (internal) read response attributes
*/
static void
rpmem_obc_get_res(struct rpmem_resp_attr *res,
struct rpmem_msg_ibc_attr *ibc)
{
res->port = (unsigned short)ibc->port;
res->rkey = ibc->rkey;
res->raddr = ibc->raddr;
res->persist_method =
(enum rpmem_persist_method)ibc->persist_method;
res->nlanes = ibc->nlanes;
}
/*
* rpmem_obc_alloc_open_msg -- (internal) allocate and fill open request message
*/
static struct rpmem_msg_open *
rpmem_obc_alloc_open_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
struct rpmem_msg_open *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate open request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_OPEN, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_open_resp -- (internal) check open response message
*/
static int
rpmem_obc_check_open_resp(struct rpmem_msg_open_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_OPEN_RESP,
sizeof(struct rpmem_msg_open_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_check_close_resp -- (internal) check close response message
*/
static int
rpmem_obc_check_close_resp(struct rpmem_msg_close_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CLOSE_RESP,
sizeof(struct rpmem_msg_close_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_check_set_attr_resp -- (internal) check set attributes response
* message
*/
static int
rpmem_obc_check_set_attr_resp(struct rpmem_msg_set_attr_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_SET_ATTR_RESP,
sizeof(struct rpmem_msg_set_attr_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_init -- initialize rpmem obc handle
*/
struct rpmem_obc *
rpmem_obc_init(void)
{
struct rpmem_obc *rpc = calloc(1, sizeof(*rpc));
if (!rpc) {
RPMEM_LOG(ERR, "!allocation of rpmem obc failed");
return NULL;
}
return rpc;
}
/*
* rpmem_obc_fini -- destroy rpmem obc handle
*
* This function must be called with connection already closed - after calling
* the rpmem_obc_disconnect or after receiving relevant value from
* rpmem_obc_monitor.
*/
void
rpmem_obc_fini(struct rpmem_obc *rpc)
{
free(rpc);
}
/*
* rpmem_obc_connect -- connect to target node
*
 * Connects to the target node; the target must be in the following format:
 * <addr>[:<port>]. If the port number is not specified, the default
 * ssh port will be used. The <addr> is translated into an IP address.
*
* Returns an error if connection is already established.
*/
int
rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info)
{
if (rpmem_obc_is_connected(rpc)) {
errno = EALREADY;
goto err_notconnected;
}
if (rpmem_obc_check_port(info))
goto err_port;
rpc->ssh = rpmem_ssh_open(info);
if (!rpc->ssh)
goto err_ssh_open;
return 0;
err_ssh_open:
err_port:
err_notconnected:
return -1;
}
/*
* rpmem_obc_disconnect -- close the connection to target node
*
* Returns error if socket is not connected.
*/
int
rpmem_obc_disconnect(struct rpmem_obc *rpc)
{
if (rpmem_obc_is_connected(rpc)) {
rpmem_obc_close_conn(rpc);
return 0;
}
errno = ENOTCONN;
return -1;
}
/*
* rpmem_obc_monitor -- monitor connection with target node
*
* The nonblock variable indicates whether this function should return
* immediately (= 1) or may block (= 0).
*
* If the function detects that socket was closed by remote peer it is
* closed on local side and set to -1, so there is no need to call
* rpmem_obc_disconnect function. Please take a look at functions'
* descriptions to see which functions cannot be used if the connection
* has been already closed.
*
* This function expects there is no data pending on socket, if any data
* is pending this function returns an error and sets errno to EPROTO.
*
* Return values:
* 0 - not connected
* 1 - connected
* < 0 - error
*/
int
rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock)
{
if (!rpmem_obc_is_connected(rpc))
return 0;
return rpmem_ssh_monitor(rpc->ssh, nonblock);
}
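/*
 * Illustrative example (comment only): non-blocking poll of the out-of-band
 * connection state:
 *
 *	int st = rpmem_obc_monitor(rpc, 1);
 *	if (st == 0)
 *		;	// remote peer closed the connection
 *	else if (st < 0)
 *		;	// error (e.g. unexpected data pending -- errno == EPROTO)
 *	// st == 1 means still connected
 */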
/*
* rpmem_obc_create -- perform create request operation
*
* Returns error if connection has not been established yet.
*/
int
rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_create *msg =
rpmem_obc_alloc_create_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending create request message");
rpmem_hton_msg_create(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending create request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "create request message sent");
RPMEM_LOG(INFO, "receiving create request response");
struct rpmem_msg_create_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving create request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "create request response received");
rpmem_ntoh_msg_create_resp(&resp);
if (rpmem_obc_check_create_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_open -- perform open request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_open *msg =
rpmem_obc_alloc_open_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending open request message");
rpmem_hton_msg_open(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending open request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "open request message sent");
RPMEM_LOG(INFO, "receiving open request response");
struct rpmem_msg_open_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) {
ERR("!receiving open request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "open request response received");
rpmem_ntoh_msg_open_resp(&resp);
if (rpmem_obc_check_open_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
if (pool_attr)
unpack_rpmem_pool_attr(&resp.pool_attr, pool_attr);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_set_attr -- perform set attributes request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
struct rpmem_msg_set_attr msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_SET_ATTR, sizeof(msg));
if (pool_attr) {
memcpy(&msg.pool_attr, pool_attr, sizeof(msg.pool_attr));
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg.pool_attr, 0, sizeof(msg.pool_attr));
}
RPMEM_LOG(INFO, "sending set attributes request message");
rpmem_hton_msg_set_attr(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
ERR("!sending set attributes request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "set attributes request message sent");
RPMEM_LOG(INFO, "receiving set attributes request response");
struct rpmem_msg_set_attr_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving set attributes request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "set attributes request response received");
rpmem_ntoh_msg_set_attr_resp(&resp);
if (rpmem_obc_check_set_attr_resp(&resp))
goto err_msg_resp;
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
err_notconnected:
return -1;
}
/*
* rpmem_obc_close -- perform close request operation
*
* Returns error if connection is not already established.
*
* NOTE: this function does not close the connection, but sends close request
* message to remote node and receives a response. The connection must be
* closed using rpmem_obc_disconnect function.
*/
int
rpmem_obc_close(struct rpmem_obc *rpc, int flags)
{
if (!rpmem_obc_is_connected(rpc)) {
errno = ENOTCONN;
return -1;
}
struct rpmem_msg_close msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_CLOSE, sizeof(msg));
msg.flags = (uint32_t)flags;
RPMEM_LOG(INFO, "sending close request message");
rpmem_hton_msg_close(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
RPMEM_LOG(ERR, "!sending close request failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request message sent");
RPMEM_LOG(INFO, "receiving close request response");
struct rpmem_msg_close_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
RPMEM_LOG(ERR, "!receiving close request response failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request response received");
rpmem_ntoh_msg_close_resp(&resp);
if (rpmem_obc_check_close_resp(&resp))
return -1;
return 0;
}
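/*
 * Illustrative sketch of the out-of-band client lifecycle (comment only;
 * req, res and pool_attr stand for caller-provided structures):
 *
 *	struct rpmem_obc *rpc = rpmem_obc_init();
 *	rpmem_obc_connect(rpc, info);
 *	rpmem_obc_create(rpc, &req, &res, &pool_attr);	// or rpmem_obc_open()
 *	// ... in-band traffic goes through librpmem's fip layer ...
 *	rpmem_obc_close(rpc, 0);
 *	rpmem_obc_disconnect(rpc);
 *	rpmem_obc_fini(rpc);
 */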
| 15,410 | 21.730088 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_cmd.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_cmd.c -- simple interface for running an executable in child process
*/
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
#include "util.h"
#include "out.h"
#include "os.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_cmd.h"
/*
* rpmem_cmd_init -- initialize command
*/
struct rpmem_cmd *
rpmem_cmd_init(void)
{
struct rpmem_cmd *cmd = calloc(1, sizeof(*cmd));
if (!cmd) {
RPMEM_LOG(ERR, "allocating command buffer");
goto err_alloc_cmd;
}
return cmd;
err_alloc_cmd:
return NULL;
}
/*
* rpmem_cmd_fini -- deinitialize command
*/
void
rpmem_cmd_fini(struct rpmem_cmd *cmd)
{
for (int i = 0; i < cmd->args.argc; i++)
free(cmd->args.argv[i]);
free(cmd->args.argv);
free(cmd);
}
/*
* rpmem_cmd_push -- push back command's argument
*/
int
rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg)
{
size_t argv_count = (size_t)cmd->args.argc + 2;
char **argv = realloc(cmd->args.argv, argv_count * sizeof(char *));
if (!argv) {
RPMEM_LOG(ERR, "reallocating command argv");
goto err_realloc;
}
cmd->args.argv = argv;
char *arg_dup = strdup(arg);
if (!arg_dup) {
RPMEM_LOG(ERR, "allocating argument");
goto err_strdup;
}
cmd->args.argv[cmd->args.argc] = arg_dup;
cmd->args.argc++;
cmd->args.argv[cmd->args.argc] = NULL;
return 0;
err_strdup:
err_realloc:
return -1;
}
/*
* rpmem_cmd_log -- print executing command
*/
static void
rpmem_cmd_log(struct rpmem_cmd *cmd)
{
RPMEM_ASSERT(cmd->args.argc > 0);
size_t size = 0;
for (int i = 0; i < cmd->args.argc; i++) {
size += strlen(cmd->args.argv[i]) + 1;
}
char *buff = malloc(size);
if (!buff) {
RPMEM_LOG(ERR, "allocating log buffer for command");
return;
}
size_t pos = 0;
for (int i = 0; pos < size && i < cmd->args.argc; i++) {
int ret = util_snprintf(&buff[pos], size - pos, "%s%s",
cmd->args.argv[i], i == cmd->args.argc - 1 ?
"" : " ");
if (ret < 0) {
RPMEM_LOG(ERR, "!snprintf");
goto out;
}
pos += (size_t)ret;
}
RPMEM_LOG(INFO, "executing command '%s'", buff);
out:
free(buff);
}
/*
* rpmem_cmd_run -- run command and connect with stdin, stdout and stderr
* using unix sockets.
*
* The communication with child process is done via socketpairs on
* stdin, stdout and stderr. The socketpairs are used instead of pipes
* because reading from disconnected pipe causes a SIGPIPE signal.
* When using socketpair it is possible to read data using recv(3)
* function with MSG_NOSIGNAL flag, which doesn't send a signal.
*/
int
rpmem_cmd_run(struct rpmem_cmd *cmd)
{
int fd_in[2];
int fd_out[2];
int fd_err[2];
rpmem_cmd_log(cmd);
/* socketpair for stdin */
int ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_in);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stdin");
goto err_pipe_in;
}
/* parent process stdin socket */
cmd->fd_in = fd_in[1];
/* socketpair for stdout */
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_out);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stdout");
goto err_pipe_out;
}
/* parent process stdout socket */
cmd->fd_out = fd_out[0];
/* socketpair for stderr */
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_err);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stderr");
goto err_pipe_err;
}
/* socketpair for stderr */
cmd->fd_err = fd_err[0];
cmd->pid = fork();
if (cmd->pid == -1) {
RPMEM_LOG(ERR, "forking command");
goto err_fork;
}
if (!cmd->pid) {
dup2(fd_in[0], 0);
dup2(fd_out[1], 1);
dup2(fd_err[1], 2);
execvp(cmd->args.argv[0], cmd->args.argv);
exit(EXIT_FAILURE);
}
os_close(fd_in[0]);
os_close(fd_out[1]);
os_close(fd_err[1]);
return 0;
err_fork:
os_close(fd_err[0]);
os_close(fd_err[1]);
err_pipe_err:
os_close(fd_out[0]);
os_close(fd_out[1]);
err_pipe_out:
os_close(fd_in[0]);
os_close(fd_in[1]);
err_pipe_in:
return -1;
}
/*
* rpmem_cmd_wait -- wait for process to change state
*/
int
rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status)
{
if (cmd->pid <= 0) {
RPMEM_LOG(ERR, "wrong PID: %i", cmd->pid);
errno = EINVAL;
return -1;
}
if (waitpid(cmd->pid, status, 0) != cmd->pid) {
RPMEM_LOG(ERR, "!waitpid failed");
return -1;
}
return 0;
}
/*
* rpmem_cmd_term -- close child process's unix sockets
*/
void
rpmem_cmd_term(struct rpmem_cmd *cmd)
{
os_close(cmd->fd_in);
os_close(cmd->fd_out);
os_close(cmd->fd_err);
RPMEM_ASSERT(cmd->pid > 0);
}
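/*
 * Illustrative sketch of the command lifecycle (comment only; the command
 * line below is hypothetical):
 *
 *	struct rpmem_cmd *cmd = rpmem_cmd_init();
 *	rpmem_cmd_push(cmd, "sh");
 *	rpmem_cmd_push(cmd, "-c");
 *	rpmem_cmd_push(cmd, "echo ready");
 *	rpmem_cmd_run(cmd);	// fork + execvp, sockets on fd_in/fd_out/fd_err
 *	// read the child's stdout via cmd->fd_out, write to cmd->fd_in
 *	rpmem_cmd_term(cmd);	// close the parent's descriptors
 *	int status;
 *	rpmem_cmd_wait(cmd, &status);
 *	rpmem_cmd_fini(cmd);
 */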
| 4,625 | 18.275 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_cmd.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_cmd.h -- helper module for invoking separate process
*/
#ifndef RPMEM_CMD_H
#define RPMEM_CMD_H 1
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_cmd {
int fd_in; /* stdin */
int fd_out; /* stdout */
int fd_err; /* stderr */
struct {
char **argv;
int argc;
} args; /* command arguments */
pid_t pid; /* pid of process */
};
struct rpmem_cmd *rpmem_cmd_init(void);
int rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg);
int rpmem_cmd_run(struct rpmem_cmd *cmd);
void rpmem_cmd_term(struct rpmem_cmd *cmd);
int rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status);
void rpmem_cmd_fini(struct rpmem_cmd *cmd);
#ifdef __cplusplus
}
#endif
#endif
| 790 | 18.775 | 61 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_ssh.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_ssh.c -- rpmem ssh transport layer source file
*/
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "util.h"
#include "os.h"
#include "out.h"
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_cmd.h"
#include "rpmem_util.h"
#define ERR_BUFF_LEN 4095
/* +1 in order to be sure it is always null-terminated */
static char error_str[ERR_BUFF_LEN + 1];
struct rpmem_ssh {
struct rpmem_cmd *cmd;
};
/*
* get_ssh -- return ssh command name
*/
static const char *
get_ssh(void)
{
char *cmd = os_getenv(RPMEM_SSH_ENV);
if (!cmd)
cmd = RPMEM_DEF_SSH;
return cmd;
}
/*
* get_user_at_node -- returns string containing user@node
*/
static char *
get_user_at_node(const struct rpmem_target_info *info)
{
char *user_at_node = NULL;
if (info->flags & RPMEM_HAS_USER) {
size_t ulen = strlen(info->user);
size_t nlen = strlen(info->node);
size_t len = ulen + 1 + nlen + 1;
user_at_node = malloc(len);
if (!user_at_node)
goto err_malloc;
int ret = util_snprintf(user_at_node, len, "%s@%s",
info->user, info->node);
if (ret < 0)
goto err_printf;
} else {
user_at_node = strdup(info->node);
if (!user_at_node)
goto err_malloc;
}
return user_at_node;
err_printf:
free(user_at_node);
err_malloc:
return NULL;
}
/*
* get_cmd -- return an RPMEM_CMD with appended list of arguments
*/
static char *
get_cmd(const char **argv)
{
const char *env_cmd = rpmem_util_cmd_get();
char *cmd = strdup(env_cmd);
if (!cmd)
return NULL;
size_t cmd_len = strlen(cmd) + 1;
const char *arg;
while ((arg = *argv++) != NULL) {
size_t len = strlen(arg);
size_t new_cmd_len = cmd_len + len + 1;
char *tmp = realloc(cmd, new_cmd_len);
if (!tmp)
goto err;
cmd = tmp;
/* append the argument to the command */
cmd[cmd_len - 1] = ' ';
memcpy(&cmd[cmd_len], arg, len);
cmd[cmd_len + len] = '\0';
cmd_len = new_cmd_len;
}
return cmd;
err:
free(cmd);
return NULL;
}
/*
* valist_to_argv -- convert va_list to argv array
*/
static const char **
valist_to_argv(va_list args)
{
const char **argv = malloc(sizeof(const char *));
if (!argv)
return NULL;
argv[0] = NULL;
size_t nargs = 0;
const char *arg;
while ((arg = va_arg(args, const char *)) != NULL) {
nargs++;
const char **tmp = realloc(argv,
(nargs + 1) * sizeof(const char *));
if (!tmp)
goto err;
argv = tmp;
argv[nargs - 1] = arg;
argv[nargs] = NULL;
}
return argv;
err:
free(argv);
return NULL;
}
/*
* rpmem_ssh_execv -- open ssh connection and run $RPMEMD_CMD with
* additional NULL-terminated list of arguments.
*/
struct rpmem_ssh *
rpmem_ssh_execv(const struct rpmem_target_info *info, const char **argv)
{
struct rpmem_ssh *rps = calloc(1, sizeof(*rps));
if (!rps)
goto err_zalloc;
char *user_at_node = get_user_at_node(info);
if (!user_at_node)
goto err_user_node;
rps->cmd = rpmem_cmd_init();
if (!rps->cmd)
goto err_cmd_init;
char *cmd = get_cmd(argv);
if (!cmd)
goto err_cmd;
int ret = rpmem_cmd_push(rps->cmd, get_ssh());
if (ret)
goto err_push;
if (info->flags & RPMEM_HAS_SERVICE) {
/* port number is optional */
ret = rpmem_cmd_push(rps->cmd, "-p");
if (ret)
goto err_push;
ret = rpmem_cmd_push(rps->cmd, info->service);
if (ret)
goto err_push;
}
/*
* Disable allocating pseudo-terminal in order to transfer binary
* data safely.
*/
ret = rpmem_cmd_push(rps->cmd, "-T");
if (ret)
goto err_push;
if (info->flags & RPMEM_FLAGS_USE_IPV4) {
ret = rpmem_cmd_push(rps->cmd, "-4");
if (ret)
goto err_push;
}
/* fail if password required for authentication */
ret = rpmem_cmd_push(rps->cmd, "-oBatchMode=yes");
if (ret)
goto err_push;
ret = rpmem_cmd_push(rps->cmd, user_at_node);
if (ret)
goto err_push;
ret = rpmem_cmd_push(rps->cmd, cmd);
if (ret)
goto err_push;
ret = rpmem_cmd_run(rps->cmd);
if (ret)
goto err_run;
free(user_at_node);
free(cmd);
return rps;
err_run:
err_push:
free(cmd);
err_cmd:
rpmem_cmd_fini(rps->cmd);
err_cmd_init:
free(user_at_node);
err_user_node:
free(rps);
err_zalloc:
return NULL;
}
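/*
 * Illustrative note (added, not part of the original source): for a
 * hypothetical target with user "u", node "host", service "2222" and
 * the RPMEM_FLAGS_USE_IPV4 flag set, the pushes above build a command
 * vector equivalent to:
 *
 *	<ssh> -p 2222 -T -4 -oBatchMode=yes u@host <cmd>
 *
 * where <ssh> is whatever get_ssh() resolved (RPMEM_SSH_ENV or the
 * default) and <cmd> is the string produced by get_cmd().
 */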
/*
* rpmem_ssh_exec -- open ssh connection and run $RPMEMD_CMD with
* additional NULL-terminated list of arguments.
*/
struct rpmem_ssh *
rpmem_ssh_exec(const struct rpmem_target_info *info, ...)
{
struct rpmem_ssh *ssh;
va_list args;
va_start(args, info);
const char **argv = valist_to_argv(args);
if (argv)
ssh = rpmem_ssh_execv(info, argv);
else
ssh = NULL;
va_end(args);
free(argv);
return ssh;
}
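/*
 * Minimal usage sketch (added for illustration; the extra argument is
 * hypothetical). The variadic list must be NULL-terminated, just like
 * rpmem_ssh_open() below passes a single NULL:
 *
 *	struct rpmem_ssh *ssh = rpmem_ssh_exec(info, "--some-arg", NULL);
 *	if (ssh == NULL)
 *		return NULL;	// errno is set by the failing step
 */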
/*
* rpmem_ssh_open -- open ssh connection with specified node and wait for status
*/
struct rpmem_ssh *
rpmem_ssh_open(const struct rpmem_target_info *info)
{
struct rpmem_ssh *ssh = rpmem_ssh_exec(info, NULL);
if (!ssh)
return NULL;
/*
* Read initial status from invoked command.
* This is for synchronization purposes and to make it possible
* to inform client that command's initialization failed.
*/
int32_t status;
int ret = rpmem_ssh_recv(ssh, &status, sizeof(status));
if (ret) {
if (ret == 1 || errno == ECONNRESET)
ERR("%s", rpmem_ssh_strerror(ssh, errno));
else
ERR("!%s", info->node);
goto err_recv_status;
}
if (status) {
ERR("%s: unexpected status received -- '%d'",
info->node, status);
errno = status;
goto err_status;
}
RPMEM_LOG(INFO, "received status: %u", status);
return ssh;
err_recv_status:
err_status:
rpmem_ssh_close(ssh);
return NULL;
}
/*
* rpmem_ssh_close -- close ssh connection
*/
int
rpmem_ssh_close(struct rpmem_ssh *rps)
{
int ret, rv;
rpmem_cmd_term(rps->cmd);
rv = rpmem_cmd_wait(rps->cmd, &ret);
if (rv)
return rv;
rpmem_cmd_fini(rps->cmd);
free(rps);
if (WIFEXITED(ret))
return WEXITSTATUS(ret);
if (WIFSIGNALED(ret)) {
ERR("signal received -- %d", WTERMSIG(ret));
return -1;
}
ERR("exit status -- %d", WEXITSTATUS(ret));
return -1;
}
/*
* rpmem_ssh_send -- send data using ssh transport layer
*
 * The buffer is passed as-is to the remote command's standard input.
*/
int
rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len)
{
int ret = rpmem_xwrite(rps->cmd->fd_in, buff, len, MSG_NOSIGNAL);
if (ret == 1) {
errno = ECONNRESET;
} else if (ret < 0) {
if (errno == EPIPE)
errno = ECONNRESET;
}
return ret;
}
/*
* rpmem_ssh_recv -- receive data using ssh transport layer
*
 * The buffer is read as-is from the remote command's standard output.
*/
int
rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len)
{
int ret = rpmem_xread(rps->cmd->fd_out, buff,
len, MSG_NOSIGNAL);
if (ret == 1) {
errno = ECONNRESET;
} else if (ret < 0) {
if (errno == EPIPE)
errno = ECONNRESET;
}
return ret;
}
/*
* rpmem_ssh_monitor -- check connection state of ssh
*
* Return value:
* 0 - disconnected
* 1 - connected
* <0 - error
*/
int
rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock)
{
uint32_t buff;
int flags = MSG_PEEK;
if (nonblock)
flags |= MSG_DONTWAIT;
int ret = rpmem_xread(rps->cmd->fd_out, &buff, sizeof(buff), flags);
if (!ret) {
errno = EPROTO;
return -1;
}
if (ret < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
return 1;
else
return ret;
}
return 0;
}
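/*
 * Usage sketch (added for illustration, not part of the original
 * source): a non-blocking liveness check that follows the return value
 * convention documented above:
 *
 *	int st = rpmem_ssh_monitor(ssh, 1);
 *	if (st == 1)
 *		;	// still connected, nothing pending
 *	else if (st == 0)
 *		;	// remote side closed the connection
 *	else
 *		;	// error, errno describes the cause
 */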
/*
* rpmem_ssh_strerror -- read error using stderr channel
*/
const char *
rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno)
{
size_t len = 0;
ssize_t ret;
while ((ret = read(rps->cmd->fd_err, error_str + len,
ERR_BUFF_LEN - len))) {
if (ret < 0)
return "reading error string failed";
len += (size_t)ret;
}
error_str[len] = '\0';
if (len == 0) {
int ret;
if (oerrno) {
char buff[UTIL_MAX_ERR_MSG];
util_strerror(oerrno, buff, UTIL_MAX_ERR_MSG);
ret = util_snprintf(error_str, ERR_BUFF_LEN,
"%s", buff);
} else {
ret = util_snprintf(error_str, ERR_BUFF_LEN,
"unknown error");
}
if (ret < 0)
FATAL("!snprintf");
} else {
/* get rid of new line and carriage return chars */
char *cr = strchr(error_str, '\r');
if (cr)
*cr = '\0';
char *nl = strchr(error_str, '\n');
if (nl)
*nl = '\0';
}
return error_str;
}
| 8,221 | 17.559819 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/blk.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* blk.h -- internal definitions for libpmem blk module
*/
#ifndef BLK_H
#define BLK_H 1
#include <stddef.h>
#include "ctl.h"
#include "os_thread.h"
#include "pool_hdr.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMBLK_LOG_PREFIX "libpmemblk"
#define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL"
#define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE"
/* attributes of the blk memory pool format for the pool header */
#define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */
#define BLK_FORMAT_MAJOR 1
#define BLK_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define BLK_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT;
struct pmemblk {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint32_t bsize; /* block size */
/* flag indicating if the pool was zero-initialized */
int is_zeroed;
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
void *data; /* post-header data area */
size_t datasize; /* size of data area */
size_t nlba; /* number of LBAs in pool */
struct btt *bttp; /* btt handle */
unsigned nlane; /* number of lanes */
unsigned next_lane; /* used to rotate through lanes */
os_mutex_t *locks; /* one per lane */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
#ifdef DEBUG
/* held during read/write mprotected sections */
os_mutex_t write_lock;
#endif
};
/* data area starts at this alignment after the struct pmemblk above */
#define BLK_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE)
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemblk_fault_injection_enabled(void);
#else
static inline void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemblk_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 2,483 | 23.116505 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/libpmemblk.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* libpmemblk.c -- pmem entry points for libpmemblk
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemblk.h"
#include "ctl_global.h"
#include "pmemcommon.h"
#include "blk.h"
/*
* The variable from which the config is directly loaded. The string
* cannot contain any comments or extraneous white characters.
*/
#define BLK_CONFIG_ENV_VARIABLE "PMEMBLK_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define BLK_CONFIG_FILE_ENV_VARIABLE "PMEMBLK_CONF_FILE"
/*
* blk_ctl_init_and_load -- (static) initializes CTL and loads configuration
* from env variable and file
*/
static int
blk_ctl_init_and_load(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
if (pbp != NULL && (pbp->ctl = ctl_new()) == NULL) {
LOG(2, "!ctl_new");
return -1;
}
char *env_config = os_getenv(BLK_CONFIG_ENV_VARIABLE);
if (env_config != NULL) {
if (ctl_load_config_from_string(pbp ? pbp->ctl : NULL,
pbp, env_config) != 0) {
LOG(2, "unable to parse config stored in %s "
"environment variable",
BLK_CONFIG_ENV_VARIABLE);
goto err;
}
}
char *env_config_file = os_getenv(BLK_CONFIG_FILE_ENV_VARIABLE);
if (env_config_file != NULL && env_config_file[0] != '\0') {
if (ctl_load_config_from_file(pbp ? pbp->ctl : NULL,
pbp, env_config_file) != 0) {
LOG(2, "unable to parse config stored in %s "
"file (from %s environment variable)",
env_config_file,
BLK_CONFIG_FILE_ENV_VARIABLE);
goto err;
}
}
return 0;
err:
if (pbp)
ctl_delete(pbp->ctl);
return -1;
}
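/*
 * Example (added for illustration): loading configuration through the
 * environment before the pool is created; the ctl query string is
 * hypothetical and "path"/"bsize" are placeholders:
 *
 *	setenv("PMEMBLK_CONF", "sds.at_create=0", 1);
 *	PMEMblkpool *pbp = pmemblk_create(path, bsize, 0, 0666);
 *
 * Alternatively, PMEMBLK_CONF_FILE may point at a config file. As noted
 * above, the string form must not contain comments or extraneous white
 * characters.
 */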
/*
* libpmemblk_init -- (internal) load-time initialization for blk
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemblk_init(void)
{
ctl_global_register();
if (blk_ctl_init_and_load(NULL))
FATAL("error: %s", pmemblk_errormsg());
common_init(PMEMBLK_LOG_PREFIX, PMEMBLK_LOG_LEVEL_VAR,
PMEMBLK_LOG_FILE_VAR, PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION);
LOG(3, NULL);
}
/*
* libpmemblk_fini -- libpmemblk cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemblk_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmemblk_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMBLK_MAJOR_VERSION) {
ERR("libpmemblk major version mismatch (need %u, found %u)",
major_required, PMEMBLK_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMBLK_MINOR_VERSION) {
ERR("libpmemblk minor version mismatch (need %u, found %u)",
minor_required, PMEMBLK_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_check_version -- see if lib meets application version requirements
*/
const char *
pmemblk_check_version(unsigned major_required, unsigned minor_required)
{
return pmemblk_check_versionU(major_required, minor_required);
}
#else
/*
* pmemblk_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemblk_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemblk_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemblk_set_funcs -- allow overriding libpmemblk's call to malloc, etc.
*/
void
pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
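/*
 * Minimal sketch (added for illustration): my_malloc, my_free,
 * my_realloc and my_strdup are hypothetical application routines with
 * the standard signatures shown in the prototype above:
 *
 *	pmemblk_set_funcs(my_malloc, my_free, my_realloc, my_strdup);
 *
 * This is typically done before any other libpmemblk call so that all
 * internal allocations go through the supplied routines.
 */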
/*
* pmemblk_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemblk_errormsg -- return last error message
*/
const char *
pmemblk_errormsg(void)
{
return pmemblk_errormsgU();
}
#else
/*
* pmemblk_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemblk_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 4,318 | 20.487562 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/btt.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt.h -- btt module definitions
*/
#ifndef BTT_H
#define BTT_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* callback functions passed to btt_init() */
struct ns_callback {
int (*nsread)(void *ns, unsigned lane,
void *buf, size_t count, uint64_t off);
int (*nswrite)(void *ns, unsigned lane,
const void *buf, size_t count, uint64_t off);
int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off);
ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp,
size_t len, uint64_t off);
void (*nssync)(void *ns, unsigned lane, void *addr, size_t len);
int ns_is_zeroed;
};
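/*
 * Hypothetical sketch (added for illustration, not part of the original
 * header): wiring the callbacks to a plain in-memory buffer. Real
 * callers, such as libpmemblk, back these with a mapped pool file and
 * add the required durability handling:
 *
 *	static int my_nsread(void *ns, unsigned lane, void *buf,
 *		size_t count, uint64_t off)
 *	{
 *		memcpy(buf, (char *)ns + off, count);
 *		return 0;
 *	}
 *	// my_nswrite, my_nszero, my_nsmap and my_nssync follow the same
 *	// pattern; the filled-in struct is then passed to btt_init():
 *	static const struct ns_callback my_cbs = {
 *		.nsread = my_nsread,
 *		.ns_is_zeroed = 1,
 *		// remaining callbacks omitted for brevity
 *	};
 */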
struct btt_info;
struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp);
unsigned btt_nlane(struct btt *bttp);
size_t btt_nlba(struct btt *bttp);
int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf);
int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf);
int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_check(struct btt *bttp);
void btt_fini(struct btt *bttp);
uint64_t btt_flog_size(uint32_t nfree);
uint64_t btt_map_size(uint32_t external_nlba);
uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree);
int btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left);
struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next);
int map_entry_is_initial(uint32_t map_entry);
void btt_info_convert2h(struct btt_info *infop);
void btt_info_convert2le(struct btt_info *infop);
void btt_flog_convert2h(struct btt_flog *flogp);
void btt_flog_convert2le(struct btt_flog *flogp);
#ifdef __cplusplus
}
#endif
#endif
| 1,908 | 30.816667 | 79 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/btt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* btt.c -- block translation table providing atomic block updates
*
* This is a user-space implementation of the BTT mechanism providing
* single block powerfail write atomicity, as described by:
* The NVDIMM Namespace Specification
*
* To use this module, the caller must provide five routines for
* accessing the namespace containing the data (in this context,
* "namespace" refers to the storage containing the BTT layout, such
* as a file). All namespace I/O is done by these callbacks:
*
* nsread Read count bytes from namespace at offset off
* nswrite Write count bytes to namespace at offset off
* nszero Zero count bytes in namespace at offset off
* nsmap Return direct access to a range of a namespace
* nssync Flush changes made to an nsmap'd range
*
* Data written by the nswrite callback is flushed out to the media
* (made durable) when the call returns. Data written directly via
* the nsmap callback must be flushed explicitly using nssync.
*
* The caller passes these callbacks, along with information such as
* namespace size and UUID to btt_init() and gets back an opaque handle
* which is then used with the rest of the entry points.
*
* Here is a brief list of the entry points to this module:
*
* btt_nlane Returns number of concurrent threads allowed
*
* btt_nlba Returns the usable size, as a count of LBAs
*
* btt_read Reads a single block at a given LBA
*
* btt_write Writes a single block (atomically) at a given LBA
*
* btt_set_zero Sets a block to read back as zeros
*
* btt_set_error Sets a block to return error on read
*
* btt_check Checks the BTT metadata for consistency
*
* btt_fini Frees run-time state, done using namespace
*
* If the caller is multi-threaded, it must only allow btt_nlane() threads
* to enter this module at a time, each assigned a unique "lane" number
* between 0 and btt_nlane() - 1.
*
* There are a number of static routines defined in this module. Here's
* a brief overview of the most important routines:
*
* read_layout Checks for valid BTT layout and builds run-time state.
* A number of helper functions are used by read_layout
* to handle various parts of the metadata:
* read_info
* read_arenas
* read_arena
* read_flogs
* read_flog_pair
*
* write_layout Generates a new BTT layout when one doesn't exist.
* Once a new layout is written, write_layout uses
* the same helper functions above to construct the
* run-time state.
*
* invalid_lba Range check done by each entry point that takes
* an LBA.
*
* lba_to_arena_lba
* Find the arena and LBA in that arena for a given
* external LBA. This is the heart of the arena
* range matching logic.
*
* flog_update Update the BTT free list/log combined data structure
* (known as the "flog"). This is the heart of the
* logic that makes writes powerfail atomic.
*
* map_lock These routines provide atomic access to the BTT map
 * map_unlock		data structure in an arena.
* map_abort
*
* map_entry_setf Common code for btt_set_zero() and btt_set_error().
*
* zero_block Generate a block of all zeros (instead of actually
* doing a read), when the metadata indicates the
* block should read as zeros.
*
* build_rtt These routines construct the run-time tracking
* build_map_locks data structures used during I/O.
*/
#include <inttypes.h>
#include <stdio.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "uuid.h"
#include "btt.h"
#include "btt_layout.h"
#include "sys_util.h"
#include "util.h"
#include "alloc.h"
/*
* The opaque btt handle containing state tracked by this module
* for the btt namespace. This is created by btt_init(), handed to
* all the other btt_* entry points, and deleted by btt_fini().
*/
struct btt {
unsigned nlane; /* number of concurrent threads allowed per btt */
/*
* The laidout flag indicates whether the namespace contains valid BTT
* metadata. It is initialized by read_layout() and if no valid layout
* is found, all reads return zeros and the first write will write the
* BTT layout. The layout_write_mutex protects the laidout flag so
	 * only one writing thread ends up writing the initial metadata by
* calling write_layout().
*/
os_mutex_t layout_write_mutex;
int laidout;
/*
* UUID of the BTT
*/
uint8_t uuid[BTTINFO_UUID_LEN];
/*
* UUID of the containing namespace, used to validate BTT metadata.
*/
uint8_t parent_uuid[BTTINFO_UUID_LEN];
/*
* Parameters controlling/describing the BTT layout.
*/
uint64_t rawsize; /* size of containing namespace */
uint32_t lbasize; /* external LBA size */
uint32_t nfree; /* available flog entries */
uint64_t nlba; /* total number of external LBAs */
unsigned narena; /* number of arenas */
/* run-time state kept for each arena */
struct arena {
uint32_t flags; /* arena flags (btt_info) */
uint32_t external_nlba; /* LBAs that live in this arena */
uint32_t internal_lbasize;
uint32_t internal_nlba;
/*
* The following offsets are relative to the beginning of
* the encapsulating namespace. This is different from
* how these offsets are stored on-media, where they are
		 * relative to the start of the arena. The offsets are
* converted by read_layout() to make them more convenient
* for run-time use.
*/
uint64_t startoff; /* offset to start of arena */
uint64_t dataoff; /* offset to arena data area */
uint64_t mapoff; /* offset to area map */
uint64_t flogoff; /* offset to area flog */
uint64_t nextoff; /* offset to next arena */
/*
* Run-time flog state. Indexed by lane.
*
* The write path uses the flog to find the free block
* it writes to before atomically making it the new
* active block for an external LBA.
*
* The read path doesn't use the flog at all.
*/
struct flog_runtime {
struct btt_flog flog; /* current info */
uint64_t entries[2]; /* offsets for flog pair */
int next; /* next write (0 or 1) */
} *flogs;
/*
* Read tracking table. Indexed by lane.
*
* Before using a free block found in the flog, the write path
* scans the rtt to see if there are any outstanding reads on
* that block (reads that started before the block was freed by
* a concurrent write). Unused slots in the rtt are indicated
* by setting the error bit, BTT_MAP_ENTRY_ERROR, so that the
* entry won't match any post-map LBA when checked.
*/
uint32_t volatile *rtt;
/*
* Map locking. Indexed by pre-map LBA modulo nlane.
*/
os_mutex_t *map_locks;
/*
* Arena info block locking.
*/
os_mutex_t info_lock;
} *arenas;
/*
* Callbacks for doing I/O to namespace. These are provided by
* the code calling the BTT module, which passes them in to
* btt_init(). All namespace I/O is done using these.
*
* The opaque namespace handle "ns" was provided by the code calling
* the BTT module and is passed to each callback to identify the
* namespace being accessed.
*/
void *ns;
const struct ns_callback *ns_cbp;
};
/*
* Signature for arena info blocks. Total size is 16 bytes, including
* the '\0' added to the string by the declaration (the last two bytes
* of the string are '\0').
*/
static const char Sig[] = BTTINFO_SIG;
/*
* Zeroed out flog entry, used when initializing the flog.
*/
static const struct btt_flog Zflog;
/*
* Lookup table and macro for looking up sequence numbers. These are
* the 2-bit numbers that cycle between 01, 10, and 11.
*
* To advance a sequence number to the next number, use something like:
* seq = NSEQ(seq);
*/
static const unsigned Nseq[] = { 0, 2, 3, 1 };
#define NSEQ(seq) (Nseq[(seq) & 3])
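/*
 * Example (added for illustration): a freshly written flog entry starts
 * with seq == 1 and successive updates cycle NSEQ(1) == 2, NSEQ(2) == 3,
 * NSEQ(3) == 1. A seq of 0 never appears in the cycle (0 maps to 0 in
 * the table above) and marks an entry that was never written.
 */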
/*
* get_map_lock_num -- (internal) Calculate offset into map_locks[]
*
* map_locks[] contains nfree locks which are used to protect the map
* from concurrent access to the same cache line. The index into
* map_locks[] is calculated by looking at the byte offset into the map
* (premap_lba * BTT_MAP_ENTRY_SIZE), figuring out how many cache lines
 * into the map that offset falls (dividing by BTT_MAP_LOCK_ALIGN), and
* then selecting one of nfree locks (the modulo at the end).
*
* The extra cast is to keep gcc from generating a false positive
* 64-32 bit conversion error when -fsanitize is set.
*/
static inline uint32_t
get_map_lock_num(uint32_t premap_lba, uint32_t nfree)
{
return (uint32_t)(premap_lba * BTT_MAP_ENTRY_SIZE / BTT_MAP_LOCK_ALIGN)
% nfree;
}
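/*
 * Worked example (added for illustration; assumes the usual constants
 * BTT_MAP_ENTRY_SIZE == 4 and BTT_MAP_LOCK_ALIGN == 64): with
 * nfree == 256, premap_lba 100 gives byte offset 400, cache line
 * 400 / 64 == 6, lock index 6 % 256 == 6 -- so LBAs 96..111, which all
 * live in that cache line, share map_locks[6].
 */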
/*
* invalid_lba -- (internal) set errno and return true if lba is invalid
*
* This function is used at the top of the entry points where an external
* LBA is provided, like this:
*
* if (invalid_lba(bttp, lba))
* return -1;
*/
static int
invalid_lba(struct btt *bttp, uint64_t lba)
{
LOG(3, "bttp %p lba %" PRIu64, bttp, lba);
if (lba >= bttp->nlba) {
ERR("lba out of range (nlba %" PRIu64 ")", bttp->nlba);
errno = EINVAL;
return 1;
}
return 0;
}
/*
* read_info -- (internal) convert btt_info to host byte order & validate
*
* Returns true if info block is valid, and all the integer fields are
* converted to host byte order. If the info block is not valid, this
* routine returns false and the info block passed in is left in an
* unknown state.
*/
static int
read_info(struct btt *bttp, struct btt_info *infop)
{
LOG(3, "infop %p", infop);
if (memcmp(infop->sig, Sig, BTTINFO_SIG_LEN)) {
LOG(3, "signature invalid");
return 0;
}
if (memcmp(infop->parent_uuid, bttp->parent_uuid, BTTINFO_UUID_LEN)) {
LOG(3, "parent UUID mismatch");
return 0;
}
/* to be valid, the fields must checksum correctly */
if (!util_checksum(infop, sizeof(*infop), &infop->checksum, 0, 0)) {
LOG(3, "invalid checksum");
return 0;
}
/* to be valid, info block must have a major version of at least 1 */
if ((infop->major = le16toh(infop->major)) == 0) {
LOG(3, "invalid major version (0)");
return 0;
}
infop->flags = le32toh(infop->flags);
infop->minor = le16toh(infop->minor);
infop->external_lbasize = le32toh(infop->external_lbasize);
infop->external_nlba = le32toh(infop->external_nlba);
infop->internal_lbasize = le32toh(infop->internal_lbasize);
infop->internal_nlba = le32toh(infop->internal_nlba);
infop->nfree = le32toh(infop->nfree);
infop->infosize = le32toh(infop->infosize);
infop->nextoff = le64toh(infop->nextoff);
infop->dataoff = le64toh(infop->dataoff);
infop->mapoff = le64toh(infop->mapoff);
infop->flogoff = le64toh(infop->flogoff);
infop->infooff = le64toh(infop->infooff);
return 1;
}
/*
* map_entry_is_zero -- (internal) checks if map_entry is in zero state
*/
static inline int
map_entry_is_zero(uint32_t map_entry)
{
return (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ZERO;
}
/*
* map_entry_is_error -- (internal) checks if map_entry is in error state
*/
static inline int
map_entry_is_error(uint32_t map_entry)
{
return (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ERROR;
}
/*
* map_entry_is_initial -- checks if map_entry is in initial state
*/
int
map_entry_is_initial(uint32_t map_entry)
{
return (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == 0;
}
/*
* map_entry_is_zero_or_initial -- (internal) checks if map_entry is in initial
* or zero state
*/
static inline int
map_entry_is_zero_or_initial(uint32_t map_entry)
{
uint32_t entry_flags = map_entry & ~BTT_MAP_ENTRY_LBA_MASK;
return entry_flags == 0 || entry_flags == BTT_MAP_ENTRY_ZERO;
}
/*
* btt_flog_get_valid -- return valid and current flog entry
*/
struct btt_flog *
btt_flog_get_valid(struct btt_flog *flog_pair, int *next)
{
/*
* Interesting cases:
* - no valid seq numbers: layout consistency error
* - one valid seq number: that's the current entry
* - two valid seq numbers: higher number is current entry
* - identical seq numbers: layout consistency error
*/
if (flog_pair[0].seq == flog_pair[1].seq) {
return NULL;
} else if (flog_pair[0].seq == 0) {
/* singleton valid flog at flog_pair[1] */
*next = 0;
return &flog_pair[1];
} else if (flog_pair[1].seq == 0) {
/* singleton valid flog at flog_pair[0] */
*next = 1;
return &flog_pair[0];
} else if (NSEQ(flog_pair[0].seq) == flog_pair[1].seq) {
/* flog_pair[1] has the later sequence number */
*next = 0;
return &flog_pair[1];
} else {
/* flog_pair[0] has the later sequence number */
*next = 1;
return &flog_pair[0];
}
}
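/*
 * Example (added for illustration): a pair with seq values {1, 2} picks
 * flog_pair[1] as current (NSEQ(1) == 2) and marks slot 0 as the next
 * one to overwrite; {3, 1} also picks flog_pair[1] since NSEQ(3) == 1;
 * {2, 0} is a singleton and picks flog_pair[0] with slot 1 next.
 */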
/*
* read_flog_pair -- (internal) load up a single flog pair
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
read_flog_pair(struct btt *bttp, unsigned lane, struct arena *arenap,
uint64_t flog_off, struct flog_runtime *flog_runtimep, uint32_t flognum)
{
LOG(5, "bttp %p lane %u arenap %p flog_off %" PRIu64 " runtimep %p "
"flognum %u", bttp, lane, arenap, flog_off, flog_runtimep,
flognum);
flog_runtimep->entries[0] = flog_off;
flog_runtimep->entries[1] = flog_off + sizeof(struct btt_flog);
if (lane >= bttp->nfree) {
ERR("invalid lane %u among nfree %d", lane, bttp->nfree);
errno = EINVAL;
return -1;
}
if (flog_off == 0) {
ERR("invalid flog offset %" PRIu64, flog_off);
errno = EINVAL;
return -1;
}
struct btt_flog flog_pair[2];
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, flog_pair,
sizeof(flog_pair), flog_off) < 0)
return -1;
btt_flog_convert2h(&flog_pair[0]);
if (invalid_lba(bttp, flog_pair[0].lba))
return -1;
btt_flog_convert2h(&flog_pair[1]);
if (invalid_lba(bttp, flog_pair[1].lba))
return -1;
LOG(6, "flog_pair[0] flog_off %" PRIu64 " old_map %u new_map %u seq %u",
flog_off, flog_pair[0].old_map,
flog_pair[0].new_map, flog_pair[0].seq);
LOG(6, "flog_pair[1] old_map %u new_map %u seq %u",
flog_pair[1].old_map, flog_pair[1].new_map,
flog_pair[1].seq);
struct btt_flog *currentp = btt_flog_get_valid(flog_pair,
&flog_runtimep->next);
if (currentp == NULL) {
ERR("flog layout error: bad seq numbers %d %d",
flog_pair[0].seq, flog_pair[1].seq);
arenap->flags |= BTTINFO_FLAG_ERROR;
return 0;
}
LOG(6, "run-time flog next is %d", flog_runtimep->next);
/* copy current flog into run-time flog state */
flog_runtimep->flog = *currentp;
LOG(9, "read flog[%u]: lba %u old %u%s%s%s new %u%s%s%s", flognum,
currentp->lba,
currentp->old_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(currentp->old_map)) ? " ERROR" : "",
(map_entry_is_zero(currentp->old_map)) ? " ZERO" : "",
(map_entry_is_initial(currentp->old_map)) ? " INIT" : "",
currentp->new_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(currentp->new_map)) ? " ERROR" : "",
(map_entry_is_zero(currentp->new_map)) ? " ZERO" : "",
(map_entry_is_initial(currentp->new_map)) ? " INIT" : "");
/*
* Decide if the current flog info represents a completed
* operation or an incomplete operation. If completed, the
* old_map field will contain the free block to be used for
* the next write. But if the operation didn't complete (indicated
* by the map entry not being updated), then the operation is
* completed now by updating the map entry.
*
* A special case, used by flog entries when first created, is
* when old_map == new_map. This counts as a complete entry
* and doesn't require reading the map to see if recovery is
* required.
*/
if (currentp->old_map == currentp->new_map) {
LOG(9, "flog[%u] entry complete (initial state)", flognum);
return 0;
}
/* convert pre-map LBA into an offset into the map */
uint64_t map_entry_off = arenap->mapoff +
BTT_MAP_ENTRY_SIZE * currentp->lba;
/* read current map entry */
uint32_t entry;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &entry,
sizeof(entry), map_entry_off) < 0)
return -1;
entry = le32toh(entry);
/* map entry in initial state */
if (map_entry_is_initial(entry))
entry = currentp->lba | BTT_MAP_ENTRY_NORMAL;
if (currentp->new_map != entry && currentp->old_map == entry) {
/* last update didn't complete */
LOG(9, "recover flog[%u]: map[%u]: %u",
flognum, currentp->lba, currentp->new_map);
/*
* Recovery step is to complete the transaction by
* updating the map entry.
*/
entry = htole32(currentp->new_map);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &entry,
sizeof(uint32_t), map_entry_off) < 0)
return -1;
}
return 0;
}
/*
* flog_update -- (internal) write out an updated flog entry
*
* The flog entries are not checksummed. Instead, increasing sequence
* numbers are used to atomically switch the active flog entry between
* the first and second struct btt_flog in each slot. In order for this
* to work, the sequence number must be updated only after all the other
* fields in the flog are updated. So the writes to the flog are broken
 * into two writes: the first covers the lba and old_map fields and,
 * only after those fields are known to be written durably, the second
 * write, which updates the new_map and seq fields, is done.
*
* Returns 0 on success, otherwise -1/errno.
*/
static int
flog_update(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t lba, uint32_t old_map, uint32_t new_map)
{
LOG(3, "bttp %p lane %u arenap %p lba %u old_map %u new_map %u",
bttp, lane, arenap, lba, old_map, new_map);
/* construct new flog entry in little-endian byte order */
struct btt_flog new_flog;
new_flog.lba = lba;
new_flog.old_map = old_map;
new_flog.new_map = new_map;
new_flog.seq = NSEQ(arenap->flogs[lane].flog.seq);
btt_flog_convert2le(&new_flog);
uint64_t new_flog_off =
arenap->flogs[lane].entries[arenap->flogs[lane].next];
/* write out first two fields first */
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &new_flog,
sizeof(uint32_t) * 2, new_flog_off) < 0)
return -1;
new_flog_off += sizeof(uint32_t) * 2;
/* write out new_map and seq field to make it active */
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &new_flog.new_map,
sizeof(uint32_t) * 2, new_flog_off) < 0)
return -1;
/* flog entry written successfully, update run-time state */
arenap->flogs[lane].next = 1 - arenap->flogs[lane].next;
arenap->flogs[lane].flog.lba = lba;
arenap->flogs[lane].flog.old_map = old_map;
arenap->flogs[lane].flog.new_map = new_map;
arenap->flogs[lane].flog.seq = NSEQ(arenap->flogs[lane].flog.seq);
LOG(9, "update flog[%u]: lba %u old %u%s%s%s new %u%s%s%s", lane, lba,
old_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(old_map)) ? " ERROR" : "",
(map_entry_is_zero(old_map)) ? " ZERO" : "",
(map_entry_is_initial(old_map)) ? " INIT" : "",
new_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(new_map)) ? " ERROR" : "",
(map_entry_is_zero(new_map)) ? " ZERO" : "",
(map_entry_is_initial(new_map)) ? " INIT" : "");
return 0;
}
/*
* arena_setf -- (internal) updates the given flag for the arena info block
*/
static int
arena_setf(struct btt *bttp, struct arena *arenap, unsigned lane, uint32_t setf)
{
LOG(3, "bttp %p arenap %p lane %u setf 0x%x", bttp, arenap, lane, setf);
/* update runtime state */
util_fetch_and_or32(&arenap->flags, setf);
if (!bttp->laidout) {
/* no layout yet to update */
return 0;
}
/*
* Read, modify and write out the info block
* at both the beginning and end of the arena.
*/
uint64_t arena_off = arenap->startoff;
struct btt_info info;
/* protect from simultaneous writes to the layout */
util_mutex_lock(&arenap->info_lock);
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0) {
goto err;
}
uint64_t infooff = le64toh(info.infooff);
/* update flags */
info.flags |= htole32(setf);
/* update checksum */
util_checksum(&info, sizeof(info), &info.checksum, 1, 0);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0) {
goto err;
}
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off + infooff) < 0) {
goto err;
}
util_mutex_unlock(&arenap->info_lock);
return 0;
err:
util_mutex_unlock(&arenap->info_lock);
return -1;
}
/*
* set_arena_error -- (internal) set the error flag for the given arena
*/
static int
set_arena_error(struct btt *bttp, struct arena *arenap, unsigned lane)
{
LOG(3, "bttp %p arena %p lane %u", bttp, arenap, lane);
return arena_setf(bttp, arenap, lane, BTTINFO_FLAG_ERROR);
}
/*
* read_flogs -- (internal) load up all the flog entries for an arena
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
read_flogs(struct btt *bttp, unsigned lane, struct arena *arenap)
{
if ((arenap->flogs = Zalloc(bttp->nfree *
sizeof(struct flog_runtime))) == NULL) {
ERR("!Malloc for %u flog entries", bttp->nfree);
return -1;
}
/*
* Load up the flog state. read_flog_pair() will determine if
	 * any recovery steps are required and, if so, perform them on the
	 * in-memory
* data structures it creates. Sets error flag when it
* determines an invalid state.
*/
uint64_t flog_off = arenap->flogoff;
struct flog_runtime *flog_runtimep = arenap->flogs;
for (uint32_t i = 0; i < bttp->nfree; i++) {
if (read_flog_pair(bttp, lane, arenap, flog_off,
flog_runtimep, i) < 0) {
set_arena_error(bttp, arenap, lane);
return -1;
}
/* prepare for next time around the loop */
flog_off += roundup(2 * sizeof(struct btt_flog),
BTT_FLOG_PAIR_ALIGN);
flog_runtimep++;
}
return 0;
}
/*
* build_rtt -- (internal) construct a read tracking table for an arena
*
* Zero is returned on success, otherwise -1/errno.
*
* The rtt is big enough to hold an entry for each free block (nfree)
* since nlane can't be bigger than nfree. nlane may end up smaller,
* in which case some of the high rtt entries will be unused.
*/
static int
build_rtt(struct btt *bttp, struct arena *arenap)
{
if ((arenap->rtt = Malloc(bttp->nfree * sizeof(uint32_t)))
== NULL) {
ERR("!Malloc for %d rtt entries", bttp->nfree);
return -1;
}
for (uint32_t lane = 0; lane < bttp->nfree; lane++)
arenap->rtt[lane] = BTT_MAP_ENTRY_ERROR;
util_synchronize();
return 0;
}
/*
* build_map_locks -- (internal) construct map locks
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
build_map_locks(struct btt *bttp, struct arena *arenap)
{
if ((arenap->map_locks =
Malloc(bttp->nfree * sizeof(*arenap->map_locks)))
== NULL) {
ERR("!Malloc for %d map_lock entries", bttp->nfree);
return -1;
}
for (uint32_t lane = 0; lane < bttp->nfree; lane++)
util_mutex_init(&arenap->map_locks[lane]);
return 0;
}
/*
* read_arena -- (internal) load up an arena and build run-time state
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
read_arena(struct btt *bttp, unsigned lane, uint64_t arena_off,
struct arena *arenap)
{
LOG(3, "bttp %p lane %u arena_off %" PRIu64 " arenap %p",
bttp, lane, arena_off, arenap);
struct btt_info info;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &info, sizeof(info),
arena_off) < 0)
return -1;
arenap->flags = le32toh(info.flags);
arenap->external_nlba = le32toh(info.external_nlba);
arenap->internal_lbasize = le32toh(info.internal_lbasize);
arenap->internal_nlba = le32toh(info.internal_nlba);
arenap->startoff = arena_off;
arenap->dataoff = arena_off + le64toh(info.dataoff);
arenap->mapoff = arena_off + le64toh(info.mapoff);
arenap->flogoff = arena_off + le64toh(info.flogoff);
arenap->nextoff = arena_off + le64toh(info.nextoff);
if (read_flogs(bttp, lane, arenap) < 0)
return -1;
if (build_rtt(bttp, arenap) < 0)
return -1;
if (build_map_locks(bttp, arenap) < 0)
return -1;
/* initialize the per arena info block lock */
util_mutex_init(&arenap->info_lock);
return 0;
}
/*
 * btt_info_convert2h -- convert btt_info to host byte order
*/
void
btt_info_convert2h(struct btt_info *infop)
{
infop->flags = le32toh(infop->flags);
infop->major = le16toh(infop->major);
infop->minor = le16toh(infop->minor);
infop->external_lbasize = le32toh(infop->external_lbasize);
infop->external_nlba = le32toh(infop->external_nlba);
infop->internal_lbasize = le32toh(infop->internal_lbasize);
infop->internal_nlba = le32toh(infop->internal_nlba);
infop->nfree = le32toh(infop->nfree);
infop->infosize = le32toh(infop->infosize);
infop->nextoff = le64toh(infop->nextoff);
infop->dataoff = le64toh(infop->dataoff);
infop->mapoff = le64toh(infop->mapoff);
infop->flogoff = le64toh(infop->flogoff);
infop->infooff = le64toh(infop->infooff);
}
/*
* btt_info_convert2le -- convert btt_info to little-endian byte order
*/
void
btt_info_convert2le(struct btt_info *infop)
{
	infop->flags = htole32(infop->flags);
	infop->major = htole16(infop->major);
	infop->minor = htole16(infop->minor);
	infop->external_lbasize = htole32(infop->external_lbasize);
	infop->external_nlba = htole32(infop->external_nlba);
	infop->internal_lbasize = htole32(infop->internal_lbasize);
	infop->internal_nlba = htole32(infop->internal_nlba);
	infop->nfree = htole32(infop->nfree);
	infop->infosize = htole32(infop->infosize);
	infop->nextoff = htole64(infop->nextoff);
	infop->dataoff = htole64(infop->dataoff);
	infop->mapoff = htole64(infop->mapoff);
	infop->flogoff = htole64(infop->flogoff);
	infop->infooff = htole64(infop->infooff);
}
/*
* btt_flog_convert2h -- convert btt_flog to host byte order
*/
void
btt_flog_convert2h(struct btt_flog *flogp)
{
flogp->lba = le32toh(flogp->lba);
flogp->old_map = le32toh(flogp->old_map);
flogp->new_map = le32toh(flogp->new_map);
flogp->seq = le32toh(flogp->seq);
}
/*
* btt_flog_convert2le -- convert btt_flog to LE byte order
*/
void
btt_flog_convert2le(struct btt_flog *flogp)
{
flogp->lba = htole32(flogp->lba);
flogp->old_map = htole32(flogp->old_map);
flogp->new_map = htole32(flogp->new_map);
flogp->seq = htole32(flogp->seq);
}
/*
* read_arenas -- (internal) load up all arenas and build run-time state
*
* On entry, layout must be known to be valid, and the number of arenas
* must be known. Zero is returned on success, otherwise -1/errno.
*/
static int
read_arenas(struct btt *bttp, unsigned lane, unsigned narena)
{
LOG(3, "bttp %p lane %u narena %d", bttp, lane, narena);
if ((bttp->arenas = Zalloc(narena * sizeof(*bttp->arenas))) == NULL) {
ERR("!Malloc for %u arenas", narena);
goto err;
}
uint64_t arena_off = 0;
struct arena *arenap = bttp->arenas;
for (unsigned i = 0; i < narena; i++) {
if (read_arena(bttp, lane, arena_off, arenap) < 0)
goto err;
/* prepare for next time around the loop */
arena_off = arenap->nextoff;
arenap++;
}
bttp->laidout = 1;
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp->arenas) {
for (unsigned i = 0; i < bttp->narena; i++) {
if (bttp->arenas[i].flogs)
Free(bttp->arenas[i].flogs);
if (bttp->arenas[i].rtt)
Free((void *)bttp->arenas[i].rtt);
if (bttp->arenas[i].map_locks)
Free((void *)bttp->arenas[i].map_locks);
}
Free(bttp->arenas);
bttp->arenas = NULL;
}
errno = oerrno;
return -1;
}
/*
* internal_lbasize -- (internal) calculate internal LBA size
*/
static inline uint32_t
internal_lbasize(uint32_t external_lbasize)
{
uint32_t internal_lbasize = external_lbasize;
if (internal_lbasize < BTT_MIN_LBA_SIZE)
internal_lbasize = BTT_MIN_LBA_SIZE;
internal_lbasize =
roundup(internal_lbasize, BTT_INTERNAL_LBA_ALIGNMENT);
/* check for overflow */
if (internal_lbasize < BTT_INTERNAL_LBA_ALIGNMENT) {
errno = EINVAL;
ERR("!Invalid lba size after alignment: %u ", internal_lbasize);
return 0;
}
return internal_lbasize;
}
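/*
 * Worked example (added for illustration; assumes the usual constants
 * BTT_MIN_LBA_SIZE == 512 and BTT_INTERNAL_LBA_ALIGNMENT == 256): an
 * external LBA size of 520 is already >= 512, so it is only rounded up
 * to a multiple of 256, giving 768; a plain 512-byte external size
 * stays 512.
 */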
/*
* btt_flog_size -- calculate flog data size
*/
uint64_t
btt_flog_size(uint32_t nfree)
{
uint64_t flog_size = nfree * roundup(2 * sizeof(struct btt_flog),
BTT_FLOG_PAIR_ALIGN);
return roundup(flog_size, BTT_ALIGNMENT);
}
/*
* btt_map_size -- calculate map data size
*/
uint64_t
btt_map_size(uint32_t external_nlba)
{
return roundup(external_nlba * BTT_MAP_ENTRY_SIZE, BTT_ALIGNMENT);
}
/*
 * btt_arena_datasize -- arena size minus the BTT Info header, its backup
 * and the flog -- that is, the space available for the data blocks and map
*/
uint64_t
btt_arena_datasize(uint64_t arena_size, uint32_t nfree)
{
return arena_size - 2 * sizeof(struct btt_info) - btt_flog_size(nfree);
}
/*
* btt_info_set_params -- (internal) calculate and set BTT Info
* external_lbasize, internal_lbasize, nfree, infosize, external_nlba and
* internal_nlba
*/
static int
btt_info_set_params(struct btt_info *info, uint32_t external_lbasize,
uint32_t internal_lbasize, uint32_t nfree, uint64_t arena_size)
{
info->external_lbasize = external_lbasize;
info->internal_lbasize = internal_lbasize;
info->nfree = nfree;
info->infosize = sizeof(*info);
uint64_t arena_data_size = btt_arena_datasize(arena_size, nfree);
/* allow for map alignment padding */
uint64_t internal_nlba = (arena_data_size - BTT_ALIGNMENT) /
(info->internal_lbasize + BTT_MAP_ENTRY_SIZE);
/* ensure the number of blocks is at least 2*nfree */
if (internal_nlba < 2 * nfree) {
errno = EINVAL;
ERR("!number of internal blocks: %" PRIu64
" expected at least %u",
internal_nlba, 2 * nfree);
return -1;
}
ASSERT(internal_nlba <= UINT32_MAX);
uint32_t internal_nlba_u32 = (uint32_t)internal_nlba;
info->internal_nlba = internal_nlba_u32;
/* external LBA does not include free blocks */
info->external_nlba = internal_nlba_u32 - info->nfree;
ASSERT((arena_data_size - btt_map_size(info->external_nlba)) /
internal_lbasize >= internal_nlba);
return 0;
}
/*
* btt_info_set_offs -- (internal) calculate and set the BTT Info dataoff,
* nextoff, infooff, flogoff and mapoff. These are all relative to the
* beginning of the arena.
*/
static void
btt_info_set_offs(struct btt_info *info, uint64_t arena_size,
uint64_t space_left)
{
info->dataoff = info->infosize;
/* set offset to next valid arena */
if (space_left >= BTT_MIN_SIZE)
info->nextoff = arena_size;
else
info->nextoff = 0;
info->infooff = arena_size - sizeof(struct btt_info);
info->flogoff = info->infooff - btt_flog_size(info->nfree);
info->mapoff = info->flogoff - btt_map_size(info->external_nlba);
ASSERTeq(btt_arena_datasize(arena_size, info->nfree) -
btt_map_size(info->external_nlba), info->mapoff -
info->dataoff);
}
/*
* btt_info_set -- set BTT Info params and offsets
*/
int
btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left)
{
/* calculate internal LBA size */
uint32_t internal_lba_size = internal_lbasize(external_lbasize);
if (internal_lba_size == 0)
return -1;
/* set params and offsets */
if (btt_info_set_params(info, external_lbasize,
internal_lba_size, nfree, arena_size))
return -1;
btt_info_set_offs(info, arena_size, space_left);
return 0;
}
/*
* write_layout -- (internal) write out the initial btt metadata layout
*
* Called with write == 1 only once in the life time of a btt namespace, when
* the first write happens. The caller of this routine is responsible for
* locking out multiple threads. This routine doesn't read anything -- by the
* time it is called, it is known there's no layout in the namespace and a new
* layout should be written.
*
* Calling with write == 0 tells this routine to do the calculations for
* bttp->narena and bttp->nlba, but don't write out any metadata.
*
 * If successful, sets bttp->laidout to 1 and returns 0. Otherwise -1
 * is returned and errno is set, and bttp->laidout remains 0 so that
* later attempts to write will try again to create the layout.
*/
static int
write_layout(struct btt *bttp, unsigned lane, int write)
{
LOG(3, "bttp %p lane %u write %d", bttp, lane, write);
ASSERT(bttp->rawsize >= BTT_MIN_SIZE);
ASSERT(bttp->nfree);
/*
* If a new layout is being written, generate the BTT's UUID.
*/
if (write) {
int ret = util_uuid_generate(bttp->uuid);
if (ret < 0) {
LOG(2, "util_uuid_generate failed");
return -1;
}
}
/*
	 * The number of arenas is the number of full arenas of
	 * size BTT_MAX_ARENA that fit into rawsize; if the
	 * remainder is at least BTT_MIN_SIZE, that adds one
	 * more arena.
*/
bttp->narena = (unsigned)(bttp->rawsize / BTT_MAX_ARENA);
if (bttp->rawsize % BTT_MAX_ARENA >= BTT_MIN_SIZE)
bttp->narena++;
LOG(4, "narena %u", bttp->narena);
uint32_t internal_lba_size = internal_lbasize(bttp->lbasize);
if (internal_lba_size == 0)
return -1;
LOG(4, "adjusted internal_lbasize %u", internal_lba_size);
uint64_t total_nlba = 0;
uint64_t rawsize = bttp->rawsize;
unsigned arena_num = 0;
uint64_t arena_off = 0;
/*
* for each arena...
*/
while (rawsize >= BTT_MIN_SIZE) {
LOG(4, "layout arena %u", arena_num);
uint64_t arena_rawsize = rawsize;
if (arena_rawsize > BTT_MAX_ARENA) {
arena_rawsize = BTT_MAX_ARENA;
}
rawsize -= arena_rawsize;
arena_num++;
struct btt_info info;
memset(&info, '\0', sizeof(info));
if (btt_info_set_params(&info, bttp->lbasize,
internal_lba_size, bttp->nfree, arena_rawsize))
return -1;
LOG(4, "internal_nlba %u external_nlba %u",
info.internal_nlba, info.external_nlba);
total_nlba += info.external_nlba;
/*
* The rest of the loop body calculates metadata structures
		 * and lays them out for this arena. So only continue if
* the write flag is set.
*/
if (!write)
continue;
btt_info_set_offs(&info, arena_rawsize, rawsize);
LOG(4, "nextoff 0x%016" PRIx64, info.nextoff);
LOG(4, "dataoff 0x%016" PRIx64, info.dataoff);
LOG(4, "mapoff 0x%016" PRIx64, info.mapoff);
LOG(4, "flogoff 0x%016" PRIx64, info.flogoff);
LOG(4, "infooff 0x%016" PRIx64, info.infooff);
/* zero map if ns is not zero-initialized */
if (!bttp->ns_cbp->ns_is_zeroed) {
uint64_t mapsize = btt_map_size(info.external_nlba);
if ((*bttp->ns_cbp->nszero)(bttp->ns, lane, mapsize,
info.mapoff) < 0)
return -1;
}
/* write out the initial flog */
uint64_t flog_entry_off = arena_off + info.flogoff;
uint32_t next_free_lba = info.external_nlba;
for (uint32_t i = 0; i < bttp->nfree; i++) {
struct btt_flog flog;
flog.lba = htole32(i);
flog.old_map = flog.new_map =
htole32(next_free_lba | BTT_MAP_ENTRY_ZERO);
flog.seq = htole32(1);
/*
* Write both btt_flog structs in the pair, writing
* the second one as all zeros.
*/
LOG(6, "flog[%u] entry off %" PRIu64
" initial %u + zero = %u",
i, flog_entry_off,
next_free_lba,
next_free_lba | BTT_MAP_ENTRY_ZERO);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &flog,
sizeof(flog), flog_entry_off) < 0)
return -1;
flog_entry_off += sizeof(flog);
LOG(6, "flog[%u] entry off %" PRIu64 " zeros",
i, flog_entry_off);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &Zflog,
sizeof(Zflog), flog_entry_off) < 0)
return -1;
flog_entry_off += sizeof(flog);
flog_entry_off = roundup(flog_entry_off,
BTT_FLOG_PAIR_ALIGN);
next_free_lba++;
}
/*
* Construct the BTT info block and write it out
* at both the beginning and end of the arena.
*/
memcpy(info.sig, Sig, BTTINFO_SIG_LEN);
memcpy(info.uuid, bttp->uuid, BTTINFO_UUID_LEN);
memcpy(info.parent_uuid, bttp->parent_uuid, BTTINFO_UUID_LEN);
info.major = BTTINFO_MAJOR_VERSION;
info.minor = BTTINFO_MINOR_VERSION;
btt_info_convert2le(&info);
util_checksum(&info, sizeof(info), &info.checksum, 1, 0);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0)
return -1;
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off + info.infooff) < 0)
return -1;
arena_off += info.nextoff;
}
ASSERTeq(bttp->narena, arena_num);
bttp->nlba = total_nlba;
if (write) {
/*
* The layout is written now, so load up the arenas.
*/
return read_arenas(bttp, lane, bttp->narena);
}
return 0;
}
/*
* read_layout -- (internal) load up layout info from btt namespace
*
* Called once when the btt namespace is opened for use.
 * Sets bttp->laidout to 0 if no valid layout is found, 1 otherwise.
*
* Any recovery actions required (as indicated by the flog state) are
* performed by this routine.
*
* Any quick checks for layout consistency are performed by this routine
* (quick enough to be done each time a BTT area is opened for use, not
* like the slow consistency checks done by btt_check()).
*
* Returns 0 if no errors are encountered accessing the namespace (in this
* context, detecting there's no layout is not an error if the nsread function
* didn't have any problems doing the reads). Otherwise, -1 is returned
* and errno is set.
*/
static int
read_layout(struct btt *bttp, unsigned lane)
{
LOG(3, "bttp %p", bttp);
ASSERT(bttp->rawsize >= BTT_MIN_SIZE);
unsigned narena = 0;
uint32_t smallest_nfree = UINT32_MAX;
uint64_t rawsize = bttp->rawsize;
uint64_t total_nlba = 0;
uint64_t arena_off = 0;
bttp->nfree = BTT_DEFAULT_NFREE;
/*
* For each arena, see if there's a valid info block
*/
while (rawsize >= BTT_MIN_SIZE) {
narena++;
struct btt_info info;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0)
return -1;
if (!read_info(bttp, &info)) {
/*
* Failed to find complete BTT metadata. Just
* calculate the narena and nlba values that will
* result when write_layout() gets called. This
* allows checks against nlba to work correctly
* even before the layout is written.
*/
return write_layout(bttp, lane, 0);
}
if (info.external_lbasize != bttp->lbasize) {
/* can't read it assuming the wrong block size */
ERR("inconsistent lbasize");
errno = EINVAL;
return -1;
}
if (info.nfree == 0) {
ERR("invalid nfree");
errno = EINVAL;
return -1;
}
if (info.external_nlba == 0) {
ERR("invalid external_nlba");
errno = EINVAL;
return -1;
}
if (info.nextoff && (info.nextoff != BTT_MAX_ARENA)) {
ERR("invalid arena size");
errno = EINVAL;
return -1;
}
if (info.nfree < smallest_nfree)
smallest_nfree = info.nfree;
total_nlba += info.external_nlba;
arena_off += info.nextoff;
if (info.nextoff == 0)
break;
if (info.nextoff > rawsize) {
ERR("invalid next arena offset");
errno = EINVAL;
return -1;
}
rawsize -= info.nextoff;
}
ASSERT(narena);
bttp->narena = narena;
bttp->nlba = total_nlba;
/*
* All arenas were valid. nfree should be the smallest value found
* among different arenas.
*/
if (smallest_nfree < bttp->nfree)
bttp->nfree = smallest_nfree;
/*
* Load up arenas.
*/
return read_arenas(bttp, lane, narena);
}
/*
* zero_block -- (internal) satisfy a read with a block of zeros
*
* Returns 0 on success, otherwise -1/errno.
*/
static int
zero_block(struct btt *bttp, void *buf)
{
LOG(3, "bttp %p", bttp);
memset(buf, '\0', bttp->lbasize);
return 0;
}
/*
* lba_to_arena_lba -- (internal) calculate the arena & pre-map LBA
*
* This routine takes the external LBA and matches it to the
* appropriate arena, adjusting the lba for use within that arena.
*
* If successful, zero is returned, *arenapp is a pointer to the appropriate
* arena struct in the run-time state, and *premap_lbap is the LBA adjusted
* to an arena-internal LBA (also known as the pre-map LBA). Otherwise
* -1/errno.
*/
static int
lba_to_arena_lba(struct btt *bttp, uint64_t lba,
struct arena **arenapp, uint32_t *premap_lbap)
{
LOG(3, "bttp %p lba %" PRIu64, bttp, lba);
ASSERT(bttp->laidout);
unsigned arena;
for (arena = 0; arena < bttp->narena; arena++)
if (lba < bttp->arenas[arena].external_nlba)
break;
else
lba -= bttp->arenas[arena].external_nlba;
ASSERT(arena < bttp->narena);
*arenapp = &bttp->arenas[arena];
ASSERT(lba <= UINT32_MAX);
*premap_lbap = (uint32_t)lba;
LOG(3, "arenap %p pre-map LBA %u", *arenapp, *premap_lbap);
return 0;
}
/*
* btt_init -- prepare a btt namespace for use, returning an opaque handle
*
* Returns handle on success, otherwise NULL/errno.
*
 * When handed a pristine namespace, it is formatted implicitly the first
 * time it is written to.
 *
 * If arenas have different nfree values, the lowest value found is used,
 * since it limits the overall number of concurrent lanes ("bandwidth").
*/
struct btt *
btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp)
{
LOG(3, "rawsize %" PRIu64 " lbasize %u", rawsize, lbasize);
if (rawsize < BTT_MIN_SIZE) {
ERR("rawsize smaller than BTT_MIN_SIZE %u", BTT_MIN_SIZE);
errno = EINVAL;
return NULL;
}
struct btt *bttp = Zalloc(sizeof(*bttp));
if (bttp == NULL) {
ERR("!Malloc %zu bytes", sizeof(*bttp));
return NULL;
}
util_mutex_init(&bttp->layout_write_mutex);
memcpy(bttp->parent_uuid, parent_uuid, BTTINFO_UUID_LEN);
bttp->rawsize = rawsize;
bttp->lbasize = lbasize;
bttp->ns = ns;
bttp->ns_cbp = ns_cbp;
/*
* Load up layout, if it exists.
*
* Whether read_layout() finds a valid layout or not, it finishes
* updating these layout-related fields:
* bttp->nfree
* bttp->nlba
* bttp->narena
	 * since these fields are used even before a valid layout is written.
*/
if (read_layout(bttp, 0) < 0) {
btt_fini(bttp); /* free up any allocations */
return NULL;
}
bttp->nlane = bttp->nfree;
/* maxlane, if provided, is an upper bound on nlane */
if (maxlane && bttp->nlane > maxlane)
bttp->nlane = maxlane;
LOG(3, "success, bttp %p nlane %u", bttp, bttp->nlane);
return bttp;
}
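/*
 * Typical call sequence (illustrative sketch, not part of the original
 * source); "ns", "cbs", "uuid", "rawsize" and "buf" stand for
 * caller-provided values:
 *
 *	struct btt *bttp = btt_init(rawsize, 512, uuid, 0, ns, &cbs);
 *	if (bttp == NULL)
 *		return -1;
 *	unsigned nlane = btt_nlane(bttp);
 *	// each thread uses its own lane number in [0, nlane):
 *	btt_write(bttp, lane, 0, buf);
 *	btt_read(bttp, lane, 0, buf);
 *	btt_fini(bttp);
 */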
/*
* btt_nlane -- return the number of "lanes" for this btt namespace
*
* The number of lanes is the number of threads allowed in this module
* concurrently for a given btt. Each thread executing this code must
* have a unique "lane" number assigned to it between 0 and btt_nlane() - 1.
*/
unsigned
btt_nlane(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
return bttp->nlane;
}
/*
* btt_nlba -- return the number of usable blocks in a btt namespace
*
* Valid LBAs to pass to btt_read() and btt_write() are 0 through
* btt_nlba() - 1.
*/
size_t
btt_nlba(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
return bttp->nlba;
}
/*
* btt_read -- read a block from a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
if (invalid_lba(bttp, lba))
return -1;
/* if there's no layout written yet, all reads come back as zeros */
if (!bttp->laidout)
return zero_block(bttp, buf);
/* find which arena LBA lives in, and the offset to the map entry */
struct arena *arenap;
uint32_t premap_lba;
uint64_t map_entry_off;
if (lba_to_arena_lba(bttp, lba, &arenap, &premap_lba) < 0)
return -1;
/* convert pre-map LBA into an offset into the map */
map_entry_off = arenap->mapoff + BTT_MAP_ENTRY_SIZE * premap_lba;
/*
* Read the current map entry to get the post-map LBA for the data
* block read.
*/
uint32_t entry;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &entry,
sizeof(entry), map_entry_off) < 0)
return -1;
entry = le32toh(entry);
/*
* Retries come back to the top of this loop (for a rare case where
* the map is changed by another thread doing writes to the same LBA).
*/
while (1) {
if (map_entry_is_error(entry)) {
ERR("EIO due to map entry error flag");
errno = EIO;
return -1;
}
if (map_entry_is_zero_or_initial(entry))
return zero_block(bttp, buf);
/*
* Record the post-map LBA in the read tracking table during
* the read. The write will check entries in the read tracking
* table before allocating a block for a write, waiting for
* outstanding reads on that block to complete.
*
* Since we already checked for error, zero, and initial
* states above, the entry must have both error and zero
* bits set at this point (BTT_MAP_ENTRY_NORMAL). We store
* the entry that way, with those bits set, in the rtt and
* btt_write() will check for it the same way, with the bits
* both set.
*/
arenap->rtt[lane] = entry;
util_synchronize();
/*
* In case this thread was preempted between reading entry and
* storing it in the rtt, check to see if the map changed. If
* it changed, the block about to be read is at least free now
* (in the flog, but that's okay since the data will still be
* undisturbed) and potentially allocated and being used for
* another write (data disturbed, so not okay to continue).
*/
uint32_t latest_entry;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &latest_entry,
sizeof(latest_entry), map_entry_off) < 0) {
arenap->rtt[lane] = BTT_MAP_ENTRY_ERROR;
return -1;
}
latest_entry = le32toh(latest_entry);
if (entry == latest_entry)
break; /* map stayed the same */
else
entry = latest_entry; /* try again */
}
/*
* It is safe to read the block now, since the rtt protects the
* block from getting re-allocated to something else by a write.
*/
uint64_t data_block_off =
arenap->dataoff + (uint64_t)(entry & BTT_MAP_ENTRY_LBA_MASK) *
arenap->internal_lbasize;
int readret = (*bttp->ns_cbp->nsread)(bttp->ns, lane, buf,
bttp->lbasize, data_block_off);
/* done with read, so clear out rtt entry */
arenap->rtt[lane] = BTT_MAP_ENTRY_ERROR;
return readret;
}
/*
* map_lock -- (internal) grab the map_lock and read a map entry
*/
static int
map_lock(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t *entryp, uint32_t premap_lba)
{
LOG(3, "bttp %p lane %u arenap %p premap_lba %u",
bttp, lane, arenap, premap_lba);
uint64_t map_entry_off =
arenap->mapoff + BTT_MAP_ENTRY_SIZE * premap_lba;
uint32_t map_lock_num = get_map_lock_num(premap_lba, bttp->nfree);
util_mutex_lock(&arenap->map_locks[map_lock_num]);
/* read the old map entry */
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, entryp,
sizeof(uint32_t), map_entry_off) < 0) {
util_mutex_unlock(&arenap->map_locks[map_lock_num]);
return -1;
}
/* if map entry is in its initial state return premap_lba */
if (map_entry_is_initial(*entryp))
*entryp = htole32(premap_lba | BTT_MAP_ENTRY_NORMAL);
LOG(9, "locked map[%d]: %u%s%s", premap_lba,
*entryp & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(*entryp)) ? " ERROR" : "",
(map_entry_is_zero(*entryp)) ? " ZERO" : "");
return 0;
}
/*
* map_abort -- (internal) drop the map_lock without updating the entry
*/
static void
map_abort(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t premap_lba)
{
LOG(3, "bttp %p lane %u arenap %p premap_lba %u",
bttp, lane, arenap, premap_lba);
util_mutex_unlock(&arenap->map_locks[get_map_lock_num(premap_lba,
bttp->nfree)]);
}
/*
* map_unlock -- (internal) update the map and drop the map_lock
*/
static int
map_unlock(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t entry, uint32_t premap_lba)
{
LOG(3, "bttp %p lane %u arenap %p entry %u premap_lba %u",
bttp, lane, arenap, entry, premap_lba);
uint64_t map_entry_off =
arenap->mapoff + BTT_MAP_ENTRY_SIZE * premap_lba;
/* write the new map entry */
int err = (*bttp->ns_cbp->nswrite)(bttp->ns, lane, &entry,
sizeof(uint32_t), map_entry_off);
util_mutex_unlock(&arenap->map_locks[get_map_lock_num(premap_lba,
bttp->nfree)]);
LOG(9, "unlocked map[%d]: %u%s%s", premap_lba,
entry & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(entry)) ? " ERROR" : "",
(map_entry_is_zero(entry)) ? " ZERO" : "");
return err;
}
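/*
 * Illustrative sketch of the lock striping behind map_lock()/map_abort()/
 * map_unlock() above: the arena keeps nfree map locks and every premap LBA
 * hashes to exactly one of them, so updates to unrelated map entries do not
 * serialize on a single lock. One plausible shape for such a mapping is a
 * plain modulo (the real get_map_lock_num(), defined earlier in this file,
 * may instead stripe by the cache line of the map entry):
 */
static inline uint32_t
map_lock_num_sketch(uint32_t premap_lba, uint32_t nfree)
{
	/* assumption: simple modulo striping, for illustration only */
	return premap_lba % nfree;
}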
/*
* btt_write -- write a block to a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
if (invalid_lba(bttp, lba))
return -1;
/* first write through here will initialize the metadata layout */
if (!bttp->laidout) {
int err = 0;
util_mutex_lock(&bttp->layout_write_mutex);
if (!bttp->laidout)
err = write_layout(bttp, lane, 1);
util_mutex_unlock(&bttp->layout_write_mutex);
if (err < 0)
return err;
}
/* find which arena LBA lives in, and the offset to the map entry */
struct arena *arenap;
uint32_t premap_lba;
if (lba_to_arena_lba(bttp, lba, &arenap, &premap_lba) < 0)
return -1;
/* if the arena is in an error state, writing is not allowed */
if (arenap->flags & BTTINFO_FLAG_ERROR_MASK) {
ERR("EIO due to btt_info error flags 0x%x",
arenap->flags & BTTINFO_FLAG_ERROR_MASK);
errno = EIO;
return -1;
}
/*
* This routine was passed a unique "lane" which is an index
* into the flog. That means the free block held by flog[lane]
* is assigned to this thread and to no other threads (no additional
* locking required). So start by performing the write to the
* free block. It is only safe to write to a free block if it
* doesn't appear in the read tracking table, so scan that first
* and if found, wait for the thread reading from it to finish.
*/
uint32_t free_entry = (arenap->flogs[lane].flog.old_map &
BTT_MAP_ENTRY_LBA_MASK) | BTT_MAP_ENTRY_NORMAL;
LOG(3, "free_entry %u (before mask %u)", free_entry,
arenap->flogs[lane].flog.old_map);
/* wait for other threads to finish any reads on free block */
for (unsigned i = 0; i < bttp->nlane; i++)
while (arenap->rtt[i] == free_entry)
;
/* it is now safe to perform write to the free block */
uint64_t data_block_off = arenap->dataoff +
(uint64_t)(free_entry & BTT_MAP_ENTRY_LBA_MASK) *
arenap->internal_lbasize;
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, buf,
bttp->lbasize, data_block_off) < 0)
return -1;
/*
* Make the new block active atomically by updating the on-media flog
* and then updating the map.
*/
uint32_t old_entry;
if (map_lock(bttp, lane, arenap, &old_entry, premap_lba) < 0)
return -1;
old_entry = le32toh(old_entry);
/* update the flog */
if (flog_update(bttp, lane, arenap, premap_lba,
old_entry, free_entry) < 0) {
map_abort(bttp, lane, arenap, premap_lba);
return -1;
}
if (map_unlock(bttp, lane, arenap, htole32(free_entry),
premap_lba) < 0) {
/*
* A critical write error occurred, set the arena's
* info block error bit.
*/
set_arena_error(bttp, arenap, lane);
errno = EIO;
return -1;
}
return 0;
}
/*
* map_entry_setf -- (internal) set a given flag on a map entry
*
* Returns 0 on success, otherwise -1/errno.
*/
static int
map_entry_setf(struct btt *bttp, unsigned lane, uint64_t lba, uint32_t setf)
{
LOG(3, "bttp %p lane %u lba %" PRIu64 " setf 0x%x",
bttp, lane, lba, setf);
if (invalid_lba(bttp, lba))
return -1;
if (!bttp->laidout) {
/*
* No layout is written yet. If the flag being set
* is the zero flag, it is superfluous since all blocks
* read as zero at this point.
*/
if (setf == BTT_MAP_ENTRY_ZERO)
return 0;
/*
* Treat this like the first write and write out
* the metadata layout at this point.
*/
int err = 0;
util_mutex_lock(&bttp->layout_write_mutex);
if (!bttp->laidout)
err = write_layout(bttp, lane, 1);
util_mutex_unlock(&bttp->layout_write_mutex);
if (err < 0)
return err;
}
/* find which arena LBA lives in, and the offset to the map entry */
struct arena *arenap;
uint32_t premap_lba;
if (lba_to_arena_lba(bttp, lba, &arenap, &premap_lba) < 0)
return -1;
/* if the arena is in an error state, writing is not allowed */
if (arenap->flags & BTTINFO_FLAG_ERROR_MASK) {
ERR("EIO due to btt_info error flags 0x%x",
arenap->flags & BTTINFO_FLAG_ERROR_MASK);
errno = EIO;
return -1;
}
/*
* Set the flags in the map entry. To do this, read the
* current map entry, set the flags, and write out the update.
*/
uint32_t old_entry;
uint32_t new_entry;
if (map_lock(bttp, lane, arenap, &old_entry, premap_lba) < 0)
return -1;
old_entry = le32toh(old_entry);
if (setf == BTT_MAP_ENTRY_ZERO &&
map_entry_is_zero_or_initial(old_entry)) {
map_abort(bttp, lane, arenap, premap_lba);
return 0; /* block already zero, nothing to do */
}
/* create the new map entry */
new_entry = (old_entry & BTT_MAP_ENTRY_LBA_MASK) | setf;
if (map_unlock(bttp, lane, arenap, htole32(new_entry), premap_lba) < 0)
return -1;
return 0;
}
/*
* btt_set_zero -- mark a block as zeroed in a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
return map_entry_setf(bttp, lane, lba, BTT_MAP_ENTRY_ZERO);
}
/*
* btt_set_error -- mark a block as in an error state in a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
return map_entry_setf(bttp, lane, lba, BTT_MAP_ENTRY_ERROR);
}
/*
* check_arena -- (internal) perform a consistency check on an arena
*/
static int
check_arena(struct btt *bttp, struct arena *arenap)
{
LOG(3, "bttp %p arenap %p", bttp, arenap);
int consistent = 1;
uint64_t map_entry_off = arenap->mapoff;
uint32_t bitmapsize = howmany(arenap->internal_nlba, 8);
uint8_t *bitmap = Zalloc(bitmapsize);
if (bitmap == NULL) {
ERR("!Malloc for bitmap");
return -1;
}
/*
* Go through every post-map LBA mentioned in the map and make sure
* there are no duplicates. bitmap is used to track which LBAs have
* been seen so far.
*/
uint32_t *mapp = NULL;
ssize_t mlen;
int next_index = 0;
size_t remaining = 0;
for (uint32_t i = 0; i < arenap->external_nlba; i++) {
uint32_t entry;
if (remaining == 0) {
/* request a mapping of remaining map area */
size_t req_len =
(arenap->external_nlba - i) * sizeof(uint32_t);
mlen = (*bttp->ns_cbp->nsmap)(bttp->ns, 0,
(void **)&mapp, req_len, map_entry_off);
			if (mlen < 0) {
				Free(bitmap);
				return -1;
			}
remaining = (size_t)mlen;
next_index = 0;
}
entry = le32toh(mapp[next_index]);
/* for debug, dump non-zero map entries at log level 11 */
if (map_entry_is_zero_or_initial(entry) == 0)
LOG(11, "map[%d]: %u%s", i,
entry & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(entry)) ? " ERROR" : "");
/* this is an uninitialized map entry, set the default value */
if (map_entry_is_initial(entry))
entry = i;
else
entry &= BTT_MAP_ENTRY_LBA_MASK;
/* check if entry is valid */
		if (entry >= arenap->internal_nlba) {
			ERR("map[%d] entry out of bounds: %u", i, entry);
			errno = EINVAL;
			Free(bitmap);
			return -1;
		}
if (util_isset(bitmap, entry)) {
ERR("map[%d] duplicate entry: %u", i, entry);
consistent = 0;
} else
util_setbit(bitmap, entry);
map_entry_off += sizeof(uint32_t);
next_index++;
ASSERT(remaining >= sizeof(uint32_t));
remaining -= sizeof(uint32_t);
}
/*
* Go through the free blocks in the flog, adding them to bitmap
* and checking for duplications. It is sufficient to read the
* run-time flog here, avoiding more calls to nsread.
*/
for (uint32_t i = 0; i < bttp->nfree; i++) {
uint32_t entry = arenap->flogs[i].flog.old_map;
entry &= BTT_MAP_ENTRY_LBA_MASK;
if (util_isset(bitmap, entry)) {
ERR("flog[%u] duplicate entry: %u", i, entry);
consistent = 0;
} else
util_setbit(bitmap, entry);
}
/*
* Make sure every possible post-map LBA was accounted for
* in the two loops above.
*/
for (uint32_t i = 0; i < arenap->internal_nlba; i++)
if (util_isclr(bitmap, i)) {
ERR("unreferenced lba: %d", i);
consistent = 0;
}
Free(bitmap);
return consistent;
}
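/*
 * Illustrative sketch of the bitmap bookkeeping used by check_arena() above:
 * howmany(internal_nlba, 8) rounds the number of post-map LBAs up to whole
 * bytes, and the set/test helpers reduce to the usual byte-index/bit-mask
 * arithmetic. The real util_setbit()/util_isset() helpers come from the
 * common util headers; the versions below are illustrations only.
 */
static inline void
bitmap_setbit_sketch(uint8_t *bitmap, uint32_t n)
{
	bitmap[n >> 3] = (uint8_t)(bitmap[n >> 3] | (1u << (n & 7)));
}
static inline int
bitmap_isset_sketch(const uint8_t *bitmap, uint32_t n)
{
	return (bitmap[n >> 3] >> (n & 7)) & 1;
}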
/*
* btt_check -- perform a consistency check on a btt namespace
*
* This routine contains a fairly high-impact set of consistency checks.
* It may use a good amount of dynamic memory and CPU time performing
* the checks. Any lightweight, quick consistency checks are included
* in read_layout() so they happen every time the BTT area is opened
* for use.
*
 * Returns 1 if consistent, 0 if inconsistent, and -1 (with errno set) if
 * the check cannot be performed due to other errors.
*
* No lane number required here because only one thread is allowed -- all
* other threads must be locked out of all btt routines for this btt
* namespace while this is running.
*/
int
btt_check(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
int consistent = 1;
if (!bttp->laidout) {
/* consistent by definition */
LOG(3, "no layout yet");
return consistent;
}
/* XXX report issues found during read_layout (from flags) */
/* for each arena... */
struct arena *arenap = bttp->arenas;
for (unsigned i = 0; i < bttp->narena; i++, arenap++) {
/*
* Perform the consistency checks for the arena.
*/
int retval = check_arena(bttp, arenap);
if (retval < 0)
return retval;
else if (retval == 0)
consistent = 0;
}
/* XXX stub */
return consistent;
}
/*
* btt_fini -- delete opaque btt info, done using btt namespace
*/
void
btt_fini(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
if (bttp->arenas) {
for (unsigned i = 0; i < bttp->narena; i++) {
if (bttp->arenas[i].flogs)
Free(bttp->arenas[i].flogs);
if (bttp->arenas[i].rtt)
Free((void *)bttp->arenas[i].rtt);
			if (bttp->arenas[i].map_locks)
				Free((void *)bttp->arenas[i].map_locks);
}
Free(bttp->arenas);
}
Free(bttp);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/btt_layout.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt_layout.h -- block translation table on-media layout definitions
*/
/*
* Layout of BTT info block. All integers are stored little-endian.
*/
#ifndef BTT_LAYOUT_H
#define BTT_LAYOUT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */
#define BTTINFO_SIG_LEN 16
#define BTTINFO_UUID_LEN 16
#define BTTINFO_UNUSED_LEN 3968
#define BTTINFO_SIG "BTT_ARENA_INFO\0"
struct btt_info {
char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */
uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */
uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */
uint32_t flags; /* see flag bits below */
uint16_t major; /* major version */
uint16_t minor; /* minor version */
uint32_t external_lbasize; /* advertised LBA size (bytes) */
uint32_t external_nlba; /* advertised LBAs in this arena */
uint32_t internal_lbasize; /* size of data area blocks (bytes) */
uint32_t internal_nlba; /* number of blocks in data area */
uint32_t nfree; /* number of free blocks */
uint32_t infosize; /* size of this info block */
/*
* The following offsets are relative to the beginning of
* the btt_info block.
*/
uint64_t nextoff; /* offset to next arena (or zero) */
uint64_t dataoff; /* offset to arena data area */
uint64_t mapoff; /* offset to area map */
uint64_t flogoff; /* offset to area flog */
uint64_t infooff; /* offset to backup info block */
char unused[BTTINFO_UNUSED_LEN]; /* must be zero */
uint64_t checksum; /* Fletcher64 of all fields */
};
/*
* Definitions for flags mask for btt_info structure above.
*/
#define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */
#define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */
/*
* Current on-media format versions.
*/
#define BTTINFO_MAJOR_VERSION 1
#define BTTINFO_MINOR_VERSION 1
/*
* Layout of a BTT "flog" entry. All integers are stored little-endian.
*
* The "nfree" field in the BTT info block determines how many of these
* flog entries there are, and each entry consists of two of the following
* structs (entry updates alternate between the two structs), padded up
* to a cache line boundary to isolate adjacent updates.
*/
#define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64)
struct btt_flog {
uint32_t lba; /* last pre-map LBA using this entry */
uint32_t old_map; /* old post-map LBA (the freed block) */
uint32_t new_map; /* new post-map LBA */
uint32_t seq; /* sequence number (01, 10, 11) */
};
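/*
 * Illustrative sketch (not part of the on-media layout): the two halves of
 * a flog pair are told apart by their "seq" fields, which cycle through the
 * non-zero values 01 -> 10 -> 11 -> 01; a seq of 00 marks a half that has
 * never been written. The half whose seq is one step ahead of the other in
 * this cycle is the current one. A next-in-cycle helper could look like:
 */
static inline uint32_t
btt_flog_next_seq_sketch(uint32_t seq)
{
	/* 1 -> 2 -> 3 -> 1; 0 is reserved for a never-written entry */
	return (seq == 3) ? 1 : (seq + 1);
}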
/*
* Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian.
*/
#define BTT_MAP_ENTRY_SIZE 4
#define BTT_MAP_ENTRY_ERROR 0x40000000U
#define BTT_MAP_ENTRY_ZERO 0x80000000U
#define BTT_MAP_ENTRY_NORMAL 0xC0000000U
#define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU
#define BTT_MAP_LOCK_ALIGN ((uintptr_t)64)
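/*
 * Illustrative sketch (not part of the on-media layout): a map entry packs
 * the post-map LBA into the low 30 bits and keeps two flag bits on top.
 * Both flag bits set (BTT_MAP_ENTRY_NORMAL) marks a regular mapped block,
 * only the error or only the zero bit marks an error/zeroed block, and an
 * all-zero flag field is the initial state, read as an identity mapping.
 */
static inline uint32_t
btt_map_entry_lba_sketch(uint32_t entry)
{
	return entry & BTT_MAP_ENTRY_LBA_MASK;	/* post-map LBA */
}
static inline int
btt_map_entry_is_normal_sketch(uint32_t entry)
{
	return (entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_NORMAL;
}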
/*
* BTT layout properties...
*/
#define BTT_MIN_SIZE ((1u << 20) * 16)
#define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */
#define BTT_MIN_LBA_SIZE (size_t)512
#define BTT_INTERNAL_LBA_ALIGNMENT 256U
#define BTT_DEFAULT_NFREE 256
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/libpmemblk_main.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* libpmemblk_main.c -- entry point for libpmemblk.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include <windows.h>	/* HINSTANCE, DWORD, DLL_*; harmless if already force-included */
void libpmemblk_init(void);
void libpmemblk_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemblk_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemblk_fini();
break;
}
return TRUE;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/blk.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* blk.c -- block memory pool entry points for libpmem
*/
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <endian.h>
#include <stdbool.h>
#include "libpmem.h"
#include "libpmemblk.h"
#include "mmap.h"
#include "set.h"
#include "out.h"
#include "btt.h"
#include "blk.h"
#include "util.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"
static const struct pool_attr Blk_create_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Blk_open_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
/*
* lane_enter -- (internal) acquire a unique lane number
*/
static void
lane_enter(PMEMblkpool *pbp, unsigned *lane)
{
unsigned mylane;
mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane;
/* lane selected, grab the per-lane lock */
util_mutex_lock(&pbp->locks[mylane]);
*lane = mylane;
}
/*
* lane_exit -- (internal) drop lane lock
*/
static void
lane_exit(PMEMblkpool *pbp, unsigned mylane)
{
util_mutex_unlock(&pbp->locks[mylane]);
}
/*
* nsread -- (internal) read data from the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)pbp->data + off, count);
return 0;
}
/*
* nswrite -- (internal) write data to the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nswrite(void *ns, unsigned lane, const void *buf, size_t count,
uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
#ifdef DEBUG
/* grab debug write lock */
util_mutex_lock(&pbp->write_lock);
#endif
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
if (pbp->is_pmem)
pmem_memcpy_nodrain(dest, buf, count);
else
memcpy(dest, buf, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
#ifdef DEBUG
/* release debug write lock */
util_mutex_unlock(&pbp->write_lock);
#endif
if (pbp->is_pmem)
pmem_drain();
else
pmem_msync(dest, count);
return 0;
}
/*
* nsmap -- (internal) allow direct access to a range of a namespace
*
* The caller requests a range to be "mapped" but the return value
* may indicate a smaller amount (in which case the caller is expected
* to call back later for another mapping).
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off);
ASSERT(((ssize_t)len) >= 0);
if (off + len >= pbp->datasize) {
ERR("offset + len (%zu) past end of data area (%zu)",
(size_t)off + len, pbp->datasize - 1);
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)pbp->data + off;
LOG(12, "returning addr %p", *addrp);
return (ssize_t)len;
}
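/*
 * Illustrative sketch (never called): how a consumer of nsmap() is expected
 * to behave -- ask for everything still needed, accept a possibly shorter
 * mapping, and call back later for the rest. check_arena() in btt.c walks
 * the on-media map with exactly this pattern.
 */
static inline ssize_t
nsmap_consume_sketch(void *ns, size_t need, uint64_t off)
{
	void *addr;
	ssize_t mlen = nsmap(ns, 0, &addr, need, off);
	if (mlen < 0)
		return -1;
	/* only the first mlen bytes at addr are valid; ask again for more */
	return mlen;
}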
/*
* nssync -- (internal) flush changes made to a namespace range
*
* This is used in conjunction with the addresses handed out by
* nsmap() above. There's no need to sync things written via
* nswrite() since those changes are flushed each time nswrite()
* is called.
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len);
if (pbp->is_pmem)
pmem_persist(addr, len);
else
pmem_msync(addr, len);
}
/*
* nszero -- (internal) zero data in the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* zero the memory pool containing the BTT layout.
*/
static int
nszero(void *ns, unsigned lane, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
pmem_memset_persist(dest, 0, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
return 0;
}
/* callbacks for btt_init() */
static struct ns_callback ns_cb = {
.nsread = nsread,
.nswrite = nswrite,
.nszero = nszero,
.nsmap = nsmap,
.nssync = nssync,
.ns_is_zeroed = 0
};
/*
* blk_descr_create -- (internal) create block memory pool descriptor
*/
static void
blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed)
{
LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed);
/* create the required metadata */
pbp->bsize = htole32(bsize);
util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize));
pbp->is_zeroed = zeroed;
util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed));
}
/*
* blk_descr_check -- (internal) validate block memory pool descriptor
*/
static int
blk_descr_check(PMEMblkpool *pbp, size_t *bsize)
{
LOG(3, "pbp %p bsize %zu", pbp, *bsize);
size_t hdr_bsize = le32toh(pbp->bsize);
if (*bsize && *bsize != hdr_bsize) {
ERR("wrong bsize (%zu), pool created with bsize %zu",
*bsize, hdr_bsize);
errno = EINVAL;
return -1;
}
*bsize = hdr_bsize;
LOG(3, "using block size from header: %zu", *bsize);
return 0;
}
/*
* blk_runtime_init -- (internal) initialize block memory pool runtime data
*/
static int
blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly)
{
LOG(3, "pbp %p bsize %zu rdonly %d",
pbp, bsize, rdonly);
/* remove volatile part of header */
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
sizeof(struct pool_hdr) -
sizeof(pbp->bsize) -
sizeof(pbp->is_zeroed));
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file, it is always
* created here, so no need to worry about byte-order.
*/
pbp->rdonly = rdonly;
pbp->data = (char *)pbp->addr +
roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN);
ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data);
pbp->datasize = (size_t)
(((char *)pbp->addr + pbp->size) - (char *)pbp->data);
LOG(4, "data area %p data size %zu bsize %zu",
pbp->data, pbp->datasize, bsize);
long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1)
ncpus = 1;
ns_cb.ns_is_zeroed = pbp->is_zeroed;
	/* things freed by "goto err" if not NULL */
struct btt *bttp = NULL;
os_mutex_t *locks = NULL;
bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid,
(unsigned)ncpus * 2, pbp, &ns_cb);
if (bttp == NULL)
goto err; /* btt_init set errno, called LOG */
pbp->bttp = bttp;
pbp->nlane = btt_nlane(pbp->bttp);
pbp->next_lane = 0;
if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) {
ERR("!Malloc for lane locks");
goto err;
}
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_init(&locks[i]);
pbp->locks = locks;
#ifdef DEBUG
/* initialize debug lock */
util_mutex_init(&pbp->write_lock);
#endif
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax);
/* the data area should be kept read-only for debug version */
RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax);
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp)
btt_fini(bttp);
errno = oerrno;
return -1;
}
/*
* pmemblk_createU -- create a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
LOG(3, "path %s bsize %zu poolsize %zu mode %o",
path, bsize, poolsize, mode);
/* check if bsize is valid */
if (bsize == 0) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
if (bsize > UINT32_MAX) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
struct pool_set *set;
struct pool_attr adj_pool_attr = Blk_create_attr;
/* force set SDS feature */
if (SDS_at_create)
adj_pool_attr.features.incompat |= POOL_FEAT_SDS;
else
adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS;
if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL,
PMEMBLK_MIN_PART, &adj_pool_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
/* create pool descriptor */
blk_descr_create(pbp, (uint32_t)bsize, set->zeroed);
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, 0) != 0) {
ERR("pool initialization failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DELETE_CREATED_PARTS);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_create -- create a block memory pool
*/
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
return pmemblk_createU(path, bsize, poolsize, mode);
}
#else
/*
* pmemblk_createW -- create a block memory pool
*/
PMEMblkpool *
pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* blk_open_common -- (internal) open a block memory pool
*
* This routine does all the work, but takes a cow flag so internal
* calls can map a read-only pool if required.
*
* Passing in bsize == 0 means a valid pool header must exist (which
* will supply the block size).
*/
static PMEMblkpool *
blk_open_common(const char *path, size_t bsize, unsigned flags)
{
LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr,
NULL, NULL, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (blk_descr_check(pbp, &bsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) {
ERR("pool initialization failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* pmemblk_openU -- open a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_openU(const char *path, size_t bsize)
{
LOG(3, "path %s bsize %zu", path, bsize);
return blk_open_common(path, bsize, COW_at_open ? POOL_OPEN_COW : 0);
}
#ifndef _WIN32
/*
* pmemblk_open -- open a block memory pool
*/
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
return pmemblk_openU(path, bsize);
}
#else
/*
* pmemblk_openW -- open a block memory pool
*/
PMEMblkpool *
pmemblk_openW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_openU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_close -- close a block memory pool
*/
void
pmemblk_close(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
btt_fini(pbp->bttp);
if (pbp->locks) {
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_destroy(&pbp->locks[i]);
Free((void *)pbp->locks);
}
#ifdef DEBUG
/* destroy debug lock */
util_mutex_destroy(&pbp->write_lock);
#endif
util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS);
}
/*
* pmemblk_bsize -- return size of block for specified pool
*/
size_t
pmemblk_bsize(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return le32toh(pbp->bsize);
}
/*
* pmemblk_nblock -- return number of usable blocks in a block memory pool
*/
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return btt_nlba(pbp->bttp);
}
/*
* pmemblk_read -- read a block in a block memory pool
*/
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_write -- write a block (atomically) in a block memory pool
*/
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
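#ifndef _WIN32
/*
 * Illustrative usage sketch of the public API implemented in this file:
 * create a pool of 4 KiB blocks, write block 0 and read it back. The block
 * size, pool size and mode are examples only and error handling is trimmed;
 * buf must point to at least one block worth of memory.
 */
static inline int
pmemblk_usage_sketch(const char *path, void *buf)
{
	PMEMblkpool *pbp = pmemblk_create(path, 4096, PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL)
		return -1;
	int ret = 0;
	if (pmemblk_write(pbp, buf, 0) < 0 || pmemblk_read(pbp, buf, 0) < 0)
		ret = -1;
	pmemblk_close(pbp);
	return ret;
}
#endif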
/*
* pmemblk_set_zero -- zero a block in a block memory pool
*/
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_set_error -- set the error state on a block in a block memory pool
*/
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_checkU -- block memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_checkU(const char *path, size_t bsize)
{
LOG(3, "path \"%s\" bsize %zu", path, bsize);
/* map the pool read-only */
PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW);
if (pbp == NULL)
return -1; /* errno set by blk_open_common() */
int retval = btt_check(pbp->bttp);
int oerrno = errno;
pmemblk_close(pbp);
errno = oerrno;
return retval;
}
#ifndef _WIN32
/*
* pmemblk_check -- block memory pool consistency check
*/
int
pmemblk_check(const char *path, size_t bsize)
{
return pmemblk_checkU(path, bsize);
}
#else
/*
* pmemblk_checkW -- block memory pool consistency check
*/
int
pmemblk_checkW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
int ret = pmemblk_checkU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_ctl_getU -- programmatically executes a read ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
* pmemblk_ctl_setU -- programmatically executes a write ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
* pmemblk_ctl_execU -- programmatically executes a runnable ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
* pmemblk_ctl_get -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_getU(pbp, name, arg);
}
/*
* pmemblk_ctl_set -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_setU(pbp, name, arg);
}
/*
* pmemblk_ctl_exec -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_execU(pbp, name, arg);
}
#else
/*
* pmemblk_ctl_getW -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_getU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_setW -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_setU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_execW -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_execU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
#endif
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmemblk_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container_ravl.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* container_ravl.c -- implementation of ravl-based block container
*/
#include "container_ravl.h"
#include "ravl.h"
#include "out.h"
#include "sys_util.h"
struct block_container_ravl {
struct block_container super;
struct ravl *tree;
};
/*
* container_compare_memblocks -- (internal) compares two memory blocks
*/
static int
container_compare_memblocks(const void *lhs, const void *rhs)
{
const struct memory_block *l = lhs;
const struct memory_block *r = rhs;
int64_t diff = (int64_t)l->size_idx - (int64_t)r->size_idx;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->block_off - (int64_t)r->block_off;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
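/*
 * Illustrative sketch: because the comparator above orders blocks first by
 * size_idx and then by location, a best-fit lookup can be expressed as a
 * search key that carries only the requested size_idx (the remaining fields
 * are assumed to be zeroed by the caller) combined with the
 * RAVL_PREDICATE_GREATER_EQUAL predicate -- it lands on the smallest block
 * that is large enough, and among equally sized blocks on the lowest one.
 */
static inline struct memory_block
bestfit_key_sketch(uint32_t size_idx)
{
	struct memory_block key = { .size_idx = size_idx };
	/* only the size matters for the search key in this sketch */
	return key;
}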
/*
* container_ravl_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_ravl_insert_block(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct memory_block *e = m->m_ops->get_user_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e));
VALGRIND_ADD_TO_TX(e, sizeof(*e));
*e = *m;
VALGRIND_SET_CLEAN(e, sizeof(*e));
VALGRIND_REMOVE_FROM_TX(e, sizeof(*e));
return ravl_insert(c->tree, e);
}
/*
* container_ravl_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_ravl_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL)
return ENOMEM;
struct memory_block *e = ravl_data(n);
*m = *e;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_get_rm_block_exact --
* (internal) removes exact match memory block
*/
static int
container_ravl_get_rm_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL);
if (n == NULL)
return ENOMEM;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_is_empty -- (internal) checks whether the container is empty
*/
static int
container_ravl_is_empty(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_empty(c->tree);
}
/*
* container_ravl_rm_all -- (internal) removes all elements from the tree
*/
static void
container_ravl_rm_all(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_clear(c->tree);
}
/*
 * container_ravl_destroy -- (internal) destroys the container
*/
static void
container_ravl_destroy(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_delete(c->tree);
Free(bc);
}
/*
* Tree-based block container used to provide best-fit functionality to the
 * bucket. Lookup and insert operations are logarithmic in the number of
 * blocks stored in the tree.
*
* The get methods also guarantee that the block with lowest possible address
* that best matches the requirements is provided.
*/
static const struct block_container_ops container_ravl_ops = {
.insert = container_ravl_insert_block,
.get_rm_exact = container_ravl_get_rm_block_exact,
.get_rm_bestfit = container_ravl_get_rm_block_bestfit,
.is_empty = container_ravl_is_empty,
.rm_all = container_ravl_rm_all,
.destroy = container_ravl_destroy,
};
/*
* container_new_ravl -- allocates and initializes a ravl container
*/
struct block_container *
container_new_ravl(struct palloc_heap *heap)
{
struct block_container_ravl *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_ravl_ops;
bc->tree = ravl_new(container_compare_memblocks);
if (bc->tree == NULL)
goto error_ravl_new;
return (struct block_container *)&bc->super;
error_ravl_new:
Free(bc);
error_container_malloc:
return NULL;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/tx.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* tx.c -- transactions implementation
*/
#include <inttypes.h>
#include <wchar.h>
#include "queue.h"
#include "ravl.h"
#include "obj.h"
#include "out.h"
#include "pmalloc.h"
#include "tx.h"
#include "valgrind_internal.h"
#include "memops.h"
#include <x86intrin.h>
struct tx_data {
PMDK_SLIST_ENTRY(tx_data) tx_entry;
jmp_buf env;
enum pobj_tx_failure_behavior failure_behavior;
};
struct tx {
PMEMobjpool *pop;
enum pobj_tx_stage stage;
int last_errnum;
struct lane *lane;
PMDK_SLIST_HEAD(txl, tx_lock_data) tx_locks;
PMDK_SLIST_HEAD(txd, tx_data) tx_entries;
struct ravl *ranges;
VEC(, struct pobj_action) actions;
VEC(, struct user_buffer_def) redo_userbufs;
size_t redo_userbufs_capacity;
pmemobj_tx_callback stage_callback;
void *stage_callback_arg;
int first_snapshot;
void *user_data;
};
/*
* get_tx -- (internal) returns current transaction
*
* This function should be used only in high-level functions.
*/
static struct tx *
get_tx()
{
static __thread struct tx tx;
return &tx;
}
struct tx_lock_data {
union {
PMEMmutex *mutex;
PMEMrwlock *rwlock;
} lock;
enum pobj_tx_param lock_type;
PMDK_SLIST_ENTRY(tx_lock_data) tx_lock;
};
struct tx_alloc_args {
uint64_t flags;
const void *copy_ptr;
size_t copy_size;
};
#define COPY_ARGS(flags, copy_ptr, copy_size)\
(struct tx_alloc_args){flags, copy_ptr, copy_size}
#define ALLOC_ARGS(flags)\
(struct tx_alloc_args){flags, NULL, 0}
struct tx_range_def {
uint64_t offset;
uint64_t size;
uint64_t flags;
};
/*
* tx_range_def_cmp -- compares two snapshot ranges
*/
static int
tx_range_def_cmp(const void *lhs, const void *rhs)
{
const struct tx_range_def *l = lhs;
const struct tx_range_def *r = rhs;
if (l->offset > r->offset)
return 1;
else if (l->offset < r->offset)
return -1;
return 0;
}
/*
* tx_params_new -- creates a new transactional parameters instance and fills it
* with default values.
*/
struct tx_parameters *
tx_params_new(void)
{
struct tx_parameters *tx_params = Malloc(sizeof(*tx_params));
if (tx_params == NULL)
return NULL;
tx_params->cache_size = TX_DEFAULT_RANGE_CACHE_SIZE;
return tx_params;
}
/*
* tx_params_delete -- deletes transactional parameters instance
*/
void
tx_params_delete(struct tx_parameters *tx_params)
{
Free(tx_params);
}
static void
obj_tx_abort(int errnum, int user);
/*
* obj_tx_fail_err -- (internal) pmemobj_tx_abort variant that returns
* error code
*/
static inline int
obj_tx_fail_err(int errnum, uint64_t flags)
{
if ((flags & POBJ_FLAG_TX_NO_ABORT) == 0)
obj_tx_abort(errnum, 0);
errno = errnum;
return errnum;
}
/*
* obj_tx_fail_null -- (internal) pmemobj_tx_abort variant that returns
* null PMEMoid
*/
static inline PMEMoid
obj_tx_fail_null(int errnum, uint64_t flags)
{
if ((flags & POBJ_FLAG_TX_NO_ABORT) == 0)
obj_tx_abort(errnum, 0);
errno = errnum;
return OID_NULL;
}
/* ASSERT_IN_TX -- checks whether there's open transaction */
#define ASSERT_IN_TX(tx) do {\
if ((tx)->stage == TX_STAGE_NONE)\
FATAL("%s called outside of transaction", __func__);\
} while (0)
/* ASSERT_TX_STAGE_WORK -- checks whether current transaction stage is WORK */
#define ASSERT_TX_STAGE_WORK(tx) do {\
if ((tx)->stage != TX_STAGE_WORK)\
FATAL("%s called in invalid stage %d", __func__, (tx)->stage);\
} while (0)
/*
* tx_action_reserve -- (internal) reserve space for the given number of actions
*/
static int
tx_action_reserve(struct tx *tx, size_t n)
{
size_t entries_size = (VEC_SIZE(&tx->actions) + n) *
sizeof(struct ulog_entry_val);
/* take the provided user buffers into account when reserving */
entries_size -= MIN(tx->redo_userbufs_capacity, entries_size);
if (operation_reserve(tx->lane->external, entries_size) != 0)
return -1;
return 0;
}
/*
* tx_action_add -- (internal) reserve space and add a new tx action
*/
static struct pobj_action *
tx_action_add(struct tx *tx)
{
if (tx_action_reserve(tx, 1) != 0)
return NULL;
VEC_INC_BACK(&tx->actions);
return &VEC_BACK(&tx->actions);
}
/*
* tx_action_remove -- (internal) remove last tx action
*/
static void
tx_action_remove(struct tx *tx)
{
VEC_POP_BACK(&tx->actions);
}
/*
* constructor_tx_alloc -- (internal) constructor for normal alloc
*/
static int
constructor_tx_alloc(void *ctx, void *ptr, size_t usable_size, void *arg)
{
LOG(5, NULL);
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
struct tx_alloc_args *args = arg;
/* do not report changes to the new object */
VALGRIND_ADD_TO_TX(ptr, usable_size);
if (args->flags & POBJ_FLAG_ZERO)
memset(ptr, 0, usable_size);
if (args->copy_ptr && args->copy_size != 0) {
memcpy(ptr, args->copy_ptr, args->copy_size);
}
return 0;
}
struct tx_range_data {
void *begin;
void *end;
PMDK_SLIST_ENTRY(tx_range_data) tx_range;
};
PMDK_SLIST_HEAD(txr, tx_range_data);
/*
* tx_remove_range -- (internal) removes specified range from ranges list
*/
static void
tx_remove_range(struct txr *tx_ranges, void *begin, void *end)
{
struct tx_range_data *txr = PMDK_SLIST_FIRST(tx_ranges);
while (txr) {
if (begin >= txr->end || end < txr->begin) {
txr = PMDK_SLIST_NEXT(txr, tx_range);
continue;
}
LOG(4, "detected PMEM lock in undo log; "
"range %p-%p, lock %p-%p",
txr->begin, txr->end, begin, end);
/* split the range into new ones */
if (begin > txr->begin) {
struct tx_range_data *txrn = Malloc(sizeof(*txrn));
if (txrn == NULL)
/* we can't do it any other way */
FATAL("!Malloc");
txrn->begin = txr->begin;
txrn->end = begin;
LOG(4, "range split; %p-%p", txrn->begin, txrn->end);
PMDK_SLIST_INSERT_HEAD(tx_ranges, txrn, tx_range);
}
if (end < txr->end) {
struct tx_range_data *txrn = Malloc(sizeof(*txrn));
if (txrn == NULL)
/* we can't do it any other way */
FATAL("!Malloc");
txrn->begin = end;
txrn->end = txr->end;
LOG(4, "range split; %p-%p", txrn->begin, txrn->end);
PMDK_SLIST_INSERT_HEAD(tx_ranges, txrn, tx_range);
}
struct tx_range_data *next = PMDK_SLIST_NEXT(txr, tx_range);
/* remove the original range from the list */
PMDK_SLIST_REMOVE(tx_ranges, txr, tx_range_data, tx_range);
Free(txr);
txr = next;
}
}
/*
* tx_restore_range -- (internal) restore a single range from undo log
*
* If the snapshot contains any PMEM locks that are held by the current
* transaction, they won't be overwritten with the saved data to avoid changing
* their state. Those locks will be released in tx_end().
*/
static void
tx_restore_range(PMEMobjpool *pop, struct tx *tx, struct ulog_entry_buf *range)
{
COMPILE_ERROR_ON(sizeof(PMEMmutex) != _POBJ_CL_SIZE);
COMPILE_ERROR_ON(sizeof(PMEMrwlock) != _POBJ_CL_SIZE);
COMPILE_ERROR_ON(sizeof(PMEMcond) != _POBJ_CL_SIZE);
struct txr tx_ranges;
PMDK_SLIST_INIT(&tx_ranges);
struct tx_range_data *txr;
txr = Malloc(sizeof(*txr));
if (txr == NULL) {
/* we can't do it any other way */
FATAL("!Malloc");
}
uint64_t range_offset = ulog_entry_offset(&range->base);
txr->begin = OBJ_OFF_TO_PTR(pop, range_offset);
txr->end = (char *)txr->begin + range->size;
PMDK_SLIST_INSERT_HEAD(&tx_ranges, txr, tx_range);
struct tx_lock_data *txl;
/* check if there are any locks within given memory range */
PMDK_SLIST_FOREACH(txl, &tx->tx_locks, tx_lock) {
void *lock_begin = txl->lock.mutex;
/* all PMEM locks have the same size */
void *lock_end = (char *)lock_begin + _POBJ_CL_SIZE;
tx_remove_range(&tx_ranges, lock_begin, lock_end);
}
ASSERT(!PMDK_SLIST_EMPTY(&tx_ranges));
void *dst_ptr = OBJ_OFF_TO_PTR(pop, range_offset);
while (!PMDK_SLIST_EMPTY(&tx_ranges)) {
txr = PMDK_SLIST_FIRST(&tx_ranges);
PMDK_SLIST_REMOVE_HEAD(&tx_ranges, tx_range);
/* restore partial range data from snapshot */
ASSERT((char *)txr->begin >= (char *)dst_ptr);
uint8_t *src = &range->data[
(char *)txr->begin - (char *)dst_ptr];
ASSERT((char *)txr->end >= (char *)txr->begin);
size_t size = (size_t)((char *)txr->end - (char *)txr->begin);
pmemops_memcpy(&pop->p_ops, txr->begin, src, size, 0);
Free(txr);
}
}
/*
* tx_undo_entry_apply -- applies modifications of a single ulog entry
*/
static int
tx_undo_entry_apply(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops)
{
struct ulog_entry_buf *eb;
switch (ulog_entry_type(e)) {
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)e;
tx_restore_range(p_ops->base, get_tx(), eb);
break;
case ULOG_OPERATION_AND:
case ULOG_OPERATION_OR:
case ULOG_OPERATION_SET:
case ULOG_OPERATION_BUF_SET:
default:
ASSERT(0);
}
return 0;
}
/*
* tx_abort_set -- (internal) abort all set operations
*/
static void
tx_abort_set(PMEMobjpool *pop, struct lane *lane)
{
LOG(7, NULL);
ulog_foreach_entry((struct ulog *)&lane->layout->undo,
tx_undo_entry_apply, NULL, &pop->p_ops,NULL);
pmemops_drain(&pop->p_ops);
operation_finish(lane->undo, ULOG_INC_FIRST_GEN_NUM);
}
/*
* tx_flush_range -- (internal) flush one range
*/
static void
tx_flush_range(void *data, void *ctx)
{
PMEMobjpool *pop = ctx;
struct tx_range_def *range = data;
if (!(range->flags & POBJ_FLAG_NO_FLUSH)) {
pmemops_xflush(&pop->p_ops, OBJ_OFF_TO_PTR(pop, range->offset),
range->size, PMEMOBJ_F_RELAXED);
}
VALGRIND_REMOVE_FROM_TX(OBJ_OFF_TO_PTR(pop, range->offset),
range->size);
}
/*
* tx_clean_range -- (internal) clean one range
*/
static void
tx_clean_range(void *data, void *ctx)
{
PMEMobjpool *pop = ctx;
struct tx_range_def *range = data;
VALGRIND_REMOVE_FROM_TX(OBJ_OFF_TO_PTR(pop, range->offset),
range->size);
VALGRIND_SET_CLEAN(OBJ_OFF_TO_PTR(pop, range->offset), range->size);
}
/*
* tx_pre_commit -- (internal) do pre-commit operations
*/
static void
tx_pre_commit(struct tx *tx)
{
LOG(5, NULL);
/* Flush all regions and destroy the whole tree. */
ravl_delete_cb(tx->ranges, tx_flush_range, tx->pop);
tx->ranges = NULL;
}
/*
* tx_abort -- (internal) abort all allocated objects
*/
static void
tx_abort(PMEMobjpool *pop, struct lane *lane)
{
LOG(7, NULL);
struct tx *tx = get_tx();
tx_abort_set(pop, lane);
ravl_delete_cb(tx->ranges, tx_clean_range, pop);
palloc_cancel(&pop->heap,
VEC_ARR(&tx->actions), VEC_SIZE(&tx->actions));
tx->ranges = NULL;
}
/*
* tx_get_pop -- returns the current transaction's pool handle, NULL if not
* within a transaction.
*/
PMEMobjpool *
tx_get_pop(void)
{
return get_tx()->pop;
}
/*
* add_to_tx_and_lock -- (internal) add lock to the transaction and acquire it
*/
static int
add_to_tx_and_lock(struct tx *tx, enum pobj_tx_param type, void *lock)
{
LOG(15, NULL);
int retval = 0;
struct tx_lock_data *txl;
/* check if the lock is already on the list */
PMDK_SLIST_FOREACH(txl, &tx->tx_locks, tx_lock) {
if (memcmp(&txl->lock, &lock, sizeof(lock)) == 0)
return 0;
}
txl = Malloc(sizeof(*txl));
if (txl == NULL)
return ENOMEM;
txl->lock_type = type;
switch (txl->lock_type) {
case TX_PARAM_MUTEX:
txl->lock.mutex = lock;
retval = pmemobj_mutex_lock(tx->pop,
txl->lock.mutex);
if (retval) {
ERR("!pmemobj_mutex_lock");
goto err;
}
break;
case TX_PARAM_RWLOCK:
txl->lock.rwlock = lock;
retval = pmemobj_rwlock_wrlock(tx->pop,
txl->lock.rwlock);
if (retval) {
ERR("!pmemobj_rwlock_wrlock");
goto err;
}
break;
default:
ERR("Unrecognized lock type");
ASSERT(0);
break;
}
PMDK_SLIST_INSERT_HEAD(&tx->tx_locks, txl, tx_lock);
return 0;
err:
errno = retval;
Free(txl);
return retval;
}
/*
* release_and_free_tx_locks -- (internal) release and remove all locks from the
* transaction
*/
static void
release_and_free_tx_locks(struct tx *tx)
{
LOG(15, NULL);
while (!PMDK_SLIST_EMPTY(&tx->tx_locks)) {
struct tx_lock_data *tx_lock = PMDK_SLIST_FIRST(&tx->tx_locks);
PMDK_SLIST_REMOVE_HEAD(&tx->tx_locks, tx_lock);
switch (tx_lock->lock_type) {
case TX_PARAM_MUTEX:
pmemobj_mutex_unlock(tx->pop,
tx_lock->lock.mutex);
break;
case TX_PARAM_RWLOCK:
pmemobj_rwlock_unlock(tx->pop,
tx_lock->lock.rwlock);
break;
default:
ERR("Unrecognized lock type");
ASSERT(0);
break;
}
Free(tx_lock);
}
}
/*
* tx_lane_ranges_insert_def -- (internal) allocates and inserts a new range
* definition into the ranges tree
*/
static int
tx_lane_ranges_insert_def(PMEMobjpool *pop, struct tx *tx,
const struct tx_range_def *rdef)
{
LOG(3, "rdef->offset %"PRIu64" rdef->size %"PRIu64,
rdef->offset, rdef->size);
int ret = ravl_emplace_copy(tx->ranges, rdef);
if (ret && errno == EEXIST)
FATAL("invalid state of ranges tree");
return ret;
}
/*
* tx_alloc_common -- (internal) common function for alloc and zalloc
*/
static PMEMoid
tx_alloc_common(struct tx *tx, size_t size, type_num_t type_num,
palloc_constr constructor, struct tx_alloc_args args)
{
LOG(3, NULL);
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
return obj_tx_fail_null(ENOMEM, args.flags);
}
PMEMobjpool *pop = tx->pop;
struct pobj_action *action = tx_action_add(tx);
if (action == NULL)
return obj_tx_fail_null(ENOMEM, args.flags);
if (palloc_reserve(&pop->heap, size, constructor, &args, type_num, 0,
CLASS_ID_FROM_FLAG(args.flags),
ARENA_ID_FROM_FLAG(args.flags), action) != 0)
goto err_oom;
/* allocate object to undo log */
PMEMoid retoid = OID_NULL;
retoid.off = action->heap.offset;
retoid.pool_uuid_lo = pop->uuid_lo;
size = action->heap.usable_size;
const struct tx_range_def r = {retoid.off, size, args.flags};
if (tx_lane_ranges_insert_def(pop, tx, &r) != 0)
goto err_oom;
return retoid;
err_oom:
tx_action_remove(tx);
ERR("out of memory");
return obj_tx_fail_null(ENOMEM, args.flags);
}
/*
* tx_realloc_common -- (internal) common function for tx realloc
*/
static PMEMoid
tx_realloc_common(struct tx *tx, PMEMoid oid, size_t size, uint64_t type_num,
palloc_constr constructor_alloc,
palloc_constr constructor_realloc,
uint64_t flags)
{
LOG(3, NULL);
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
return obj_tx_fail_null(ENOMEM, flags);
}
/* if oid is NULL just alloc */
if (OBJ_OID_IS_NULL(oid))
return tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_alloc, ALLOC_ARGS(flags));
ASSERT(OBJ_OID_IS_VALID(tx->pop, oid));
/* if size is 0 just free */
if (size == 0) {
if (pmemobj_tx_free(oid)) {
ERR("pmemobj_tx_free failed");
return oid;
} else {
return OID_NULL;
}
}
/* oid is not NULL and size is not 0 so do realloc by alloc and free */
void *ptr = OBJ_OFF_TO_PTR(tx->pop, oid.off);
size_t old_size = palloc_usable_size(&tx->pop->heap, oid.off);
size_t copy_size = old_size < size ? old_size : size;
PMEMoid new_obj = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_realloc, COPY_ARGS(flags, ptr, copy_size));
if (!OBJ_OID_IS_NULL(new_obj)) {
if (pmemobj_tx_free(oid)) {
ERR("pmemobj_tx_free failed");
VEC_POP_BACK(&tx->actions);
return OID_NULL;
}
}
return new_obj;
}
/*
* tx_construct_user_buffer -- add user buffer to the ulog
*/
static int
tx_construct_user_buffer(struct tx *tx, void *addr, size_t size,
enum pobj_log_type type, int outer_tx, uint64_t flags)
{
if (tx->pop != pmemobj_pool_by_ptr(addr)) {
ERR("Buffer from a different pool");
goto err;
}
/*
	 * We want to extend a log of the specified type, but if this is an
	 * outer transaction appending its first user buffer, we first need
	 * to free all logs except the first one.
*/
struct operation_context *ctx = type == TX_LOG_TYPE_INTENT ?
tx->lane->external : tx->lane->undo;
if (outer_tx && !operation_get_any_user_buffer(ctx))
operation_free_logs(ctx, ULOG_ANY_USER_BUFFER);
struct user_buffer_def userbuf = {addr, size};
if (operation_user_buffer_verify_align(ctx, &userbuf) != 0)
goto err;
if (type == TX_LOG_TYPE_INTENT) {
/*
		 * The redo log context is not used until transaction commit,
		 * so the user buffers have to be stored and added to the
		 * operation at commit time. This is because atomic operations
		 * can be executed independently in the same lane as a running
		 * transaction.
*/
if (VEC_PUSH_BACK(&tx->redo_userbufs, userbuf) != 0)
goto err;
tx->redo_userbufs_capacity +=
userbuf.size - TX_INTENT_LOG_BUFFER_OVERHEAD;
} else {
operation_add_user_buffer(ctx, &userbuf);
}
return 0;
err:
return obj_tx_fail_err(EINVAL, flags);
}
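/*
 * Illustrative sketch (never called): how a user-supplied log buffer is
 * expected to reach the function above. The public wrapper
 * pmemobj_tx_log_append_buffer() -- presumably defined further down in this
 * file -- forwards the caller's pmem buffer so that undo or intent logs can
 * grow into it instead of into runtime-allocated ulogs.
 */
static inline int
tx_user_buffer_sketch(void *buf, size_t size)
{
	/*
	 * Must be called inside an open transaction, with buf residing in
	 * the same pool as the transaction (see the pool check above).
	 */
	return pmemobj_tx_log_append_buffer(TX_LOG_TYPE_SNAPSHOT, buf, size);
}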
/*
* pmemobj_tx_begin -- initializes new transaction
*/
int
pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...)
{
LOG(3, NULL);
int err = 0;
struct tx *tx = get_tx();
enum pobj_tx_failure_behavior failure_behavior = POBJ_TX_FAILURE_ABORT;
if (tx->stage == TX_STAGE_WORK) {
ASSERTne(tx->lane, NULL);
if (tx->pop != pop) {
ERR("nested transaction for different pool");
return obj_tx_fail_err(EINVAL, 0);
}
/* inherits this value from the parent transaction */
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
failure_behavior = txd->failure_behavior;
VALGRIND_START_TX;
} else if (tx->stage == TX_STAGE_NONE) {
VALGRIND_START_TX;
lane_hold(pop, &tx->lane);
operation_start(tx->lane->undo);
VEC_INIT(&tx->actions);
VEC_INIT(&tx->redo_userbufs);
tx->redo_userbufs_capacity = 0;
PMDK_SLIST_INIT(&tx->tx_entries);
PMDK_SLIST_INIT(&tx->tx_locks);
tx->ranges = ravl_new_sized(tx_range_def_cmp,
sizeof(struct tx_range_def));
tx->pop = pop;
tx->first_snapshot = 1;
tx->user_data = NULL;
} else {
FATAL("Invalid stage %d to begin new transaction", tx->stage);
}
struct tx_data *txd = Malloc(sizeof(*txd));
if (txd == NULL) {
err = errno;
ERR("!Malloc");
goto err_abort;
}
tx->last_errnum = 0;
if (env != NULL)
memcpy(txd->env, env, sizeof(jmp_buf));
else
memset(txd->env, 0, sizeof(jmp_buf));
txd->failure_behavior = failure_behavior;
PMDK_SLIST_INSERT_HEAD(&tx->tx_entries, txd, tx_entry);
tx->stage = TX_STAGE_WORK;
/* handle locks */
va_list argp;
va_start(argp, env);
enum pobj_tx_param param_type;
while ((param_type = va_arg(argp, enum pobj_tx_param)) !=
TX_PARAM_NONE) {
if (param_type == TX_PARAM_CB) {
pmemobj_tx_callback cb =
va_arg(argp, pmemobj_tx_callback);
void *arg = va_arg(argp, void *);
if (tx->stage_callback &&
(tx->stage_callback != cb ||
tx->stage_callback_arg != arg)) {
FATAL("transaction callback is already set, "
"old %p new %p old_arg %p new_arg %p",
tx->stage_callback, cb,
tx->stage_callback_arg, arg);
}
tx->stage_callback = cb;
tx->stage_callback_arg = arg;
} else {
err = add_to_tx_and_lock(tx, param_type,
va_arg(argp, void *));
if (err) {
va_end(argp);
goto err_abort;
}
}
}
va_end(argp);
ASSERT(err == 0);
return 0;
err_abort:
if (tx->stage == TX_STAGE_WORK)
obj_tx_abort(err, 0);
else
tx->stage = TX_STAGE_ONABORT;
return err;
}
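/*
 * Illustrative usage sketch (never called): the macro-free shape of a
 * transaction built on the entry points in this file -- begin with an
 * optional lock list terminated by TX_PARAM_NONE, snapshot the range that
 * will be modified, then commit and end. The lock and the offset/size pair
 * are placeholders for whatever object the caller protects and modifies.
 */
static inline int
tx_usage_sketch(PMEMobjpool *pop, PMEMmutex *lock, uint64_t off, size_t size)
{
	if (pmemobj_tx_begin(pop, NULL, TX_PARAM_MUTEX, lock, TX_PARAM_NONE))
		return -1;
	PMEMoid oid = {pop->uuid_lo, off};
	/* a failed snapshot aborts the transaction for us (no-env mode) */
	if (pmemobj_tx_add_range(oid, 0, size) == 0)
		pmemobj_tx_commit();
	return pmemobj_tx_end();	/* 0 on commit, the abort errno otherwise */
}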
/*
* tx_abort_on_failure_flag -- (internal) return 0 or POBJ_FLAG_TX_NO_ABORT
* based on transaction setting
*/
static uint64_t
tx_abort_on_failure_flag(struct tx *tx)
{
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
if (txd->failure_behavior == POBJ_TX_FAILURE_RETURN)
return POBJ_FLAG_TX_NO_ABORT;
return 0;
}
/*
* pmemobj_tx_xlock -- get lane from pool and add lock to transaction,
* with no_abort option
*/
int
pmemobj_tx_xlock(enum pobj_tx_param type, void *lockp, uint64_t flags)
{
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_XLOCK_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_XLOCK_VALID_FLAGS);
return obj_tx_fail_err(EINVAL, flags);
}
int ret = add_to_tx_and_lock(tx, type, lockp);
if (ret)
return obj_tx_fail_err(ret, flags);
return 0;
}
/*
* pmemobj_tx_lock -- get lane from pool and add lock to transaction.
*/
int
pmemobj_tx_lock(enum pobj_tx_param type, void *lockp)
{
return pmemobj_tx_xlock(type, lockp, POBJ_XLOCK_NO_ABORT);
}
/*
* obj_tx_callback -- (internal) executes callback associated with current stage
*/
static void
obj_tx_callback(struct tx *tx)
{
if (!tx->stage_callback)
return;
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
/* is this the outermost transaction? */
if (PMDK_SLIST_NEXT(txd, tx_entry) == NULL)
tx->stage_callback(tx->pop, tx->stage, tx->stage_callback_arg);
}
/*
* pmemobj_tx_stage -- returns current transaction stage
*/
enum pobj_tx_stage
pmemobj_tx_stage(void)
{
LOG(3, NULL);
return get_tx()->stage;
}
/*
* obj_tx_abort -- aborts current transaction
*/
static void
obj_tx_abort(int errnum, int user)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
ASSERT(tx->lane != NULL);
if (errnum == 0)
errnum = ECANCELED;
tx->stage = TX_STAGE_ONABORT;
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
if (PMDK_SLIST_NEXT(txd, tx_entry) == NULL) {
/* this is the outermost transaction */
/* process the undo log */
tx_abort(tx->pop, tx->lane);
lane_release(tx->pop);
tx->lane = NULL;
}
tx->last_errnum = errnum;
errno = errnum;
if (user)
ERR("!explicit transaction abort");
/* ONABORT */
obj_tx_callback(tx);
if (!util_is_zeroed(txd->env, sizeof(jmp_buf)))
longjmp(txd->env, errnum);
}
/*
* pmemobj_tx_abort -- aborts current transaction
*
* Note: this function should not be called from inside of pmemobj.
*/
void
pmemobj_tx_abort(int errnum)
{
PMEMOBJ_API_START();
obj_tx_abort(errnum, 1);
PMEMOBJ_API_END();
}
/*
* pmemobj_tx_errno -- returns last transaction error code
*/
int
pmemobj_tx_errno(void)
{
LOG(3, NULL);
return get_tx()->last_errnum;
}
static void
tx_post_commit(struct tx *tx)
{
operation_finish(tx->lane->undo, 0);
}
#ifdef GET_NDP_BREAKDOWN
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
#endif
/*
static void wait_dma_complete(struct tx *tx){
#ifdef GET_NDP_BREAKDOWN
uint64_t startcycle = getCycle();
#endif
//if(*((uint32_t*)(tx->pop->p_ops.device)+254));
int it =0;
while(((*((uint32_t*)(tx->pop->p_ops.device)+254)) & 1) != 1){
//asm volatile ("clflush (%0)" :: "r"((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting %x %x\n",*((uint32_t*)(tx->pop->p_ops.device)+11),*((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting %d!!\n",it);
it++;
}
#ifdef GET_NDP_BREAKDOWN
uint64_t endcycle = getCycle();
waitCycles += endcycle-startcycle;
//printf("wait cpu time: %f\n",(((double)(endcycle-startcycle)))/2000000000);
#endif
}
static void reset_dma(struct tx *tx){
#ifdef GET_NDP_BREAKDOWN
uint64_t startcycle = getCycle();
#endif
*((uint32_t*)(tx->pop->p_ops.device)+253) = 7;
#ifdef GET_NDP_BREAKDOWN
uint64_t endcycle = getCycle();
resetCycles += endcycle-startcycle;
#endif
}
*/
/*
* pmemobj_tx_commit -- commits current transaction
*/
void
pmemobj_tx_commit(void)
{
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
	/* NDP code (disabled): wait for DMA completion and reset the engine */
// while(*((uint32_t*)(tx->pop->p_ops.device)+254) != 1){
// printf("waiting1 %x %x\n",*((uint32_t*)(tx->pop->p_ops.device)+11),*((uint32_t*)(tx->pop->p_ops.device)+254));
// }
// wait_dma_complete(tx);
// reset_dma(tx);
// *((uint32_t*)(tx->pop->p_ops.device)+253) = 7;
// while(*((uint32_t*)(tx->pop->p_ops.device1)+11) != 3){
// printf("waiting2 %d\n",*((uint32_t*)(tx->pop->p_ops.device1)+11));
// }
/////////
/* WORK */
obj_tx_callback(tx);
ASSERT(tx->lane != NULL);
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
if (PMDK_SLIST_NEXT(txd, tx_entry) == NULL) {
//wait_dma_complete(tx);
//reset_dma(tx);
/* this is the outermost transaction */
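		/*
		 * Outermost commit: flush the snapshotted ranges (pre-commit),
		 * atomically publish the deferred allocs/frees through the
		 * external redo log, and only then invalidate the undo log so
		 * that recovery of an interrupted commit replays the redo log
		 * instead of rolling the data back.
		 */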
PMEMobjpool *pop = tx->pop;
/* pre-commit phase */
tx_pre_commit(tx);
pmemops_drain(&pop->p_ops);
operation_start(tx->lane->external);
		/*
		 * In this variant, user-supplied redo buffers are not attached
		 * to the external log here; the original loop is kept below
		 * for reference.
		 */
		//struct user_buffer_def *userbuf;
		//VEC_FOREACH_BY_PTR(userbuf, &tx->redo_userbufs)
		//	operation_add_user_buffer(tx->lane->external, userbuf);
palloc_publish(&pop->heap, VEC_ARR(&tx->actions),
VEC_SIZE(&tx->actions), tx->lane->external);
//reset_dma(tx);
tx_post_commit(tx);
lane_release(pop);
tx->lane = NULL;
}
tx->stage = TX_STAGE_ONCOMMIT;
/* ONCOMMIT */
obj_tx_callback(tx);
PMEMOBJ_API_END();
}
/*
* pmemobj_tx_end -- ends current transaction
*/
int
pmemobj_tx_end(void)
{
LOG(3, NULL);
struct tx *tx = get_tx();
if (tx->stage == TX_STAGE_WORK)
FATAL("pmemobj_tx_end called without pmemobj_tx_commit");
if (tx->pop == NULL)
FATAL("pmemobj_tx_end called without pmemobj_tx_begin");
if (tx->stage_callback &&
(tx->stage == TX_STAGE_ONCOMMIT ||
tx->stage == TX_STAGE_ONABORT)) {
tx->stage = TX_STAGE_FINALLY;
obj_tx_callback(tx);
}
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
PMDK_SLIST_REMOVE_HEAD(&tx->tx_entries, tx_entry);
Free(txd);
VALGRIND_END_TX;
if (PMDK_SLIST_EMPTY(&tx->tx_entries)) {
ASSERTeq(tx->lane, NULL);
release_and_free_tx_locks(tx);
tx->pop = NULL;
tx->stage = TX_STAGE_NONE;
VEC_DELETE(&tx->actions);
VEC_DELETE(&tx->redo_userbufs);
if (tx->stage_callback) {
pmemobj_tx_callback cb = tx->stage_callback;
void *arg = tx->stage_callback_arg;
tx->stage_callback = NULL;
tx->stage_callback_arg = NULL;
cb(tx->pop, TX_STAGE_NONE, arg);
}
} else {
/* resume the next transaction */
tx->stage = TX_STAGE_WORK;
/* abort called within inner transaction, waterfall the error */
if (tx->last_errnum)
obj_tx_abort(tx->last_errnum, 0);
}
return tx->last_errnum;
}
/*
* pmemobj_tx_process -- processes current transaction stage
*/
int current_tx1 = 1;
void
pmemobj_tx_process(void)
{
current_tx1 = 1;
LOG(5, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
switch (tx->stage) {
case TX_STAGE_NONE:
break;
case TX_STAGE_WORK:
pmemobj_tx_commit();
break;
case TX_STAGE_ONABORT:
case TX_STAGE_ONCOMMIT:
tx->stage = TX_STAGE_FINALLY;
obj_tx_callback(tx);
break;
case TX_STAGE_FINALLY:
tx->stage = TX_STAGE_NONE;
break;
default:
ASSERT(0);
}
}
/*
* vg_verify_initialized -- when executed under Valgrind verifies that
* the buffer has been initialized; explicit check at snapshotting time,
* because Valgrind may find it much later when it's impossible to tell
* for which snapshot it triggered
*/
static void
vg_verify_initialized(PMEMobjpool *pop, const struct tx_range_def *def)
{
#if VG_MEMCHECK_ENABLED
if (!On_memcheck)
return;
VALGRIND_DO_DISABLE_ERROR_REPORTING;
char *start = (char *)pop + def->offset;
char *uninit = (char *)VALGRIND_CHECK_MEM_IS_DEFINED(start, def->size);
if (uninit) {
VALGRIND_PRINTF(
"Snapshotting uninitialized data in range <%p,%p> (<offset:0x%lx,size:0x%lx>)\n",
start, start + def->size, def->offset, def->size);
if (uninit != start)
VALGRIND_PRINTF("Uninitialized data starts at: %p\n",
uninit);
VALGRIND_DO_ENABLE_ERROR_REPORTING;
VALGRIND_CHECK_MEM_IS_DEFINED(start, def->size);
} else {
VALGRIND_DO_ENABLE_ERROR_REPORTING;
}
#endif
}
/*
* pmemobj_tx_add_snapshot -- (internal) creates a variably sized snapshot
*/
static int
pmemobj_tx_add_snapshot(struct tx *tx, struct tx_range_def *snapshot)
{
/*
* Depending on the size of the block, either allocate an
* entire new object or use cache.
*/
void *ptr = OBJ_OFF_TO_PTR(tx->pop, snapshot->offset);
VALGRIND_ADD_TO_TX(ptr, snapshot->size);
/* do nothing */
if (snapshot->flags & POBJ_XADD_NO_SNAPSHOT)
return 0;
if (!(snapshot->flags & POBJ_XADD_ASSUME_INITIALIZED))
vg_verify_initialized(tx->pop, snapshot);
/*
* If we are creating the first snapshot, setup a redo log action to
* increment counter in the undo log, so that the log becomes
* invalid once the redo log is processed.
*/
if (tx->first_snapshot) {
struct pobj_action *action = tx_action_add(tx);
if (action == NULL)
return -1;
uint64_t *n = &tx->lane->layout->undo.gen_num;
palloc_set_value(&tx->pop->heap, action,
n, *n + 1);
tx->first_snapshot = 0;
}
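	/*
	 * The previous contents of the range are copied into the undo log as
	 * a buffer entry; they are written back to the heap only if the
	 * transaction aborts or recovery finds it unfinished.
	 */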
return operation_add_buffer(tx->lane->undo, ptr, ptr, snapshot->size,
ULOG_OPERATION_BUF_CPY);
}
/*
* pmemobj_tx_merge_flags -- (internal) common code for merging flags between
* two ranges to ensure resultant behavior is correct
*/
static void
pmemobj_tx_merge_flags(struct tx_range_def *dest, struct tx_range_def *merged)
{
/*
* POBJ_XADD_NO_FLUSH should only be set in merged range if set in
* both ranges
*/
if ((dest->flags & POBJ_XADD_NO_FLUSH) &&
!(merged->flags & POBJ_XADD_NO_FLUSH)) {
dest->flags = dest->flags & (~POBJ_XADD_NO_FLUSH);
}
}
/*
* pmemobj_tx_add_common -- (internal) common code for adding persistent memory
* into the transaction
*/
static int
pmemobj_tx_add_common(struct tx *tx, struct tx_range_def *args)
{
LOG(15, NULL);
if (args->size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("snapshot size too large");
return obj_tx_fail_err(EINVAL, args->flags);
}
if (args->offset < tx->pop->heap_offset ||
(args->offset + args->size) >
(tx->pop->heap_offset + tx->pop->heap_size)) {
ERR("object outside of heap");
return obj_tx_fail_err(EINVAL, args->flags);
}
int ret = 0;
/*
* Search existing ranges backwards starting from the end of the
* snapshot.
*/
struct tx_range_def r = *args;
struct tx_range_def search = {0, 0, 0};
/*
* If the range is directly adjacent to an existing one,
* they can be merged, so search for less or equal elements.
*/
enum ravl_predicate p = RAVL_PREDICATE_LESS_EQUAL;
struct ravl_node *nprev = NULL;
while (r.size != 0) {
search.offset = r.offset + r.size;
struct ravl_node *n = ravl_find(tx->ranges, &search, p);
/*
* We have to skip searching for LESS_EQUAL because
* the snapshot we would find is the one that was just
* created.
*/
p = RAVL_PREDICATE_LESS;
struct tx_range_def *f = n ? ravl_data(n) : NULL;
size_t fend = f == NULL ? 0: f->offset + f->size;
size_t rend = r.offset + r.size;
if (fend == 0 || fend < r.offset) {
/*
* If found no range or the found range is not
* overlapping or adjacent on the left side, we can just
* create the entire r.offset + r.size snapshot.
*
* Snapshot:
* --+-
* Existing ranges:
* ---- (no ranges)
* or +--- (no overlap)
			 *	or ---+ (adjacent on the right side)
*/
if (nprev != NULL) {
/*
* But, if we have an existing adjacent snapshot
* on the right side, we can just extend it to
* include the desired range.
*/
struct tx_range_def *fprev = ravl_data(nprev);
ASSERTeq(rend, fprev->offset);
fprev->offset -= r.size;
fprev->size += r.size;
} else {
/*
* If we don't have anything adjacent, create
* a new range in the tree.
*/
ret = tx_lane_ranges_insert_def(tx->pop,
tx, &r);
if (ret != 0)
break;
}
ret = pmemobj_tx_add_snapshot(tx, &r);
break;
} else if (fend <= rend) {
/*
* If found range has its end inside of the desired
* snapshot range, we can extend the found range by the
* size leftover on the left side.
*
* Snapshot:
* --+++--
* Existing ranges:
* +++---- (overlap on left)
* or ---+--- (found snapshot is inside)
* or ---+-++ (inside, and adjacent on the right)
* or +++++-- (desired snapshot is inside)
*
*/
struct tx_range_def snapshot = *args;
snapshot.offset = fend;
/* the side not yet covered by an existing snapshot */
snapshot.size = rend - fend;
/* the number of bytes intersecting in both ranges */
size_t intersection = fend - MAX(f->offset, r.offset);
r.size -= intersection + snapshot.size;
f->size += snapshot.size;
pmemobj_tx_merge_flags(f, args);
if (snapshot.size != 0) {
ret = pmemobj_tx_add_snapshot(tx, &snapshot);
if (ret != 0)
break;
}
/*
* If there's a snapshot adjacent on right side, merge
* the two ranges together.
*/
if (nprev != NULL) {
struct tx_range_def *fprev = ravl_data(nprev);
ASSERTeq(rend, fprev->offset);
f->size += fprev->size;
pmemobj_tx_merge_flags(f, fprev);
ravl_remove(tx->ranges, nprev);
}
} else if (fend >= r.offset) {
/*
* If found range has its end extending beyond the
* desired snapshot.
*
* Snapshot:
* --+++--
* Existing ranges:
* -----++ (adjacent on the right)
* or ----++- (overlapping on the right)
* or ----+++ (overlapping and adjacent on the right)
* or --+++++ (desired snapshot is inside)
*
* Notice that we cannot create a snapshot based solely
* on this information without risking overwriting an
* existing one. We have to continue iterating, but we
* keep the information about adjacent snapshots in the
* nprev variable.
*/
size_t overlap = rend - MAX(f->offset, r.offset);
r.size -= overlap;
pmemobj_tx_merge_flags(f, args);
} else {
ASSERT(0);
}
nprev = n;
}
if (ret != 0) {
ERR("out of memory");
return obj_tx_fail_err(ENOMEM, args->flags);
}
return 0;
}
/* NDP-PMDK (shadow paging) helper: write-protect the page containing addr */
static void setpage(const void *addr){
	uint64_t pageNo = ((uint64_t)addr)/4096;
	unsigned long * pageStart = (unsigned long *)(pageNo*4096);
	mprotect(pageStart, 4096, PROT_READ);
	return;
}
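/*
 * Note (intent inferred for this shadow-paging variant, not authoritative):
 * instead of snapshotting ranges into the undo log, the range-add entry
 * points below only mark the first page touched by a transaction read-only
 * via setpage(). The following store is expected to fault and be handled
 * outside this file (the page_fault_counter and current_tx1 globals track
 * that), which is why the size and flags arguments are deliberately ignored
 * here.
 */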
/*
* pmemobj_tx_add_range_direct -- adds persistent memory range into the
* transaction
*/
int page_fault_counter1 = 0;
//extern int current_tx1 = 1 ;
int
pmemobj_tx_add_range_direct(const void *ptr, size_t size)
{
if(current_tx1){
get_tx();
setpage(ptr);
current_tx1 = 0;
}
return 0;
}
/*
* pmemobj_tx_xadd_range_direct -- adds persistent memory range into the
* transaction
*/
int page_fault_counter2 = 0;
int
pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags)
{
if(current_tx1){
setpage(ptr);
current_tx1 = 0;
}
return 0;
}
/*
* pmemobj_tx_add_range -- adds persistent memory range into the transaction
*/
int page_fault_counter3 = 0;
int
pmemobj_tx_add_range(PMEMoid oid, uint64_t hoff, size_t size)
{
PMEMOBJ_API_START();
struct tx *tx = get_tx();
if(current_tx1){
setpage(oid.off + hoff + (char *)tx->pop);
current_tx1 = 0;
}
PMEMOBJ_API_END();
return 0;
}
/*
* pmemobj_tx_xadd_range -- adds persistent memory range into the transaction
*/
int
pmemobj_tx_xadd_range(PMEMoid oid, uint64_t hoff, size_t size, uint64_t flags)
{
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
int ret;
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_XADD_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64, flags
& ~POBJ_XADD_VALID_FLAGS);
ret = obj_tx_fail_err(EINVAL, flags);
PMEMOBJ_API_END();
return ret;
}
if (oid.pool_uuid_lo != tx->pop->uuid_lo) {
ERR("invalid pool uuid");
ret = obj_tx_fail_err(EINVAL, flags);
PMEMOBJ_API_END();
return ret;
}
ASSERT(OBJ_OID_IS_VALID(tx->pop, oid));
struct tx_range_def args = {
.offset = oid.off + hoff,
.size = size,
.flags = flags,
};
ret = pmemobj_tx_add_common(tx, &args);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_tx_alloc -- allocates a new object
*/
PMEMoid
pmemobj_tx_alloc(size_t size, uint64_t type_num)
{
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
uint64_t flags = tx_abort_on_failure_flag(tx);
PMEMoid oid;
if (size == 0) {
ERR("allocation with size 0");
oid = obj_tx_fail_null(EINVAL, flags);
PMEMOBJ_API_END();
return oid;
}
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, ALLOC_ARGS(flags));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_zalloc -- allocates a new zeroed object
*/
PMEMoid
pmemobj_tx_zalloc(size_t size, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
uint64_t flags = POBJ_FLAG_ZERO;
flags |= tx_abort_on_failure_flag(tx);
PMEMOBJ_API_START();
PMEMoid oid;
if (size == 0) {
ERR("allocation with size 0");
oid = obj_tx_fail_null(EINVAL, flags);
PMEMOBJ_API_END();
return oid;
}
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, ALLOC_ARGS(flags));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_xalloc -- allocates a new object
*/
PMEMoid
pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
PMEMOBJ_API_START();
PMEMoid oid;
if (size == 0) {
ERR("allocation with size 0");
oid = obj_tx_fail_null(EINVAL, flags);
PMEMOBJ_API_END();
return oid;
}
if (flags & ~POBJ_TX_XALLOC_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64, flags
& ~(POBJ_TX_XALLOC_VALID_FLAGS));
oid = obj_tx_fail_null(EINVAL, flags);
PMEMOBJ_API_END();
return oid;
}
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, ALLOC_ARGS(flags));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_realloc -- resizes an existing object
*/
PMEMoid
pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid ret = tx_realloc_common(tx, oid, size, type_num,
constructor_tx_alloc, constructor_tx_alloc, 0);
PMEMOBJ_API_END();
return ret;
}
/*
 * pmemobj_tx_zrealloc -- resizes an existing object, any new space is zeroed.
*/
PMEMoid
pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid ret = tx_realloc_common(tx, oid, size, type_num,
constructor_tx_alloc, constructor_tx_alloc,
POBJ_FLAG_ZERO);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_tx_xstrdup -- allocates a new object with duplicate of the string s.
*/
PMEMoid
pmemobj_tx_xstrdup(const char *s, uint64_t type_num, uint64_t flags)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_TX_XALLOC_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_TX_XALLOC_VALID_FLAGS);
return obj_tx_fail_null(EINVAL, flags);
}
PMEMOBJ_API_START();
PMEMoid oid;
if (NULL == s) {
ERR("cannot duplicate NULL string");
oid = obj_tx_fail_null(EINVAL, flags);
PMEMOBJ_API_END();
return oid;
}
size_t len = strlen(s);
if (len == 0) {
oid = tx_alloc_common(tx, sizeof(char), (type_num_t)type_num,
constructor_tx_alloc,
ALLOC_ARGS(POBJ_XALLOC_ZERO));
PMEMOBJ_API_END();
return oid;
}
size_t size = (len + 1) * sizeof(char);
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, COPY_ARGS(flags, s, size));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_strdup -- allocates a new object with duplicate of the string s.
*/
PMEMoid
pmemobj_tx_strdup(const char *s, uint64_t type_num)
{
return pmemobj_tx_xstrdup(s, type_num, 0);
}
/*
* pmemobj_tx_xwcsdup -- allocates a new object with duplicate of the wide
* character string s.
*/
PMEMoid
pmemobj_tx_xwcsdup(const wchar_t *s, uint64_t type_num, uint64_t flags)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_TX_XALLOC_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_TX_XALLOC_VALID_FLAGS);
return obj_tx_fail_null(EINVAL, flags);
}
PMEMOBJ_API_START();
PMEMoid oid;
if (NULL == s) {
ERR("cannot duplicate NULL string");
oid = obj_tx_fail_null(EINVAL, flags);
PMEMOBJ_API_END();
return oid;
}
size_t len = wcslen(s);
if (len == 0) {
oid = tx_alloc_common(tx, sizeof(wchar_t),
(type_num_t)type_num, constructor_tx_alloc,
ALLOC_ARGS(POBJ_XALLOC_ZERO));
PMEMOBJ_API_END();
return oid;
}
size_t size = (len + 1) * sizeof(wchar_t);
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, COPY_ARGS(flags, s, size));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_wcsdup -- allocates a new object with duplicate of the wide
* character string s.
*/
PMEMoid
pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num)
{
return pmemobj_tx_xwcsdup(s, type_num, 0);
}
/*
* pmemobj_tx_xfree -- frees an existing object, with no_abort option
*/
int
pmemobj_tx_xfree(PMEMoid oid, uint64_t flags)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_XFREE_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_XFREE_VALID_FLAGS);
return obj_tx_fail_err(EINVAL, flags);
}
if (OBJ_OID_IS_NULL(oid))
return 0;
PMEMobjpool *pop = tx->pop;
if (pop->uuid_lo != oid.pool_uuid_lo) {
ERR("invalid pool uuid");
return obj_tx_fail_err(EINVAL, flags);
}
ASSERT(OBJ_OID_IS_VALID(pop, oid));
PMEMOBJ_API_START();
struct pobj_action *action;
struct tx_range_def range = {oid.off, 0, 0};
struct ravl_node *n = ravl_find(tx->ranges, &range,
RAVL_PREDICATE_EQUAL);
/*
* If attempting to free an object allocated within the same
* transaction, simply cancel the alloc and remove it from the actions.
*/
if (n != NULL) {
VEC_FOREACH_BY_PTR(action, &tx->actions) {
if (action->type == POBJ_ACTION_TYPE_HEAP &&
action->heap.offset == oid.off) {
struct tx_range_def *r = ravl_data(n);
void *ptr = OBJ_OFF_TO_PTR(pop, r->offset);
VALGRIND_SET_CLEAN(ptr, r->size);
VALGRIND_REMOVE_FROM_TX(ptr, r->size);
ravl_remove(tx->ranges, n);
palloc_cancel(&pop->heap, action, 1);
VEC_ERASE_BY_PTR(&tx->actions, action);
PMEMOBJ_API_END();
return 0;
}
}
}
action = tx_action_add(tx);
if (action == NULL) {
int ret = obj_tx_fail_err(errno, flags);
PMEMOBJ_API_END();
return ret;
}
palloc_defer_free(&pop->heap, oid.off, action);
PMEMOBJ_API_END();
return 0;
}
/*
* pmemobj_tx_free -- frees an existing object
*/
int
pmemobj_tx_free(PMEMoid oid)
{
return pmemobj_tx_xfree(oid, 0);
}
/*
* pmemobj_tx_xpublish -- publishes actions inside of a transaction,
* with no_abort option
*/
int
pmemobj_tx_xpublish(struct pobj_action *actv, size_t actvcnt, uint64_t flags)
{
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_XPUBLISH_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_XPUBLISH_VALID_FLAGS);
return obj_tx_fail_err(EINVAL, flags);
}
PMEMOBJ_API_START();
if (tx_action_reserve(tx, actvcnt) != 0) {
int ret = obj_tx_fail_err(ENOMEM, flags);
PMEMOBJ_API_END();
return ret;
}
for (size_t i = 0; i < actvcnt; ++i) {
VEC_PUSH_BACK(&tx->actions, actv[i]);
}
PMEMOBJ_API_END();
return 0;
}
/*
* pmemobj_tx_publish -- publishes actions inside of a transaction
*/
int
pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt)
{
return pmemobj_tx_xpublish(actv, actvcnt, 0);
}
/*
* pmemobj_tx_xlog_append_buffer -- append user allocated buffer to the ulog
*/
int
pmemobj_tx_xlog_append_buffer(enum pobj_log_type type, void *addr, size_t size,
uint64_t flags)
{
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
flags |= tx_abort_on_failure_flag(tx);
if (flags & ~POBJ_XLOG_APPEND_BUFFER_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_XLOG_APPEND_BUFFER_VALID_FLAGS);
return obj_tx_fail_err(EINVAL, flags);
}
PMEMOBJ_API_START();
int err;
struct tx_data *td = PMDK_SLIST_FIRST(&tx->tx_entries);
err = tx_construct_user_buffer(tx, addr, size, type,
PMDK_SLIST_NEXT(td, tx_entry) == NULL, flags);
PMEMOBJ_API_END();
return err;
}
/*
* pmemobj_tx_log_append_buffer -- append user allocated buffer to the ulog
*/
int
pmemobj_tx_log_append_buffer(enum pobj_log_type type, void *addr, size_t size)
{
return pmemobj_tx_xlog_append_buffer(type, addr, size, 0);
}
/*
* pmemobj_tx_log_auto_alloc -- enable/disable automatic ulog allocation
*/
int
pmemobj_tx_log_auto_alloc(enum pobj_log_type type, int on_off)
{
struct tx *tx = get_tx();
ASSERT_TX_STAGE_WORK(tx);
struct operation_context *ctx = type == TX_LOG_TYPE_INTENT ?
tx->lane->external : tx->lane->undo;
operation_set_auto_reserve(ctx, on_off);
return 0;
}
/*
* pmemobj_tx_log_snapshots_max_size -- calculates the maximum
* size of a buffer which will be able to hold nsizes snapshots,
* each of size from sizes array
*/
size_t
pmemobj_tx_log_snapshots_max_size(size_t *sizes, size_t nsizes)
{
LOG(3, NULL);
/* each buffer has its header */
size_t result = TX_SNAPSHOT_LOG_BUFFER_OVERHEAD;
for (size_t i = 0; i < nsizes; ++i) {
/* check for overflow */
if (sizes[i] + TX_SNAPSHOT_LOG_ENTRY_OVERHEAD +
TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT < sizes[i])
goto err_overflow;
/* each entry has its header */
size_t size =
ALIGN_UP(sizes[i] + TX_SNAPSHOT_LOG_ENTRY_OVERHEAD,
TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT);
/* check for overflow */
if (result + size < result)
goto err_overflow;
/* sum up */
result += size;
}
/*
* if the result is bigger than a single allocation it must be divided
* into multiple allocations where each of them will have its own buffer
* header and entry header
*/
size_t allocs_overhead = (result / PMEMOBJ_MAX_ALLOC_SIZE) *
(TX_SNAPSHOT_LOG_BUFFER_OVERHEAD + TX_SNAPSHOT_LOG_ENTRY_OVERHEAD);
/* check for overflow */
if (result + allocs_overhead < result)
goto err_overflow;
result += allocs_overhead;
/* SIZE_MAX is a special value */
if (result == SIZE_MAX)
goto err_overflow;
return result;
err_overflow:
errno = ERANGE;
return SIZE_MAX;
}
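/*
 * Example (sketch): sizing a user-supplied snapshot-log buffer for two
 * snapshots of 64 and 200 bytes; "pop" and the on-pool buffer "buf" are
 * assumptions, not defined in this file:
 *
 *	size_t sizes[] = {64, 200};
 *	size_t req = pmemobj_tx_log_snapshots_max_size(sizes, 2);
 *	// inside TX_STAGE_WORK, with buf of at least req bytes on the pool:
 *	pmemobj_tx_log_append_buffer(TX_LOG_TYPE_SNAPSHOT, buf, req);
 */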
/*
* pmemobj_tx_log_intents_max_size -- calculates the maximum size of a buffer
* which will be able to hold nintents
*/
size_t
pmemobj_tx_log_intents_max_size(size_t nintents)
{
LOG(3, NULL);
/* check for overflow */
if (nintents > SIZE_MAX / TX_INTENT_LOG_ENTRY_OVERHEAD)
goto err_overflow;
/* each entry has its header */
size_t entries_overhead = nintents * TX_INTENT_LOG_ENTRY_OVERHEAD;
/* check for overflow */
if (entries_overhead + TX_INTENT_LOG_BUFFER_ALIGNMENT
< entries_overhead)
goto err_overflow;
/* the whole buffer is aligned */
size_t result =
ALIGN_UP(entries_overhead, TX_INTENT_LOG_BUFFER_ALIGNMENT);
/* check for overflow */
if (result + TX_INTENT_LOG_BUFFER_OVERHEAD < result)
goto err_overflow;
/* add a buffer overhead */
result += TX_INTENT_LOG_BUFFER_OVERHEAD;
/*
* if the result is bigger than a single allocation it must be divided
* into multiple allocations where each of them will have its own buffer
* header and entry header
*/
size_t allocs_overhead = (result / PMEMOBJ_MAX_ALLOC_SIZE) *
(TX_INTENT_LOG_BUFFER_OVERHEAD + TX_INTENT_LOG_ENTRY_OVERHEAD);
/* check for overflow */
if (result + allocs_overhead < result)
goto err_overflow;
result += allocs_overhead;
/* SIZE_MAX is a special value */
if (result == SIZE_MAX)
goto err_overflow;
return result;
err_overflow:
errno = ERANGE;
return SIZE_MAX;
}
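/*
 * Example (sketch): the intent-log counterpart, for a transaction that will
 * publish at most 16 reservations; "buf" is again an assumed on-pool buffer:
 *
 *	size_t req = pmemobj_tx_log_intents_max_size(16);
 *	pmemobj_tx_log_append_buffer(TX_LOG_TYPE_INTENT, buf, req);
 */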
/*
* pmemobj_tx_set_user_data -- sets volatile pointer to the user data for the
* current transaction
*/
void
pmemobj_tx_set_user_data(void *data)
{
LOG(3, "data %p", data);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
tx->user_data = data;
}
/*
* pmemobj_tx_get_user_data -- gets volatile pointer to the user data associated
* with the current transaction
*/
void *
pmemobj_tx_get_user_data(void)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
return tx->user_data;
}
/*
* pmemobj_tx_set_failure_behavior -- enables or disables automatic transaction
* abort in case of an error
*/
void
pmemobj_tx_set_failure_behavior(enum pobj_tx_failure_behavior behavior)
{
LOG(3, "behavior %d", behavior);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
txd->failure_behavior = behavior;
}
/*
* pmemobj_tx_get_failure_behavior -- returns enum specifying failure event
* for the current transaction.
*/
enum pobj_tx_failure_behavior
pmemobj_tx_get_failure_behavior(void)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
struct tx_data *txd = PMDK_SLIST_FIRST(&tx->tx_entries);
return txd->failure_behavior;
}
/*
* CTL_READ_HANDLER(size) -- gets the cache size transaction parameter
*/
static int
CTL_READ_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t *arg_out = arg;
*arg_out = (ssize_t)pop->tx_params->cache_size;
return 0;
}
/*
* CTL_WRITE_HANDLER(size) -- sets the cache size transaction parameter
*/
static int
CTL_WRITE_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(int *)arg;
if (arg_in < 0 || arg_in > (ssize_t)PMEMOBJ_MAX_ALLOC_SIZE) {
errno = EINVAL;
ERR("invalid cache size, must be between 0 and max alloc size");
return -1;
}
size_t argu = (size_t)arg_in;
pop->tx_params->cache_size = argu;
return 0;
}
static const struct ctl_argument CTL_ARG(size) = CTL_ARG_LONG_LONG;
/*
* CTL_READ_HANDLER(threshold) -- gets the cache threshold transaction parameter
*/
static int
CTL_READ_HANDLER(threshold)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
LOG(1, "tx.cache.threshold parameter is deprecated");
return 0;
}
/*
* CTL_WRITE_HANDLER(threshold) -- deprecated
*/
static int
CTL_WRITE_HANDLER(threshold)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
LOG(1, "tx.cache.threshold parameter is deprecated");
return 0;
}
static const struct ctl_argument CTL_ARG(threshold) = CTL_ARG_LONG_LONG;
static const struct ctl_node CTL_NODE(cache)[] = {
CTL_LEAF_RW(size),
CTL_LEAF_RW(threshold),
CTL_NODE_END
};
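/*
 * Example (sketch): these knobs are reachable through the public ctl
 * interface; assuming an open pool "pop":
 *
 *	long long cache_size = 1 << 20;
 *	pmemobj_ctl_set(pop, "tx.cache.size", &cache_size);
 */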
/*
* CTL_READ_HANDLER(skip_expensive_checks) -- returns "skip_expensive_checks"
* var from pool ctl
*/
static int
CTL_READ_HANDLER(skip_expensive_checks)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->tx_debug_skip_expensive_checks;
return 0;
}
/*
* CTL_WRITE_HANDLER(skip_expensive_checks) -- stores "skip_expensive_checks"
* var in pool ctl
*/
static int
CTL_WRITE_HANDLER(skip_expensive_checks)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->tx_debug_skip_expensive_checks = arg_in;
return 0;
}
static const struct ctl_argument CTL_ARG(skip_expensive_checks) =
CTL_ARG_BOOLEAN;
/*
* CTL_READ_HANDLER(verify_user_buffers) -- returns "ulog_user_buffers.verify"
* variable from the pool
*/
static int
CTL_READ_HANDLER(verify_user_buffers)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->ulog_user_buffers.verify;
return 0;
}
/*
* CTL_WRITE_HANDLER(verify_user_buffers) -- sets "ulog_user_buffers.verify"
* variable in the pool
*/
static int
CTL_WRITE_HANDLER(verify_user_buffers)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->ulog_user_buffers.verify = arg_in;
return 0;
}
static const struct ctl_argument CTL_ARG(verify_user_buffers) =
CTL_ARG_BOOLEAN;
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_LEAF_RW(skip_expensive_checks),
CTL_LEAF_RW(verify_user_buffers),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(queue_depth) -- returns the depth of the post commit queue
*/
static int
CTL_READ_HANDLER(queue_depth)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
/*
* CTL_WRITE_HANDLER(queue_depth) -- sets the depth of the post commit queue
*/
static int
CTL_WRITE_HANDLER(queue_depth)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
static const struct ctl_argument CTL_ARG(queue_depth) = CTL_ARG_INT;
/*
* CTL_READ_HANDLER(worker) -- launches the post commit worker thread function
*/
static int
CTL_READ_HANDLER(worker)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
/*
* CTL_READ_HANDLER(stop) -- stops all post commit workers
*/
static int
CTL_READ_HANDLER(stop)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
static const struct ctl_node CTL_NODE(post_commit)[] = {
CTL_LEAF_RW(queue_depth),
CTL_LEAF_RO(worker),
CTL_LEAF_RO(stop),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(tx)[] = {
CTL_CHILD(debug),
CTL_CHILD(cache),
CTL_CHILD(post_commit),
CTL_NODE_END
};
/*
* tx_ctl_register -- registers ctl nodes for "tx" module
*/
void
tx_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, tx);
}
| 53,446 | 21.437867 | 114 |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/heap.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* heap.c -- heap implementation
*/
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <float.h>
#include "queue.h"
#include "heap.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "valgrind_internal.h"
#include "recycler.h"
#include "container_ravl.h"
#include "container_seglists.h"
#include "alloc_class.h"
#include "os_thread.h"
#include "set.h"
#define MAX_RUN_LOCKS MAX_CHUNK
#define MAX_RUN_LOCKS_VG 1024 /* avoid perf issues /w drd */
/*
* This is the value by which the heap might grow once we hit an OOM.
*/
#define HEAP_DEFAULT_GROW_SIZE (1 << 27) /* 128 megabytes */
#define MAX_DEFAULT_ARENAS (1 << 10) /* 1024 arenas */
struct arenas {
VEC(, struct arena *) vec;
size_t nactive;
/*
* When nesting with other locks, this one must be acquired first,
* prior to locking any buckets or memory blocks.
*/
os_mutex_t lock;
/* stores a pointer to one of the arenas */
os_tls_key_t thread;
};
/*
* Arenas store the collection of buckets for allocation classes.
* Each thread is assigned an arena on its first allocator operation
* if arena is set to auto.
*/
struct arena {
/* one bucket per allocation class */
struct bucket *buckets[MAX_ALLOCATION_CLASSES];
/*
* Decides whether the arena can be
* automatically assigned to a thread.
*/
int automatic;
size_t nthreads;
struct arenas *arenas;
};
struct heap_rt {
struct alloc_class_collection *alloc_classes;
	/* DON'T use these two variables directly! */
struct bucket *default_bucket;
struct arenas arenas;
struct recycler *recyclers[MAX_ALLOCATION_CLASSES];
os_mutex_t run_locks[MAX_RUN_LOCKS];
unsigned nlocks;
unsigned nzones;
unsigned zones_exhausted;
};
/*
* heap_arenas_init - (internal) initialize generic arenas info
*/
static int
heap_arenas_init(struct arenas *arenas)
{
util_mutex_init(&arenas->lock);
VEC_INIT(&arenas->vec);
arenas->nactive = 0;
if (VEC_RESERVE(&arenas->vec, MAX_DEFAULT_ARENAS) == -1)
return -1;
return 0;
}
/*
* heap_arenas_fini - (internal) destroy generic arenas info
*/
static void
heap_arenas_fini(struct arenas *arenas)
{
util_mutex_destroy(&arenas->lock);
VEC_DELETE(&arenas->vec);
}
/*
* heap_alloc_classes -- returns the allocation classes collection
*/
struct alloc_class_collection *
heap_alloc_classes(struct palloc_heap *heap)
{
return heap->rt ? heap->rt->alloc_classes : NULL;
}
/*
* heap_arena_delete -- (internal) destroys arena instance
*/
static void
heap_arena_delete(struct arena *arena)
{
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i)
if (arena->buckets[i] != NULL)
bucket_delete(arena->buckets[i]);
Free(arena);
}
/*
* heap_arena_new -- (internal) initializes arena instance
*/
static struct arena *
heap_arena_new(struct palloc_heap *heap, int automatic)
{
struct heap_rt *rt = heap->rt;
struct arena *arena = Zalloc(sizeof(struct arena));
if (arena == NULL) {
ERR("!heap: arena malloc error");
return NULL;
}
arena->nthreads = 0;
arena->automatic = automatic;
arena->arenas = &heap->rt->arenas;
COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX);
for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *ac =
alloc_class_by_id(rt->alloc_classes, i);
if (ac != NULL) {
arena->buckets[i] =
bucket_new(container_new_seglists(heap), ac);
if (arena->buckets[i] == NULL)
goto error_bucket_create;
} else {
arena->buckets[i] = NULL;
}
}
return arena;
error_bucket_create:
heap_arena_delete(arena);
return NULL;
}
/*
* heap_get_best_class -- returns the alloc class that best fits the
* requested size
*/
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size)
{
return alloc_class_by_alloc_size(heap->rt->alloc_classes, size);
}
/*
* heap_arena_thread_detach -- detaches arena from the current thread
*
* Must be called with arenas lock taken.
*/
static void
heap_arena_thread_detach(struct arena *a)
{
/*
* Even though this is under a lock, nactive variable can also be read
* concurrently from the recycler (without the arenas lock).
* That's why we are using an atomic operation.
*/
if ((--a->nthreads) == 0)
util_fetch_and_sub64(&a->arenas->nactive, 1);
}
/*
* heap_arena_thread_attach -- assign arena to the current thread
*
* Must be called with arenas lock taken.
*/
static void
heap_arena_thread_attach(struct palloc_heap *heap, struct arena *a)
{
struct heap_rt *h = heap->rt;
struct arena *thread_arena = os_tls_get(h->arenas.thread);
if (thread_arena)
heap_arena_thread_detach(thread_arena);
ASSERTne(a, NULL);
/*
* Even though this is under a lock, nactive variable can also be read
* concurrently from the recycler (without the arenas lock).
* That's why we are using an atomic operation.
*/
if ((a->nthreads++) == 0)
util_fetch_and_add64(&a->arenas->nactive, 1);
os_tls_set(h->arenas.thread, a);
}
/*
* heap_thread_arena_destructor -- (internal) removes arena thread assignment
*/
static void
heap_thread_arena_destructor(void *arg)
{
struct arena *a = arg;
os_mutex_lock(&a->arenas->lock);
heap_arena_thread_detach(a);
os_mutex_unlock(&a->arenas->lock);
}
/*
* heap_get_arena_by_id -- returns arena by id
*
* Must be called with arenas lock taken.
*/
static struct arena *
heap_get_arena_by_id(struct palloc_heap *heap, unsigned arena_id)
{
return VEC_ARR(&heap->rt->arenas.vec)[arena_id - 1];
}
/*
* heap_thread_arena_assign -- (internal) assigns the least used arena
* to current thread
*
* To avoid complexities with regards to races in the search for the least
* used arena, a lock is used, but the nthreads counter of the arena is still
* bumped using atomic instruction because it can happen in parallel to a
* destructor of a thread, which also touches that variable.
*/
static struct arena *
heap_thread_arena_assign(struct palloc_heap *heap)
{
util_mutex_lock(&heap->rt->arenas.lock);
struct arena *least_used = NULL;
ASSERTne(VEC_SIZE(&heap->rt->arenas.vec), 0);
struct arena *a;
VEC_FOREACH(a, &heap->rt->arenas.vec) {
if (!a->automatic)
continue;
if (least_used == NULL ||
a->nthreads < least_used->nthreads)
least_used = a;
}
LOG(4, "assigning %p arena to current thread", least_used);
/* at least one automatic arena must exist */
ASSERTne(least_used, NULL);
heap_arena_thread_attach(heap, least_used);
util_mutex_unlock(&heap->rt->arenas.lock);
return least_used;
}
/*
* heap_thread_arena -- (internal) returns the arena assigned to the current
* thread
*/
static struct arena *
heap_thread_arena(struct palloc_heap *heap)
{
struct arena *a;
if ((a = os_tls_get(heap->rt->arenas.thread)) == NULL)
a = heap_thread_arena_assign(heap);
return a;
}
/*
* heap_get_thread_arena_id -- returns the arena id assigned to the current
* thread
*/
unsigned
heap_get_thread_arena_id(struct palloc_heap *heap)
{
unsigned arena_id = 1;
struct arena *arenap = heap_thread_arena(heap);
struct arena *arenav;
struct heap_rt *rt = heap->rt;
util_mutex_lock(&rt->arenas.lock);
VEC_FOREACH(arenav, &heap->rt->arenas.vec) {
if (arenav == arenap) {
util_mutex_unlock(&rt->arenas.lock);
return arena_id;
}
arena_id++;
}
util_mutex_unlock(&rt->arenas.lock);
ASSERT(0);
return arena_id;
}
/*
* heap_bucket_acquire -- fetches by arena or by id a bucket exclusive
* for the thread until heap_bucket_release is called
*/
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, uint8_t class_id,
uint16_t arena_id)
{
struct heap_rt *rt = heap->rt;
struct bucket *b;
if (class_id == DEFAULT_ALLOC_CLASS_ID) {
b = rt->default_bucket;
goto out;
}
if (arena_id == HEAP_ARENA_PER_THREAD) {
struct arena *arena = heap_thread_arena(heap);
ASSERTne(arena->buckets, NULL);
b = arena->buckets[class_id];
} else {
b = (VEC_ARR(&heap->rt->arenas.vec)
[arena_id - 1])->buckets[class_id];
}
out:
util_mutex_lock(&b->lock);
return b;
}
/*
* heap_bucket_release -- puts the bucket back into the heap
*/
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b)
{
util_mutex_unlock(&b->lock);
}
/*
* heap_get_run_lock -- returns the lock associated with memory block
*/
os_mutex_t *
heap_get_run_lock(struct palloc_heap *heap, uint32_t chunk_id)
{
return &heap->rt->run_locks[chunk_id % heap->rt->nlocks];
}
/*
* heap_max_zone -- (internal) calculates how many zones can the heap fit
*/
static unsigned
heap_max_zone(size_t size)
{
unsigned max_zone = 0;
size -= sizeof(struct heap_header);
while (size >= ZONE_MIN_SIZE) {
max_zone++;
size -= size <= ZONE_MAX_SIZE ? size : ZONE_MAX_SIZE;
}
return max_zone;
}
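/*
 * Example (sketch): with the stock ~16 GiB ZONE_MAX_SIZE, a 100 GiB heap is
 * carved into six full zones plus one smaller trailing zone; the loop above
 * keeps peeling off ZONE_MAX_SIZE-sized zones until what remains is smaller
 * than ZONE_MIN_SIZE.
 */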
/*
* zone_calc_size_idx -- (internal) calculates zone size index
*/
static uint32_t
zone_calc_size_idx(uint32_t zone_id, unsigned max_zone, size_t heap_size)
{
ASSERT(max_zone > 0);
if (zone_id < max_zone - 1)
return MAX_CHUNK;
ASSERT(heap_size >= zone_id * ZONE_MAX_SIZE);
size_t zone_raw_size = heap_size - zone_id * ZONE_MAX_SIZE;
ASSERT(zone_raw_size >= (sizeof(struct zone_header) +
sizeof(struct chunk_header) * MAX_CHUNK) +
sizeof(struct heap_header));
zone_raw_size -= sizeof(struct zone_header) +
sizeof(struct chunk_header) * MAX_CHUNK +
sizeof(struct heap_header);
size_t zone_size_idx = zone_raw_size / CHUNKSIZE;
ASSERT(zone_size_idx <= UINT32_MAX);
return (uint32_t)zone_size_idx;
}
/*
* heap_zone_init -- (internal) writes zone's first chunk and header
*/
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id,
uint32_t first_chunk_id)
{
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
uint32_t size_idx = zone_calc_size_idx(zone_id, heap->rt->nzones,
*heap->sizep);
ASSERT(size_idx > first_chunk_id);
memblock_huge_init(heap, first_chunk_id, zone_id,
size_idx - first_chunk_id);
struct zone_header nhdr = {
.size_idx = size_idx,
.magic = ZONE_HEADER_MAGIC,
};
z->header = nhdr; /* write the entire header (8 bytes) at once */
pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}
/*
* heap_memblock_insert_block -- (internal) bucket insert wrapper for callbacks
*/
static int
heap_memblock_insert_block(const struct memory_block *m, void *b)
{
return bucket_insert_block(b, m);
}
/*
* heap_run_create -- (internal) initializes a new run on an existing free chunk
*/
static int
heap_run_create(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m)
{
*m = memblock_run_init(heap, m->chunk_id, m->zone_id, &b->aclass->rdsc);
if (m->m_ops->iterate_free(m, heap_memblock_insert_block, b) != 0) {
b->c_ops->rm_all(b->container);
return -1;
}
STATS_INC(heap->stats, transient, heap_run_active,
m->size_idx * CHUNKSIZE);
return 0;
}
/*
* heap_run_reuse -- (internal) reuses existing run
*/
static int
heap_run_reuse(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m)
{
int ret = 0;
ASSERTeq(m->type, MEMORY_BLOCK_RUN);
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
ret = m->m_ops->iterate_free(m, heap_memblock_insert_block, b);
util_mutex_unlock(lock);
if (ret == 0) {
b->active_memory_block->m = *m;
b->active_memory_block->bucket = b;
b->is_active = 1;
util_fetch_and_add64(&b->active_memory_block->nresv, 1);
} else {
b->c_ops->rm_all(b->container);
}
return ret;
}
/*
* heap_free_chunk_reuse -- reuses existing free chunk
*/
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket,
struct memory_block *m)
{
/*
* Perform coalescing just in case there
* are any neighboring free chunks.
*/
struct memory_block nm = heap_coalesce_huge(heap, bucket, m);
if (nm.size_idx != m->size_idx) {
m->m_ops->prep_hdr(&nm, MEMBLOCK_FREE, NULL);
}
*m = nm;
return bucket_insert_block(bucket, m);
}
/*
* heap_run_into_free_chunk -- (internal) creates a new free chunk in place of
* a run.
*/
static void
heap_run_into_free_chunk(struct palloc_heap *heap,
struct bucket *bucket,
struct memory_block *m)
{
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
m->block_off = 0;
m->size_idx = hdr->size_idx;
STATS_SUB(heap->stats, transient, heap_run_active,
m->size_idx * CHUNKSIZE);
/*
* The only thing this could race with is heap_memblock_on_free()
* because that function is called after processing the operation,
* which means that a different thread might immediately call this
* function if the free() made the run empty.
* We could forgo this lock if it weren't for helgrind which needs it
* to establish happens-before relation for the chunk metadata.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
*m = memblock_huge_init(heap, m->chunk_id, m->zone_id, m->size_idx);
heap_free_chunk_reuse(heap, bucket, m);
util_mutex_unlock(lock);
}
/*
* heap_reclaim_run -- checks the run for available memory if unclaimed.
*
* Returns 1 if reclaimed chunk, 0 otherwise.
*/
static int
heap_reclaim_run(struct palloc_heap *heap, struct memory_block *m, int startup)
{
struct chunk_run *run = heap_get_chunk_run(heap, m);
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
struct alloc_class *c = alloc_class_by_run(
heap->rt->alloc_classes,
run->hdr.block_size, hdr->flags, m->size_idx);
struct recycler_element e = recycler_element_new(heap, m);
if (c == NULL) {
uint32_t size_idx = m->size_idx;
struct run_bitmap b;
m->m_ops->get_bitmap(m, &b);
ASSERTeq(size_idx, m->size_idx);
return e.free_space == b.nbits;
}
if (e.free_space == c->rdsc.nallocs)
return 1;
if (startup) {
STATS_INC(heap->stats, transient, heap_run_active,
m->size_idx * CHUNKSIZE);
STATS_INC(heap->stats, transient, heap_run_allocated,
(c->rdsc.nallocs - e.free_space) * run->hdr.block_size);
}
if (recycler_put(heap->rt->recyclers[c->id], m, e) < 0)
ERR("lost runtime tracking info of %u run due to OOM", c->id);
return 0;
}
/*
* heap_reclaim_zone_garbage -- (internal) creates volatile state of unused runs
*/
static void
heap_reclaim_zone_garbage(struct palloc_heap *heap, struct bucket *bucket,
uint32_t zone_id)
{
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
for (uint32_t i = 0; i < z->header.size_idx; ) {
struct chunk_header *hdr = &z->chunk_headers[i];
ASSERT(hdr->size_idx != 0);
struct memory_block m = MEMORY_BLOCK_NONE;
m.zone_id = zone_id;
m.chunk_id = i;
m.size_idx = hdr->size_idx;
memblock_rebuild_state(heap, &m);
m.m_ops->reinit_chunk(&m);
switch (hdr->type) {
case CHUNK_TYPE_RUN:
if (heap_reclaim_run(heap, &m, 1) != 0)
heap_run_into_free_chunk(heap, bucket,
&m);
break;
case CHUNK_TYPE_FREE:
heap_free_chunk_reuse(heap, bucket, &m);
break;
case CHUNK_TYPE_USED:
break;
default:
ASSERT(0);
}
i = m.chunk_id + m.size_idx; /* hdr might have changed */
}
}
/*
* heap_populate_bucket -- (internal) creates volatile state of memory blocks
*/
static int
heap_populate_bucket(struct palloc_heap *heap, struct bucket *bucket)
{
struct heap_rt *h = heap->rt;
/* at this point we are sure that there's no more memory in the heap */
if (h->zones_exhausted == h->nzones)
return ENOMEM;
uint32_t zone_id = h->zones_exhausted++;
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
/* ignore zone and chunk headers */
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(z, sizeof(z->header) +
sizeof(z->chunk_headers));
if (z->header.magic != ZONE_HEADER_MAGIC)
heap_zone_init(heap, zone_id, 0);
heap_reclaim_zone_garbage(heap, bucket, zone_id);
/*
* It doesn't matter that this function might not have found any
* free blocks because there is still potential that subsequent calls
* will find something in later zones.
*/
return 0;
}
/*
* heap_recycle_unused -- recalculate scores in the recycler and turn any
* empty runs into free chunks
*
* If force is not set, this function might effectively be a noop if not enough
* of space was freed.
*/
static int
heap_recycle_unused(struct palloc_heap *heap, struct recycler *recycler,
struct bucket *defb, int force)
{
struct empty_runs r = recycler_recalc(recycler, force);
if (VEC_SIZE(&r) == 0)
return ENOMEM;
struct bucket *nb = defb == NULL ? heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD) : NULL;
ASSERT(defb != NULL || nb != NULL);
struct memory_block *nm;
VEC_FOREACH_BY_PTR(nm, &r) {
heap_run_into_free_chunk(heap, defb ? defb : nb, nm);
}
if (nb != NULL)
heap_bucket_release(heap, nb);
VEC_DELETE(&r);
return 0;
}
/*
* heap_reclaim_garbage -- (internal) creates volatile state of unused runs
*/
static int
heap_reclaim_garbage(struct palloc_heap *heap, struct bucket *bucket)
{
int ret = ENOMEM;
struct recycler *r;
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if ((r = heap->rt->recyclers[i]) == NULL)
continue;
if (heap_recycle_unused(heap, r, bucket, 1) == 0)
ret = 0;
}
return ret;
}
/*
* heap_ensure_huge_bucket_filled --
* (internal) refills the default bucket if needed
*/
static int
heap_ensure_huge_bucket_filled(struct palloc_heap *heap, struct bucket *bucket)
{
if (heap_reclaim_garbage(heap, bucket) == 0)
return 0;
if (heap_populate_bucket(heap, bucket) == 0)
return 0;
int extend;
if ((extend = heap_extend(heap, bucket, heap->growsize)) < 0)
return ENOMEM;
if (extend == 1)
return 0;
/*
* Extending the pool does not automatically add the chunks into the
* runtime state of the bucket - we need to traverse the new zone if
* it was created.
*/
if (heap_populate_bucket(heap, bucket) == 0)
return 0;
return ENOMEM;
}
/*
* heap_bucket_deref_active -- detaches active blocks from the bucket
*/
static int
heap_bucket_deref_active(struct palloc_heap *heap, struct bucket *b)
{
/* get rid of the active block in the bucket */
struct memory_block_reserved **active = &b->active_memory_block;
if (b->is_active) {
b->c_ops->rm_all(b->container);
if (util_fetch_and_sub64(&(*active)->nresv, 1) == 1) {
VALGRIND_ANNOTATE_HAPPENS_AFTER(&(*active)->nresv);
heap_discard_run(heap, &(*active)->m);
} else {
VALGRIND_ANNOTATE_HAPPENS_BEFORE(&(*active)->nresv);
*active = NULL;
}
b->is_active = 0;
}
if (*active == NULL) {
*active = Zalloc(sizeof(struct memory_block_reserved));
if (*active == NULL)
return -1;
}
return 0;
}
/*
* heap_force_recycle -- detaches all memory from arenas, and forces global
* recycling of all memory blocks
*/
void
heap_force_recycle(struct palloc_heap *heap)
{
util_mutex_lock(&heap->rt->arenas.lock);
struct arena *arenap;
VEC_FOREACH(arenap, &heap->rt->arenas.vec) {
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct bucket *b = arenap->buckets[i];
if (b == NULL)
continue;
util_mutex_lock(&b->lock);
/*
* There's no need to check if this fails, as that
* will not prevent progress in this function.
*/
heap_bucket_deref_active(heap, b);
util_mutex_unlock(&b->lock);
}
}
util_mutex_unlock(&heap->rt->arenas.lock);
heap_reclaim_garbage(heap, NULL);
}
/*
* heap_reuse_from_recycler -- (internal) try reusing runs that are currently
* in the recycler
*/
static int
heap_reuse_from_recycler(struct palloc_heap *heap,
struct bucket *b, uint32_t units, int force)
{
struct memory_block m = MEMORY_BLOCK_NONE;
m.size_idx = units;
struct recycler *r = heap->rt->recyclers[b->aclass->id];
if (!force && recycler_get(r, &m) == 0)
return heap_run_reuse(heap, b, &m);
heap_recycle_unused(heap, r, NULL, force);
if (recycler_get(r, &m) == 0)
return heap_run_reuse(heap, b, &m);
return ENOMEM;
}
/*
* heap_discard_run -- puts the memory block back into the global heap.
*/
void
heap_discard_run(struct palloc_heap *heap, struct memory_block *m)
{
if (heap_reclaim_run(heap, m, 0)) {
struct bucket *defb =
heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID, 0);
heap_run_into_free_chunk(heap, defb, m);
heap_bucket_release(heap, defb);
}
}
/*
* heap_ensure_run_bucket_filled -- (internal) refills the bucket if needed
*/
static int
heap_ensure_run_bucket_filled(struct palloc_heap *heap, struct bucket *b,
uint32_t units)
{
ASSERTeq(b->aclass->type, CLASS_RUN);
int ret = 0;
if (heap_bucket_deref_active(heap, b) != 0)
return ENOMEM;
if (heap_reuse_from_recycler(heap, b, units, 0) == 0)
goto out;
/* search in the next zone before attempting to create a new run */
struct bucket *defb = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
heap_populate_bucket(heap, defb);
heap_bucket_release(heap, defb);
if (heap_reuse_from_recycler(heap, b, units, 0) == 0)
goto out;
struct memory_block m = MEMORY_BLOCK_NONE;
m.size_idx = b->aclass->rdsc.size_idx;
defb = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
/* cannot reuse an existing run, create a new one */
if (heap_get_bestfit_block(heap, defb, &m) == 0) {
ASSERTeq(m.block_off, 0);
if (heap_run_create(heap, b, &m) != 0) {
heap_bucket_release(heap, defb);
return ENOMEM;
}
b->active_memory_block->m = m;
b->is_active = 1;
b->active_memory_block->bucket = b;
util_fetch_and_add64(&b->active_memory_block->nresv, 1);
heap_bucket_release(heap, defb);
goto out;
}
heap_bucket_release(heap, defb);
if (heap_reuse_from_recycler(heap, b, units, 0) == 0)
goto out;
ret = ENOMEM;
out:
return ret;
}
/*
* heap_memblock_on_free -- bookkeeping actions executed at every free of a
* block
*/
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m)
{
if (m->type != MEMORY_BLOCK_RUN)
return;
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
struct chunk_run *run = heap_get_chunk_run(heap, m);
ASSERTeq(hdr->type, CHUNK_TYPE_RUN);
struct alloc_class *c = alloc_class_by_run(
heap->rt->alloc_classes,
run->hdr.block_size, hdr->flags, hdr->size_idx);
if (c == NULL)
return;
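	/*
	 * Record this free as "unaccounted" in the class recycler; once enough
	 * such frees accumulate, recycler_recalc() rescores the runs and empty
	 * ones can be turned back into free chunks.
	 */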
recycler_inc_unaccounted(heap->rt->recyclers[c->id], m);
}
/*
* heap_split_block -- (internal) splits unused part of the memory block
*/
static void
heap_split_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m, uint32_t units)
{
ASSERT(units <= UINT16_MAX);
ASSERT(units > 0);
if (b->aclass->type == CLASS_RUN) {
ASSERT((uint64_t)m->block_off + (uint64_t)units <= UINT32_MAX);
struct memory_block r = {m->chunk_id, m->zone_id,
m->size_idx - units, (uint32_t)(m->block_off + units),
NULL, NULL, 0, 0, NULL};
memblock_rebuild_state(heap, &r);
if (bucket_insert_block(b, &r) != 0)
LOG(2,
"failed to allocate memory block runtime tracking info");
} else {
uint32_t new_chunk_id = m->chunk_id + units;
uint32_t new_size_idx = m->size_idx - units;
struct memory_block n = memblock_huge_init(heap,
new_chunk_id, m->zone_id, new_size_idx);
*m = memblock_huge_init(heap, m->chunk_id, m->zone_id, units);
if (bucket_insert_block(b, &n) != 0)
LOG(2,
"failed to allocate memory block runtime tracking info");
}
m->size_idx = units;
}
/*
* heap_get_bestfit_block --
* extracts a memory block of equal size index
*/
int
heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m)
{
uint32_t units = m->size_idx;
while (b->c_ops->get_rm_bestfit(b->container, m) != 0) {
if (b->aclass->type == CLASS_HUGE) {
if (heap_ensure_huge_bucket_filled(heap, b) != 0)
return ENOMEM;
} else {
if (heap_ensure_run_bucket_filled(heap, b, units) != 0)
return ENOMEM;
}
}
ASSERT(m->size_idx >= units);
if (units != m->size_idx)
heap_split_block(heap, b, m, units);
m->m_ops->ensure_header_type(m, b->aclass->header_type);
m->header_type = b->aclass->header_type;
return 0;
}
/*
* heap_get_adjacent_free_block -- locates adjacent free memory block in heap
*/
static int
heap_get_adjacent_free_block(struct palloc_heap *heap,
const struct memory_block *in, struct memory_block *out, int prev)
{
struct zone *z = ZID_TO_ZONE(heap->layout, in->zone_id);
struct chunk_header *hdr = &z->chunk_headers[in->chunk_id];
out->zone_id = in->zone_id;
if (prev) {
if (in->chunk_id == 0)
return ENOENT;
struct chunk_header *prev_hdr =
&z->chunk_headers[in->chunk_id - 1];
out->chunk_id = in->chunk_id - prev_hdr->size_idx;
if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
return ENOENT;
out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
} else { /* next */
if (in->chunk_id + hdr->size_idx == z->header.size_idx)
return ENOENT;
out->chunk_id = in->chunk_id + hdr->size_idx;
if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
return ENOENT;
out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
}
memblock_rebuild_state(heap, out);
return 0;
}
/*
* heap_coalesce -- (internal) merges adjacent memory blocks
*/
static struct memory_block
heap_coalesce(struct palloc_heap *heap,
const struct memory_block *blocks[], int n)
{
struct memory_block ret = MEMORY_BLOCK_NONE;
const struct memory_block *b = NULL;
ret.size_idx = 0;
for (int i = 0; i < n; ++i) {
if (blocks[i] == NULL)
continue;
b = b ? b : blocks[i];
ret.size_idx += blocks[i] ? blocks[i]->size_idx : 0;
}
ASSERTne(b, NULL);
ret.chunk_id = b->chunk_id;
ret.zone_id = b->zone_id;
ret.block_off = b->block_off;
memblock_rebuild_state(heap, &ret);
return ret;
}
/*
* heap_coalesce_huge -- finds neighbours of a huge block, removes them from the
* volatile state and returns the resulting block
*/
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m)
{
const struct memory_block *blocks[3] = {NULL, m, NULL};
struct memory_block prev = MEMORY_BLOCK_NONE;
if (heap_get_adjacent_free_block(heap, m, &prev, 1) == 0 &&
b->c_ops->get_rm_exact(b->container, &prev) == 0) {
blocks[0] = &prev;
}
struct memory_block next = MEMORY_BLOCK_NONE;
if (heap_get_adjacent_free_block(heap, m, &next, 0) == 0 &&
b->c_ops->get_rm_exact(b->container, &next) == 0) {
blocks[2] = &next;
}
return heap_coalesce(heap, blocks, 3);
}
/*
* heap_end -- returns first address after heap
*/
void *
heap_end(struct palloc_heap *h)
{
ASSERT(h->rt->nzones > 0);
struct zone *last_zone = ZID_TO_ZONE(h->layout, h->rt->nzones - 1);
return &last_zone->chunks[last_zone->header.size_idx];
}
/*
* heap_arena_create -- create a new arena, push it to the vector
* and return new arena id or -1 on failure
*/
int
heap_arena_create(struct palloc_heap *heap)
{
struct heap_rt *h = heap->rt;
struct arena *arena = heap_arena_new(heap, 0);
if (arena == NULL)
return -1;
util_mutex_lock(&h->arenas.lock);
if (VEC_PUSH_BACK(&h->arenas.vec, arena))
goto err_push_back;
int ret = (int)VEC_SIZE(&h->arenas.vec);
util_mutex_unlock(&h->arenas.lock);
return ret;
err_push_back:
util_mutex_unlock(&h->arenas.lock);
heap_arena_delete(arena);
return -1;
}
/*
* heap_get_narenas_total -- returns the number of all arenas in the heap
*/
unsigned
heap_get_narenas_total(struct palloc_heap *heap)
{
struct heap_rt *h = heap->rt;
util_mutex_lock(&h->arenas.lock);
unsigned total = (unsigned)VEC_SIZE(&h->arenas.vec);
util_mutex_unlock(&h->arenas.lock);
return total;
}
/*
* heap_get_narenas_max -- returns the max number of arenas
*/
unsigned
heap_get_narenas_max(struct palloc_heap *heap)
{
struct heap_rt *h = heap->rt;
util_mutex_lock(&h->arenas.lock);
unsigned max = (unsigned)VEC_CAPACITY(&h->arenas.vec);
util_mutex_unlock(&h->arenas.lock);
return max;
}
/*
* heap_set_narenas_max -- change the max number of arenas
*/
int
heap_set_narenas_max(struct palloc_heap *heap, unsigned size)
{
struct heap_rt *h = heap->rt;
int ret = -1;
util_mutex_lock(&h->arenas.lock);
unsigned capacity = (unsigned)VEC_CAPACITY(&h->arenas.vec);
if (size < capacity) {
LOG(2, "cannot decrease max number of arenas");
goto out;
} else if (size == capacity) {
ret = 0;
goto out;
}
ret = VEC_RESERVE(&h->arenas.vec, size);
out:
util_mutex_unlock(&h->arenas.lock);
return ret;
}
/*
* heap_get_narenas_auto -- returns the number of all automatic arenas
*/
unsigned
heap_get_narenas_auto(struct palloc_heap *heap)
{
struct heap_rt *h = heap->rt;
struct arena *arena;
unsigned narenas = 0;
util_mutex_lock(&h->arenas.lock);
VEC_FOREACH(arena, &h->arenas.vec) {
if (arena->automatic)
narenas++;
}
util_mutex_unlock(&h->arenas.lock);
return narenas;
}
/*
* heap_get_arena_buckets -- returns a pointer to buckets from the arena
*/
struct bucket **
heap_get_arena_buckets(struct palloc_heap *heap, unsigned arena_id)
{
util_mutex_lock(&heap->rt->arenas.lock);
struct arena *a = heap_get_arena_by_id(heap, arena_id);
util_mutex_unlock(&heap->rt->arenas.lock);
return a->buckets;
}
/*
* heap_get_arena_auto -- returns arena automatic value
*/
int
heap_get_arena_auto(struct palloc_heap *heap, unsigned arena_id)
{
util_mutex_lock(&heap->rt->arenas.lock);
struct arena *a = heap_get_arena_by_id(heap, arena_id);
util_mutex_unlock(&heap->rt->arenas.lock);
return a->automatic;
}
/*
* heap_set_arena_auto -- sets arena automatic value
*/
int
heap_set_arena_auto(struct palloc_heap *heap, unsigned arena_id,
int automatic)
{
unsigned nautomatic = 0;
struct arena *a;
struct heap_rt *h = heap->rt;
int ret = 0;
util_mutex_lock(&h->arenas.lock);
VEC_FOREACH(a, &h->arenas.vec)
if (a->automatic)
nautomatic++;
a = VEC_ARR(&heap->rt->arenas.vec)[arena_id - 1];
if (!automatic && nautomatic <= 1 && a->automatic) {
ERR("at least one automatic arena must exist");
ret = -1;
goto out;
}
a->automatic = automatic;
out:
util_mutex_unlock(&h->arenas.lock);
return ret;
}
/*
* heap_set_arena_thread -- assign arena with given id to the current thread
*/
void
heap_set_arena_thread(struct palloc_heap *heap, unsigned arena_id)
{
os_mutex_lock(&heap->rt->arenas.lock);
heap_arena_thread_attach(heap, heap_get_arena_by_id(heap, arena_id));
os_mutex_unlock(&heap->rt->arenas.lock);
}
/*
* heap_get_procs -- (internal) returns the number of arenas to create
*/
static unsigned
heap_get_procs(void)
{
long cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (cpus < 1)
cpus = 1;
unsigned arenas = (unsigned)cpus;
LOG(4, "creating %u arenas", arenas);
return arenas;
}
/*
* heap_create_alloc_class_buckets -- allocates all cache bucket
* instances of the specified type
*/
int
heap_create_alloc_class_buckets(struct palloc_heap *heap, struct alloc_class *c)
{
struct heap_rt *h = heap->rt;
if (c->type == CLASS_RUN) {
h->recyclers[c->id] = recycler_new(heap, c->rdsc.nallocs,
&heap->rt->arenas.nactive);
if (h->recyclers[c->id] == NULL)
goto error_recycler_new;
}
size_t i;
struct arena *arena;
VEC_FOREACH_BY_POS(i, &h->arenas.vec) {
arena = VEC_ARR(&h->arenas.vec)[i];
if (arena->buckets[c->id] == NULL)
arena->buckets[c->id] = bucket_new(
container_new_seglists(heap), c);
if (arena->buckets[c->id] == NULL)
goto error_cache_bucket_new;
}
return 0;
error_cache_bucket_new:
recycler_delete(h->recyclers[c->id]);
for (; i != 0; --i)
bucket_delete(VEC_ARR(&h->arenas.vec)[i - 1]->buckets[c->id]);
error_recycler_new:
return -1;
}
/*
* heap_buckets_init -- (internal) initializes bucket instances
*/
int
heap_buckets_init(struct palloc_heap *heap)
{
struct heap_rt *h = heap->rt;
for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = alloc_class_by_id(h->alloc_classes, i);
if (c != NULL) {
if (heap_create_alloc_class_buckets(heap, c) != 0)
goto error_bucket_create;
}
}
h->default_bucket = bucket_new(container_new_ravl(heap),
alloc_class_by_id(h->alloc_classes, DEFAULT_ALLOC_CLASS_ID));
if (h->default_bucket == NULL)
goto error_bucket_create;
return 0;
error_bucket_create: {
struct arena *arena;
VEC_FOREACH(arena, &h->arenas.vec)
heap_arena_delete(arena);
}
return -1;
}
/*
* heap_extend -- extend the heap by the given size
*
* Returns 1 if the current zone has been extended (and the resulting free
* chunk reinserted into the given bucket), 0 if a new zone had to be
* created, -1 if unsuccessful.
*
* If this function has to create a new zone, it will NOT populate buckets with
* the new chunks.
*/
int
heap_extend(struct palloc_heap *heap, struct bucket *b, size_t size)
{
void *nptr = util_pool_extend(heap->set, &size, PMEMOBJ_MIN_PART);
if (nptr == NULL)
return -1;
*heap->sizep += size;
pmemops_persist(&heap->p_ops, heap->sizep, sizeof(*heap->sizep));
/*
* If interrupted after changing the size, the heap will just grow
* automatically on the next heap_boot.
*/
uint32_t nzones = heap_max_zone(*heap->sizep);
uint32_t zone_id = nzones - 1;
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
uint32_t chunk_id = heap->rt->nzones == nzones ? z->header.size_idx : 0;
heap_zone_init(heap, zone_id, chunk_id);
if (heap->rt->nzones != nzones) {
heap->rt->nzones = nzones;
return 0;
}
struct chunk_header *hdr = &z->chunk_headers[chunk_id];
struct memory_block m = MEMORY_BLOCK_NONE;
m.chunk_id = chunk_id;
m.zone_id = zone_id;
m.block_off = 0;
m.size_idx = hdr->size_idx;
memblock_rebuild_state(heap, &m);
heap_free_chunk_reuse(heap, b, &m);
return 1;
}
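/*
 * Hypothetical caller sketch for the three-way return value above (names are
 * illustrative only):
 *
 *	int extend = heap_extend(heap, bucket, heap->growsize);
 *	if (extend < 0)
 *		return ENOMEM;	// the pool set could not grow
 *	if (extend == 1)
 *		return 0;	// new chunk already reinserted into 'bucket'
 *	// extend == 0: a new zone exists, but buckets are still empty and
 *	// the caller has to reclaim/populate them itself.
 */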
/*
* heap_zone_update_if_needed -- updates the zone metadata if the pool has been
* extended.
*/
static void
heap_zone_update_if_needed(struct palloc_heap *heap)
{
struct zone *z;
for (uint32_t i = 0; i < heap->rt->nzones; ++i) {
z = ZID_TO_ZONE(heap->layout, i);
if (z->header.magic != ZONE_HEADER_MAGIC)
continue;
size_t size_idx = zone_calc_size_idx(i, heap->rt->nzones,
*heap->sizep);
if (size_idx == z->header.size_idx)
continue;
heap_zone_init(heap, i, z->header.size_idx);
}
}
/*
* heap_boot -- opens the heap region of the pmemobj pool
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep, void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set)
{
/*
* The size can be 0 if interrupted during heap_init or this is the
* first time booting the heap with the persistent size field.
*/
if (*sizep == 0) {
*sizep = heap_size;
pmemops_persist(p_ops, sizep, sizeof(*sizep));
}
if (heap_size < *sizep) {
ERR("mapped region smaller than the heap size");
return EINVAL;
}
struct heap_rt *h = Malloc(sizeof(*h));
int err;
if (h == NULL) {
err = ENOMEM;
goto error_heap_malloc;
}
h->alloc_classes = alloc_class_collection_new();
if (h->alloc_classes == NULL) {
err = ENOMEM;
goto error_alloc_classes_new;
}
unsigned narenas_default = heap_get_procs();
if (heap_arenas_init(&h->arenas) != 0) {
err = errno;
goto error_arenas_malloc;
}
h->nzones = heap_max_zone(heap_size);
h->zones_exhausted = 0;
h->nlocks = On_valgrind ? MAX_RUN_LOCKS_VG : MAX_RUN_LOCKS;
for (unsigned i = 0; i < h->nlocks; ++i)
util_mutex_init(&h->run_locks[i]);
os_tls_key_create(&h->arenas.thread, heap_thread_arena_destructor);
heap->p_ops = *p_ops;
heap->layout = heap_start;
heap->rt = h;
heap->sizep = sizep;
heap->base = base;
heap->stats = stats;
heap->set = set;
heap->growsize = HEAP_DEFAULT_GROW_SIZE;
heap->alloc_pattern = PALLOC_CTL_DEBUG_NO_PATTERN;
VALGRIND_DO_CREATE_MEMPOOL(heap->layout, 0, 0);
for (unsigned i = 0; i < narenas_default; ++i) {
if (VEC_PUSH_BACK(&h->arenas.vec, heap_arena_new(heap, 1))) {
err = errno;
goto error_vec_reserve;
}
}
for (unsigned i = 0; i < MAX_ALLOCATION_CLASSES; ++i)
h->recyclers[i] = NULL;
heap_zone_update_if_needed(heap);
return 0;
error_vec_reserve:
heap_arenas_fini(&h->arenas);
error_arenas_malloc:
alloc_class_collection_delete(h->alloc_classes);
error_alloc_classes_new:
Free(h);
heap->rt = NULL;
error_heap_malloc:
return err;
}
/*
* heap_write_header -- (internal) creates a clean header
*/
static void
heap_write_header(struct heap_header *hdr)
{
struct heap_header newhdr = {
.signature = HEAP_SIGNATURE,
.major = HEAP_MAJOR,
.minor = HEAP_MINOR,
.unused = 0,
.chunksize = CHUNKSIZE,
.chunks_per_zone = MAX_CHUNK,
.reserved = {0},
.checksum = 0
};
util_checksum(&newhdr, sizeof(newhdr), &newhdr.checksum, 1, 0);
*hdr = newhdr;
}
/*
* heap_init -- initializes the heap
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops)
{
if (heap_size < HEAP_MIN_SIZE)
return EINVAL;
VALGRIND_DO_MAKE_MEM_UNDEFINED(heap_start, heap_size);
struct heap_layout *layout = heap_start;
heap_write_header(&layout->header);
pmemops_persist(p_ops, &layout->header, sizeof(struct heap_header));
unsigned zones = heap_max_zone(heap_size);
for (unsigned i = 0; i < zones; ++i) {
struct zone *zone = ZID_TO_ZONE(layout, i);
pmemops_memset(p_ops, &zone->header, 0,
sizeof(struct zone_header), 0);
pmemops_memset(p_ops, &zone->chunk_headers, 0,
sizeof(struct chunk_header), 0);
/* only explicitly allocated chunks should be accessible */
VALGRIND_DO_MAKE_MEM_NOACCESS(&zone->chunk_headers,
sizeof(struct chunk_header));
}
*sizep = heap_size;
pmemops_persist(p_ops, sizep, sizeof(*sizep));
return 0;
}
/*
* heap_cleanup -- cleans up the volatile heap state
*/
void
heap_cleanup(struct palloc_heap *heap)
{
struct heap_rt *rt = heap->rt;
alloc_class_collection_delete(rt->alloc_classes);
os_tls_key_delete(rt->arenas.thread);
bucket_delete(rt->default_bucket);
struct arena *arena;
VEC_FOREACH(arena, &rt->arenas.vec)
heap_arena_delete(arena);
for (unsigned i = 0; i < rt->nlocks; ++i)
util_mutex_destroy(&rt->run_locks[i]);
heap_arenas_fini(&rt->arenas);
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if (heap->rt->recyclers[i] == NULL)
continue;
recycler_delete(rt->recyclers[i]);
}
VALGRIND_DO_DESTROY_MEMPOOL(heap->layout);
Free(rt);
heap->rt = NULL;
}
/*
* heap_verify_header -- (internal) verifies if the heap header is consistent
*/
static int
heap_verify_header(struct heap_header *hdr)
{
if (util_checksum(hdr, sizeof(*hdr), &hdr->checksum, 0, 0) != 1) {
ERR("heap: invalid header's checksum");
return -1;
}
if (memcmp(hdr->signature, HEAP_SIGNATURE, HEAP_SIGNATURE_LEN) != 0) {
ERR("heap: invalid signature");
return -1;
}
return 0;
}
/*
* heap_verify_zone_header --
* (internal) verifies if the zone header is consistent
*/
static int
heap_verify_zone_header(struct zone_header *hdr)
{
if (hdr->magic != ZONE_HEADER_MAGIC) /* not initialized */
return 0;
if (hdr->size_idx == 0) {
ERR("heap: invalid zone size");
return -1;
}
return 0;
}
/*
* heap_verify_chunk_header --
* (internal) verifies if the chunk header is consistent
*/
static int
heap_verify_chunk_header(struct chunk_header *hdr)
{
if (hdr->type == CHUNK_TYPE_UNKNOWN) {
ERR("heap: invalid chunk type");
return -1;
}
if (hdr->type >= MAX_CHUNK_TYPE) {
ERR("heap: unknown chunk type");
return -1;
}
if (hdr->flags & ~CHUNK_FLAGS_ALL_VALID) {
ERR("heap: invalid chunk flags");
return -1;
}
return 0;
}
/*
* heap_verify_zone -- (internal) verifies if the zone is consistent
*/
static int
heap_verify_zone(struct zone *zone)
{
if (zone->header.magic == 0)
return 0; /* not initialized, and that is OK */
if (zone->header.magic != ZONE_HEADER_MAGIC) {
ERR("heap: invalid zone magic");
return -1;
}
if (heap_verify_zone_header(&zone->header))
return -1;
uint32_t i;
for (i = 0; i < zone->header.size_idx; ) {
if (heap_verify_chunk_header(&zone->chunk_headers[i]))
return -1;
i += zone->chunk_headers[i].size_idx;
}
if (i != zone->header.size_idx) {
ERR("heap: chunk sizes mismatch");
return -1;
}
return 0;
}
/*
* heap_check -- verifies if the heap is consistent and can be opened properly
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_check(void *heap_start, uint64_t heap_size)
{
if (heap_size < HEAP_MIN_SIZE) {
ERR("heap: invalid heap size");
return -1;
}
struct heap_layout *layout = heap_start;
if (heap_verify_header(&layout->header))
return -1;
for (unsigned i = 0; i < heap_max_zone(heap_size); ++i) {
if (heap_verify_zone(ZID_TO_ZONE(layout, i)))
return -1;
}
return 0;
}
/*
* heap_check_remote -- verifies if the heap of a remote pool is consistent
* and can be opened properly
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_check_remote(void *heap_start, uint64_t heap_size, struct remote_ops *ops)
{
if (heap_size < HEAP_MIN_SIZE) {
ERR("heap: invalid heap size");
return -1;
}
struct heap_layout *layout = heap_start;
struct heap_header header;
if (ops->read(ops->ctx, ops->base, &header, &layout->header,
sizeof(struct heap_header))) {
ERR("heap: obj_read_remote error");
return -1;
}
if (heap_verify_header(&header))
return -1;
struct zone *zone_buff = (struct zone *)Malloc(sizeof(struct zone));
if (zone_buff == NULL) {
ERR("heap: zone_buff malloc error");
return -1;
}
for (unsigned i = 0; i < heap_max_zone(heap_size); ++i) {
if (ops->read(ops->ctx, ops->base, zone_buff,
ZID_TO_ZONE(layout, i), sizeof(struct zone))) {
ERR("heap: obj_read_remote error");
goto out;
}
if (heap_verify_zone(zone_buff)) {
goto out;
}
}
Free(zone_buff);
return 0;
out:
Free(zone_buff);
return -1;
}
/*
* heap_zone_foreach_object -- (internal) iterates through objects in a zone
*/
static int
heap_zone_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block *m)
{
struct zone *zone = ZID_TO_ZONE(heap->layout, m->zone_id);
if (zone->header.magic == 0)
return 0;
for (; m->chunk_id < zone->header.size_idx; ) {
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
memblock_rebuild_state(heap, m);
m->size_idx = hdr->size_idx;
if (m->m_ops->iterate_used(m, cb, arg) != 0)
return 1;
m->chunk_id += m->size_idx;
m->block_off = 0;
}
return 0;
}
/*
* heap_foreach_object -- (internal) iterates through objects in the heap
*/
void
heap_foreach_object(struct palloc_heap *heap, object_callback cb, void *arg,
struct memory_block m)
{
for (; m.zone_id < heap->rt->nzones; ++m.zone_id) {
if (heap_zone_foreach_object(heap, cb, arg, &m) != 0)
break;
m.chunk_id = 0;
}
}
#if VG_MEMCHECK_ENABLED
/*
* heap_vg_open -- notifies Valgrind about heap layout
*/
void
heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects)
{
ASSERTne(cb, NULL);
VALGRIND_DO_MAKE_MEM_UNDEFINED(heap->layout, *heap->sizep);
struct heap_layout *layout = heap->layout;
VALGRIND_DO_MAKE_MEM_DEFINED(&layout->header, sizeof(layout->header));
unsigned zones = heap_max_zone(*heap->sizep);
struct memory_block m = MEMORY_BLOCK_NONE;
for (unsigned i = 0; i < zones; ++i) {
struct zone *z = ZID_TO_ZONE(layout, i);
uint32_t chunks;
m.zone_id = i;
m.chunk_id = 0;
VALGRIND_DO_MAKE_MEM_DEFINED(&z->header, sizeof(z->header));
if (z->header.magic != ZONE_HEADER_MAGIC)
continue;
chunks = z->header.size_idx;
for (uint32_t c = 0; c < chunks; ) {
struct chunk_header *hdr = &z->chunk_headers[c];
/* define the header before rebuilding state */
VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
m.chunk_id = c;
m.size_idx = hdr->size_idx;
memblock_rebuild_state(heap, &m);
m.m_ops->vg_init(&m, objects, cb, arg);
m.block_off = 0;
ASSERT(hdr->size_idx > 0);
c += hdr->size_idx;
}
/* mark all unused chunk headers after last as not accessible */
VALGRIND_DO_MAKE_MEM_NOACCESS(&z->chunk_headers[chunks],
(MAX_CHUNK - chunks) * sizeof(struct chunk_header));
}
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/heap_layout.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* heap_layout.h -- internal definitions for heap layout
*/
#ifndef LIBPMEMOBJ_HEAP_LAYOUT_H
#define LIBPMEMOBJ_HEAP_LAYOUT_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_MAJOR 1
#define HEAP_MINOR 0
#define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */
#define CHUNK_BASE_ALIGNMENT 1024
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE)
#define HEAP_SIGNATURE_LEN 16
#define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0"
#define ZONE_HEADER_MAGIC 0xC3F0A2D2
#define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk))
#define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK)
#define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE)
/* Base bitmap values, relevant for both normal and flexible bitmaps */
#define RUN_BITS_PER_VALUE 64U
#define RUN_BASE_METADATA_VALUES\
((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t)))
#define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header))
#define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE)
/*
* Calculates the size in bytes of a single run instance, including bitmap
*/
#define RUN_CONTENT_SIZE_BYTES(size_idx)\
(RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
/* Default bitmap values, specific for old, non-flexible, bitmaps */
#define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */
#define RUN_DEFAULT_BITMAP_VALUES \
(RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES)
#define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_BITMAP_NBITS\
(RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_SIZE \
(CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE)
/*
* Calculates the size in bytes of a single run instance, without bitmap,
* but only for the default fixed-bitmap algorithm
*/
#define RUN_DEFAULT_SIZE_BYTES(size_idx)\
(RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))
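/*
 * Worked example of the constants above (the numbers follow directly from the
 * definitions): CHUNKSIZE is 256 KiB; RUN_DEFAULT_BITMAP_VALUES is
 * 40 - 2 = 38 eight-byte words, so a default (non-flexible) run bitmap takes
 * 304 bytes and tracks RUN_DEFAULT_BITMAP_NBITS = 64 * 38 = 2432 unit blocks;
 * CHUNK_ALIGN_UP rounds up to the next chunk multiple, e.g.
 * CHUNK_ALIGN_UP(1) == CHUNKSIZE and CHUNK_ALIGN_UP(300000) == 2 * CHUNKSIZE.
 */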
enum chunk_flags {
CHUNK_FLAG_COMPACT_HEADER = 0x0001,
CHUNK_FLAG_HEADER_NONE = 0x0002,
CHUNK_FLAG_ALIGNED = 0x0004,
CHUNK_FLAG_FLEX_BITMAP = 0x0008,
};
#define CHUNK_FLAGS_ALL_VALID (\
CHUNK_FLAG_COMPACT_HEADER |\
CHUNK_FLAG_HEADER_NONE |\
CHUNK_FLAG_ALIGNED |\
CHUNK_FLAG_FLEX_BITMAP\
)
enum chunk_type {
CHUNK_TYPE_UNKNOWN,
CHUNK_TYPE_FOOTER, /* not actual chunk type */
CHUNK_TYPE_FREE,
CHUNK_TYPE_USED,
CHUNK_TYPE_RUN,
CHUNK_TYPE_RUN_DATA,
MAX_CHUNK_TYPE
};
struct chunk {
uint8_t data[CHUNKSIZE];
};
struct chunk_run_header {
uint64_t block_size;
uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */
};
struct chunk_run {
struct chunk_run_header hdr;
uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */
};
struct chunk_header {
uint16_t type;
uint16_t flags;
uint32_t size_idx;
};
struct zone_header {
uint32_t magic;
uint32_t size_idx;
uint8_t reserved[56];
};
struct zone {
struct zone_header header;
struct chunk_header chunk_headers[MAX_CHUNK];
struct chunk chunks[];
};
struct heap_header {
char signature[HEAP_SIGNATURE_LEN];
uint64_t major;
uint64_t minor;
uint64_t unused; /* might be garbage */
uint64_t chunksize;
uint64_t chunks_per_zone;
uint8_t reserved[960];
uint64_t checksum;
};
struct heap_layout {
struct heap_header header;
struct zone zone0; /* first element of zones array */
};
#define ALLOC_HDR_SIZE_SHIFT (48ULL)
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1)
struct allocation_header_legacy {
uint8_t unused[8];
uint64_t size;
uint8_t unused2[32];
uint64_t root_size;
uint64_t type_num;
};
#define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact)
struct allocation_header_compact {
uint64_t size;
uint64_t extra;
};
enum header_type {
HEADER_LEGACY,
HEADER_COMPACT,
HEADER_NONE,
MAX_HEADER_TYPES
};
static const size_t header_type_to_size[MAX_HEADER_TYPES] = {
sizeof(struct allocation_header_legacy),
sizeof(struct allocation_header_compact),
0
};
static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = {
(enum chunk_flags)0,
CHUNK_FLAG_COMPACT_HEADER,
CHUNK_FLAG_HEADER_NONE
};
static inline struct zone *
ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id)
{
return (struct zone *)
((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id);
}
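/*
 * Illustrative arithmetic: zones are laid out back to back after the heap
 * header, each occupying ZONE_MAX_SIZE bytes regardless of how many chunks it
 * actually holds, so e.g. ZID_TO_ZONE(layout, 2) is simply
 * (char *)&layout->zone0 + 2 * ZONE_MAX_SIZE.
 */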
static inline struct chunk_header *
GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id];
}
static inline struct chunk *
GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id];
}
static inline struct chunk_run *
GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/alloc_class.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.h -- internal definitions for allocation classes
*/
#ifndef LIBPMEMOBJ_ALLOC_CLASS_H
#define LIBPMEMOBJ_ALLOC_CLASS_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "heap_layout.h"
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_ALLOCATION_CLASSES (UINT8_MAX)
#define DEFAULT_ALLOC_CLASS_ID (0)
#define RUN_UNIT_MAX RUN_BITS_PER_VALUE
struct alloc_class_collection;
enum alloc_class_type {
CLASS_UNKNOWN,
CLASS_HUGE,
CLASS_RUN,
MAX_ALLOC_CLASS_TYPES
};
struct alloc_class {
uint8_t id;
uint16_t flags;
size_t unit_size;
enum header_type header_type;
enum alloc_class_type type;
/* run-specific data */
struct run_descriptor rdsc;
};
struct alloc_class_collection *alloc_class_collection_new(void);
void alloc_class_collection_delete(struct alloc_class_collection *ac);
struct alloc_class *alloc_class_by_run(
struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx);
struct alloc_class *alloc_class_by_alloc_size(
struct alloc_class_collection *ac, size_t size);
struct alloc_class *alloc_class_by_id(
struct alloc_class_collection *ac, uint8_t id);
int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id);
int alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot);
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size);
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx);
void alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c);
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/recycler.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.c -- implementation of run recycler
*/
#include "heap.h"
#include "recycler.h"
#include "vec.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "ravl.h"
#include "valgrind_internal.h"
#define THRESHOLD_MUL 4
/*
* recycler_element_cmp -- compares two recycler elements
*/
static int
recycler_element_cmp(const void *lhs, const void *rhs)
{
const struct recycler_element *l = lhs;
const struct recycler_element *r = rhs;
int64_t diff = (int64_t)l->max_free_block - (int64_t)r->max_free_block;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->free_space - (int64_t)r->free_space;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
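/*
 * Illustrative ordering example for the comparator above: with elements
 *	{ .max_free_block = 4, .free_space = 10, .zone_id = 0, .chunk_id = 7 }
 *	{ .max_free_block = 4, .free_space = 12, .zone_id = 0, .chunk_id = 3 }
 * the first compares as smaller because the tie on max_free_block is broken
 * by free_space; zone_id and chunk_id only disambiguate otherwise equal
 * entries so that distinct runs never compare as equal in the ravl tree.
 */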
struct recycler {
struct ravl *runs;
struct palloc_heap *heap;
/*
* How many unaccounted units there *might* be inside of the memory
* blocks stored in the recycler.
* The value is not meant to be accurate, but rather a rough measure of
* how often the memory block scores should be recalculated.
*
* Per-chunk unaccounted units are shared for all zones, which might
* lead to some unnecessary recalculations.
*/
size_t unaccounted_units[MAX_CHUNK];
size_t unaccounted_total;
size_t nallocs;
size_t *peak_arenas;
VEC(, struct recycler_element) recalc;
os_mutex_t lock;
};
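/*
 * Sketch of the recalculation trigger (see recycler_recalc() below): with
 * THRESHOLD_MUL == 4, scores are only recomputed once the unaccounted unit
 * count reaches roughly 4 * peak_arenas * nallocs, i.e. about four full runs
 * worth of frees per active arena, unless the caller forces a pass.
 */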
/*
* recycler_new -- creates new recycler instance
*/
struct recycler *
recycler_new(struct palloc_heap *heap, size_t nallocs, size_t *peak_arenas)
{
struct recycler *r = Malloc(sizeof(struct recycler));
if (r == NULL)
goto error_alloc_recycler;
r->runs = ravl_new_sized(recycler_element_cmp,
sizeof(struct recycler_element));
if (r->runs == NULL)
goto error_alloc_tree;
r->heap = heap;
r->nallocs = nallocs;
r->peak_arenas = peak_arenas;
r->unaccounted_total = 0;
memset(&r->unaccounted_units, 0, sizeof(r->unaccounted_units));
VEC_INIT(&r->recalc);
util_mutex_init(&r->lock);
return r;
error_alloc_tree:
Free(r);
error_alloc_recycler:
return NULL;
}
/*
* recycler_delete -- deletes recycler instance
*/
void
recycler_delete(struct recycler *r)
{
VEC_DELETE(&r->recalc);
util_mutex_destroy(&r->lock);
ravl_delete(r->runs);
Free(r);
}
/*
* recycler_element_new -- calculates how many free bytes a run has and the
* largest request that the run can handle, and returns both as a
* recycler element struct
*/
struct recycler_element
recycler_element_new(struct palloc_heap *heap, const struct memory_block *m)
{
/*
* Counting of the clear bits can race with a concurrent deallocation
* that operates on the same run. This race is benign and has absolutely
* no effect on the correctness of this algorithm. Ideally, we would
* avoid grabbing the lock, but helgrind gets very confused if we
* try to disable reporting for this function.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
struct recycler_element e = {
.free_space = 0,
.max_free_block = 0,
.chunk_id = m->chunk_id,
.zone_id = m->zone_id,
};
m->m_ops->calc_free(m, &e.free_space, &e.max_free_block);
util_mutex_unlock(lock);
return e;
}
/*
* recycler_put -- inserts new run into the recycler
*/
int
recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element)
{
int ret = 0;
util_mutex_lock(&r->lock);
ret = ravl_emplace_copy(r->runs, &element);
util_mutex_unlock(&r->lock);
return ret;
}
/*
* recycler_get -- retrieves a chunk from the recycler
*/
int
recycler_get(struct recycler *r, struct memory_block *m)
{
int ret = 0;
util_mutex_lock(&r->lock);
struct recycler_element e = { .max_free_block = m->size_idx, 0, 0, 0};
struct ravl_node *n = ravl_find(r->runs, &e,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL) {
ret = ENOMEM;
goto out;
}
struct recycler_element *ne = ravl_data(n);
m->chunk_id = ne->chunk_id;
m->zone_id = ne->zone_id;
ravl_remove(r->runs, n);
struct chunk_header *hdr = heap_get_chunk_hdr(r->heap, m);
m->size_idx = hdr->size_idx;
memblock_rebuild_state(r->heap, m);
out:
util_mutex_unlock(&r->lock);
return ret;
}
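/*
 * Hypothetical put/get round trip (surrounding variable names are
 * illustrative only):
 *
 *	struct recycler_element e = recycler_element_new(heap, &m);
 *	if (recycler_put(r, &m, e) == 0) {
 *		struct memory_block out = MEMORY_BLOCK_NONE;
 *		out.size_idx = 1; // any run whose largest free block is >= 1 unit
 *		if (recycler_get(r, &out) == 0)
 *			; // 'out' now describes a run with enough free space
 *	}
 */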
/*
* recycler_recalc -- recalculates the scores of runs in the recycler to match
* the updated persistent state
*/
struct empty_runs
recycler_recalc(struct recycler *r, int force)
{
struct empty_runs runs;
VEC_INIT(&runs);
uint64_t units = r->unaccounted_total;
size_t peak_arenas;
util_atomic_load64(r->peak_arenas, &peak_arenas);
uint64_t recalc_threshold =
THRESHOLD_MUL * peak_arenas * r->nallocs;
if (!force && units < recalc_threshold)
return runs;
if (util_mutex_trylock(&r->lock) != 0)
return runs;
/* If the search is forced, recalculate everything */
uint64_t search_limit = force ? UINT64_MAX : units;
uint64_t found_units = 0;
struct memory_block nm = MEMORY_BLOCK_NONE;
struct ravl_node *n;
struct recycler_element next = {0, 0, 0, 0};
enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL;
do {
if ((n = ravl_find(r->runs, &next, p)) == NULL)
break;
p = RAVL_PREDICATE_GREATER;
struct recycler_element *ne = ravl_data(n);
next = *ne;
uint64_t chunk_units = r->unaccounted_units[ne->chunk_id];
if (!force && chunk_units == 0)
continue;
uint32_t existing_free_space = ne->free_space;
nm.chunk_id = ne->chunk_id;
nm.zone_id = ne->zone_id;
memblock_rebuild_state(r->heap, &nm);
struct recycler_element e = recycler_element_new(r->heap, &nm);
ASSERT(e.free_space >= existing_free_space);
uint64_t free_space_diff = e.free_space - existing_free_space;
found_units += free_space_diff;
if (free_space_diff == 0)
continue;
/*
* Decrease the per chunk_id counter by the number of nallocs
* found, increased by the blocks potentially freed in the
* active memory block. Cap the sub value to prevent overflow.
*/
util_fetch_and_sub64(&r->unaccounted_units[nm.chunk_id],
MIN(chunk_units, free_space_diff + r->nallocs));
ravl_remove(r->runs, n);
if (e.free_space == r->nallocs) {
memblock_rebuild_state(r->heap, &nm);
if (VEC_PUSH_BACK(&runs, nm) != 0)
ASSERT(0); /* XXX: fix after refactoring */
} else {
VEC_PUSH_BACK(&r->recalc, e);
}
} while (found_units < search_limit);
struct recycler_element *e;
VEC_FOREACH_BY_PTR(e, &r->recalc) {
ravl_emplace_copy(r->runs, e);
}
VEC_CLEAR(&r->recalc);
util_mutex_unlock(&r->lock);
util_fetch_and_sub64(&r->unaccounted_total, units);
return runs;
}
/*
* recycler_inc_unaccounted -- increases the number of unaccounted units in the
* recycler
*/
void
recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m)
{
util_fetch_and_add64(&r->unaccounted_total, m->size_idx);
util_fetch_and_add64(&r->unaccounted_units[m->chunk_id],
m->size_idx);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/ctl_debug.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* ctl_debug.c -- implementation of the debug CTL namespace
*/
#include "ctl.h"
#include "ctl_debug.h"
#include "obj.h"
/*
* CTL_WRITE_HANDLER(alloc_pattern) -- sets the alloc_pattern field in heap
*/
static int
CTL_WRITE_HANDLER(alloc_pattern)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->heap.alloc_pattern = arg_in;
return 0;
}
/*
* CTL_READ_HANDLER(alloc_pattern) -- returns alloc_pattern heap field
*/
static int
CTL_READ_HANDLER(alloc_pattern)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->heap.alloc_pattern;
return 0;
}
static const struct ctl_argument CTL_ARG(alloc_pattern) = CTL_ARG_LONG_LONG;
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_LEAF_RW(alloc_pattern),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_CHILD(heap),
CTL_NODE_END
};
/*
* debug_ctl_register -- registers ctl nodes for "debug" module
*/
void
debug_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, debug);
}
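/*
 * Usage sketch (hypothetical caller): once registered, the knob above is
 * reachable through the public CTL API as the "debug.heap.alloc_pattern"
 * read-write leaf, e.g.:
 *
 *	int pattern = 0xAB;
 *	pmemobj_ctl_set(pop, "debug.heap.alloc_pattern", &pattern);
 */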
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/alloc_class.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.c -- implementation of allocation classes
*/
#include <float.h>
#include <string.h>
#include "alloc_class.h"
#include "heap_layout.h"
#include "util.h"
#include "out.h"
#include "bucket.h"
#include "critnib.h"
#define RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)\
((uint64_t)(map_idx_s) << 32 |\
(uint64_t)(flags_s) << 16 |\
(uint64_t)(size_idx_s))
/*
* Value used to mark a reserved spot in the bucket array.
*/
#define ACLASS_RESERVED ((void *)0xFFFFFFFFULL)
/*
* The last size that is handled by runs.
*/
#define MAX_RUN_SIZE (CHUNKSIZE * 10)
/*
* Maximum number of bytes the allocation class generation algorithm can decide
* to waste in a single run chunk.
*/
#define MAX_RUN_WASTED_BYTES 1024
/*
* Allocation categories are used for allocation classes generation. Each one
* defines the biggest handled size (in bytes) and step pct of the generation
* process. The step percentage defines maximum allowed external fragmentation
* for the category.
*/
#define MAX_ALLOC_CATEGORIES 9
/*
* The first size (in bytes) which is actually used in the allocation
* class generation algorithm. All smaller sizes use the first predefined bucket
* with the smallest run unit size.
*/
#define FIRST_GENERATED_CLASS_SIZE 128
/*
* The granularity of the allocation class generation algorithm.
*/
#define ALLOC_BLOCK_SIZE_GEN 64
/*
* The first predefined allocation class size
*/
#define MIN_UNIT_SIZE 128
static const struct {
size_t size;
float step;
} categories[MAX_ALLOC_CATEGORIES] = {
/* dummy category - the first allocation class is predefined */
{FIRST_GENERATED_CLASS_SIZE, 0.05f},
{1024, 0.05f},
{2048, 0.05f},
{4096, 0.05f},
{8192, 0.05f},
{16384, 0.05f},
{32768, 0.05f},
{131072, 0.05f},
{393216, 0.05f},
};
#define RUN_UNIT_MAX_ALLOC 8U
/*
* Every allocation has to be a multiple of at least 8 because we need to
* ensure proper alignment of every pmem structure.
*/
#define ALLOC_BLOCK_SIZE 16
/*
* Converts size (in bytes) to number of allocation blocks.
*/
#define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g)))
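/*
 * Worked example: with the default granularity of ALLOC_BLOCK_SIZE (16),
 * SIZE_TO_CLASS_MAP_INDEX(1, 16) == 1, SIZE_TO_CLASS_MAP_INDEX(16, 16) == 1
 * and SIZE_TO_CLASS_MAP_INDEX(17, 16) == 2, so every 16-byte span of request
 * sizes shares one slot in class_map_by_alloc_size.
 */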
/*
* Target number of allocations per run instance.
*/
#define RUN_MIN_NALLOCS 200
/*
* Hard limit of chunks per single run.
*/
#define RUN_SIZE_IDX_CAP (16)
#define ALLOC_CLASS_DEFAULT_FLAGS CHUNK_FLAG_FLEX_BITMAP
struct alloc_class_collection {
size_t granularity;
struct alloc_class *aclasses[MAX_ALLOCATION_CLASSES];
/*
* The last size (in bytes) that is handled by runs, everything bigger
* uses the default class.
*/
size_t last_run_max_size;
/* maps allocation classes to allocation sizes, excluding the header! */
uint8_t *class_map_by_alloc_size;
/* maps allocation classes to run unit sizes */
struct critnib *class_map_by_unit_size;
int fail_on_missing_class;
int autogenerate_on_missing_class;
};
/*
* alloc_class_find_first_free_slot -- searches for the
* first available allocation class slot
*
* This function must be thread-safe because allocation classes can be created
* at runtime.
*/
int
alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot)
{
LOG(10, NULL);
for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) {
if (util_bool_compare_and_swap64(&ac->aclasses[n],
NULL, ACLASS_RESERVED)) {
*slot = (uint8_t)n;
return 0;
}
}
return -1;
}
/*
* alloc_class_reserve -- reserve the specified class id
*/
int
alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id)
{
LOG(10, NULL);
return util_bool_compare_and_swap64(&ac->aclasses[id],
NULL, ACLASS_RESERVED) ? 0 : -1;
}
/*
* alloc_class_reservation_clear -- removes the reservation on class id
*/
static void
alloc_class_reservation_clear(struct alloc_class_collection *ac, int id)
{
LOG(10, NULL);
int ret = util_bool_compare_and_swap64(&ac->aclasses[id],
ACLASS_RESERVED, NULL);
ASSERT(ret);
}
/*
* alloc_class_new -- creates a new allocation class
*/
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx)
{
LOG(10, NULL);
struct alloc_class *c = Malloc(sizeof(*c));
if (c == NULL)
goto error_class_alloc;
c->unit_size = unit_size;
c->header_type = htype;
c->type = type;
c->flags = (uint16_t)
(header_type_to_flag[c->header_type] |
(alignment ? CHUNK_FLAG_ALIGNED : 0)) |
ALLOC_CLASS_DEFAULT_FLAGS;
switch (type) {
case CLASS_HUGE:
id = DEFAULT_ALLOC_CLASS_ID;
break;
case CLASS_RUN:
c->rdsc.alignment = alignment;
memblock_run_bitmap(&size_idx, c->flags, unit_size,
alignment, NULL, &c->rdsc.bitmap);
c->rdsc.nallocs = c->rdsc.bitmap.nbits;
c->rdsc.size_idx = size_idx;
/* these two fields are duplicated from class */
c->rdsc.unit_size = c->unit_size;
c->rdsc.flags = c->flags;
uint8_t slot = (uint8_t)id;
if (id < 0 && alloc_class_find_first_free_slot(ac,
&slot) != 0)
goto error_class_alloc;
id = slot;
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(c->unit_size,
ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)c->flags;
uint64_t k = RUN_CLASS_KEY_PACK(map_idx_s,
flags_s, size_idx_s);
if (critnib_insert(ac->class_map_by_unit_size,
k, c) != 0) {
ERR("unable to register allocation class");
goto error_map_insert;
}
break;
default:
ASSERT(0);
}
c->id = (uint8_t)id;
ac->aclasses[c->id] = c;
return c;
error_map_insert:
Free(c);
error_class_alloc:
if (id >= 0)
alloc_class_reservation_clear(ac, id);
return NULL;
}
/*
* alloc_class_delete -- (internal) deletes an allocation class
*/
void
alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c)
{
LOG(10, NULL);
ac->aclasses[c->id] = NULL;
Free(c);
}
/*
* alloc_class_find_or_create -- (internal) searches for the
* biggest allocation class whose unit_size evenly divides n.
* If no such class exists, create one.
*/
static struct alloc_class *
alloc_class_find_or_create(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX);
uint64_t required_size_bytes = n * RUN_MIN_NALLOCS;
uint32_t required_size_idx = 1;
if (required_size_bytes > RUN_DEFAULT_SIZE) {
required_size_bytes -= RUN_DEFAULT_SIZE;
required_size_idx +=
CALC_SIZE_IDX(CHUNKSIZE, required_size_bytes);
if (required_size_idx > RUN_SIZE_IDX_CAP)
required_size_idx = RUN_SIZE_IDX_CAP;
}
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE ||
c->rdsc.size_idx < required_size_idx)
continue;
if (n % c->unit_size == 0 &&
n / c->unit_size <= RUN_UNIT_MAX_ALLOC)
return c;
}
/*
* In order to minimize the wasted space at the end of the run the
* run data size must be divisible by the allocation class unit size
* with the smallest possible remainder, preferably 0.
*/
struct run_bitmap b;
size_t runsize_bytes = 0;
do {
if (runsize_bytes != 0) /* don't increase on first iteration */
n += ALLOC_BLOCK_SIZE_GEN;
uint32_t size_idx = required_size_idx;
memblock_run_bitmap(&size_idx, ALLOC_CLASS_DEFAULT_FLAGS, n, 0,
NULL, &b);
runsize_bytes = RUN_CONTENT_SIZE_BYTES(size_idx) - b.size;
} while ((runsize_bytes % n) > MAX_RUN_WASTED_BYTES);
/*
* Now that the desired unit size is found the existing classes need
* to be searched for possible duplicates. If a class that can handle
* the calculated size already exists, simply return that.
*/
for (int i = 1; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE)
continue;
if (n / c->unit_size <= RUN_UNIT_MAX_ALLOC &&
n % c->unit_size == 0)
return c;
if (c->unit_size == n)
return c;
}
return alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, n, 0,
required_size_idx);
}
/*
* alloc_class_find_min_frag -- searches for an existing allocation
* class that will provide the smallest internal fragmentation for the given
* size.
*/
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
struct alloc_class *best_c = NULL;
size_t lowest_waste = SIZE_MAX;
ASSERTne(n, 0);
/*
* Start from the largest buckets in order to minimize unit size of
* allocated memory blocks.
*/
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
/* can't use alloc classes /w no headers by default */
if (c == NULL || c->header_type == HEADER_NONE)
continue;
size_t real_size = n + header_type_to_size[c->header_type];
size_t units = CALC_SIZE_IDX(c->unit_size, real_size);
/* can't exceed the maximum allowed run unit max */
if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC)
continue;
if (c->unit_size * units == real_size)
return c;
size_t waste = (c->unit_size * units) - real_size;
/*
* If we assume that the allocation class is only ever going to
* be used with exactly one size, the effective internal
* fragmentation would be increased by the leftover
* memory at the end of the run.
*/
if (c->type == CLASS_RUN) {
size_t wasted_units = c->rdsc.nallocs % units;
size_t wasted_bytes = wasted_units * c->unit_size;
size_t waste_avg_per_unit = wasted_bytes /
c->rdsc.nallocs;
waste += waste_avg_per_unit;
}
if (best_c == NULL || lowest_waste > waste) {
best_c = c;
lowest_waste = waste;
}
}
ASSERTne(best_c, NULL);
return best_c;
}
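/*
 * Illustrative fragmentation calculation (numbers are hypothetical): for a
 * 200-byte request and a run class with 128-byte units and compact headers
 * (16 bytes), real_size is 216, units is CALC_SIZE_IDX(128, 216) == 2, and
 * the per-allocation waste considered above is 2 * 128 - 216 == 40 bytes,
 * plus the averaged run-end leftover added for run classes.
 */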
/*
* alloc_class_collection_new -- creates a new collection of allocation classes
*/
struct alloc_class_collection *
alloc_class_collection_new()
{
LOG(10, NULL);
struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
if (ac == NULL)
return NULL;
ac->granularity = ALLOC_BLOCK_SIZE;
ac->last_run_max_size = MAX_RUN_SIZE;
ac->fail_on_missing_class = 0;
ac->autogenerate_on_missing_class = 1;
size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;
if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
goto error;
if ((ac->class_map_by_unit_size = critnib_new()) == NULL)
goto error;
memset(ac->class_map_by_alloc_size, 0xFF, maps_size);
if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
CHUNKSIZE, 0, 1) == NULL)
goto error;
struct alloc_class *predefined_class =
alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
MIN_UNIT_SIZE, 0, 1);
if (predefined_class == NULL)
goto error;
for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
++i) {
ac->class_map_by_alloc_size[i] = predefined_class->id;
}
/*
* Based on the defined categories, a set of allocation classes is
* created. The unit size of those classes depends on the category's
* initial size and step.
*/
size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
do {
if (alloc_class_find_or_create(ac, n) == NULL)
goto error;
float stepf = (float)n * categories[c].step;
size_t stepi = (size_t)stepf;
stepi = (stepf - (float)stepi < FLT_EPSILON) ?
stepi : stepi + 1;
n += (stepi + (granularity_mask)) & ~granularity_mask;
} while (n <= categories[c].size);
}
/*
* Find the largest alloc class and use its unit size as the run allocation
* threshold.
*/
uint8_t largest_aclass_slot;
for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
largest_aclass_slot > 0 &&
ac->aclasses[largest_aclass_slot] == NULL;
--largest_aclass_slot) {
/* intentional NOP */
}
struct alloc_class *c = ac->aclasses[largest_aclass_slot];
/*
* The actual run might contain less unit blocks than the theoretical
* unit max variable. This may be the case for very large unit sizes.
*/
size_t real_unit_max = c->rdsc.nallocs < RUN_UNIT_MAX_ALLOC ?
c->rdsc.nallocs : RUN_UNIT_MAX_ALLOC;
size_t theoretical_run_max_size = c->unit_size * real_unit_max;
ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
theoretical_run_max_size : MAX_RUN_SIZE;
#ifdef DEBUG
/*
* Verify that each bucket's unit size points back to the bucket by the
* bucket map. This must be true for the default allocation classes,
* otherwise duplicate buckets will be created.
*/
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL && c->type == CLASS_RUN) {
ASSERTeq(i, c->id);
ASSERTeq(alloc_class_by_run(ac, c->unit_size,
c->flags, c->rdsc.size_idx), c);
}
}
#endif
return ac;
error:
alloc_class_collection_delete(ac);
return NULL;
}
/*
* alloc_class_collection_delete -- deletes the allocation class collection and
* all of the classes within it
*/
void
alloc_class_collection_delete(struct alloc_class_collection *ac)
{
LOG(10, NULL);
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL) {
alloc_class_delete(ac, c);
}
}
if (ac->class_map_by_unit_size)
critnib_delete(ac->class_map_by_unit_size);
Free(ac->class_map_by_alloc_size);
Free(ac);
}
/*
* alloc_class_assign_by_size -- (internal) chooses the allocation class that
* best approximates the provided size
*/
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
size_t size)
{
LOG(10, NULL);
size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
ac->granularity);
struct alloc_class *c = alloc_class_find_min_frag(ac,
class_map_index * ac->granularity);
ASSERTne(c, NULL);
/*
* We don't lock this array because locking this section here and then
* bailing out if someone else was faster would be still slower than
* just calculating the class and failing to assign the variable.
* We are using a compare and swap so that helgrind/drd don't complain.
*/
util_bool_compare_and_swap64(
&ac->class_map_by_alloc_size[class_map_index],
MAX_ALLOCATION_CLASSES, c->id);
return c;
}
/*
* alloc_class_by_alloc_size -- returns allocation class that is assigned
* to handle an allocation of the provided size
*/
struct alloc_class *
alloc_class_by_alloc_size(struct alloc_class_collection *ac, size_t size)
{
if (size < ac->last_run_max_size) {
uint8_t class_id = ac->class_map_by_alloc_size[
SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity)];
if (class_id == MAX_ALLOCATION_CLASSES) {
if (ac->fail_on_missing_class)
return NULL;
else if (ac->autogenerate_on_missing_class)
return alloc_class_assign_by_size(ac, size);
else
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
return ac->aclasses[class_id];
} else {
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
}
/*
* alloc_class_by_run -- returns the allocation class that has the given
* unit size
*/
struct alloc_class *
alloc_class_by_run(struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx)
{
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(unit_size, ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
ASSERT(size_idx <= UINT16_MAX);
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)flags;
return critnib_get(ac->class_map_by_unit_size,
RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s));
}
/*
* alloc_class_by_id -- returns the allocation class with an id
*/
struct alloc_class *
alloc_class_by_id(struct alloc_class_collection *ac, uint8_t id)
{
return ac->aclasses[id];
}
/*
* alloc_class_calc_size_idx -- calculates how many units the size requires
*/
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size)
{
uint32_t size_idx = CALC_SIZE_IDX(c->unit_size,
size + header_type_to_size[c->header_type]);
if (c->type == CLASS_RUN) {
if (c->header_type == HEADER_NONE && size_idx != 1)
return -1;
else if (size_idx > RUN_UNIT_MAX)
return -1;
else if (size_idx > c->rdsc.nallocs)
return -1;
}
return size_idx;
}
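/*
 * Worked example (hypothetical class): with 256-byte units and compact
 * headers, alloc_class_calc_size_idx(c, 500) yields
 * CALC_SIZE_IDX(256, 500 + 16) == 3, i.e. the allocation spans three run
 * units; the same request against a HEADER_NONE run class would return -1,
 * because such classes only ever serve single-unit allocations.
 */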
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container_seglists.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* container_seglists.h -- internal definitions for
* segregated lists block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_SEGLISTS_H
#define LIBPMEMOBJ_CONTAINER_SEGLISTS_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_seglists(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_SEGLISTS_H */
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/obj.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* obj.h -- internal definitions for obj module
*/
#ifndef LIBPMEMOBJ_OBJ_H
#define LIBPMEMOBJ_OBJ_H 1
#include <stddef.h>
#include <stdint.h>
#include "lane.h"
#include "pool_hdr.h"
#include "pmalloc.h"
#include "ctl.h"
#include "sync.h"
#include "stats.h"
#include "ctl_debug.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMOBJ_LOG_PREFIX "libpmemobj"
#define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL"
#define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE"
/* attributes of the obj memory pool format for the pool header */
#define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */
#define OBJ_FORMAT_MAJOR 6
#define OBJ_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define OBJ_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_CHECK;
/* size of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40)
#define OBJ_LANES_OFFSET (sizeof(struct pmemobjpool)) /* lanes offset */
#define OBJ_NLANES 1024 /* number of lanes */
#define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
#define OBJ_OID_IS_NULL(oid) ((oid).off == 0)
#define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first)
#define OBJ_OFF_FROM_HEAP(pop, off)\
((off) >= (pop)->heap_offset &&\
(off) < (pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_FROM_LANES(pop, off)\
((off) >= (pop)->lanes_offset &&\
(off) < (pop)->lanes_offset +\
(pop)->nlanes * sizeof(struct lane_layout))
#define OBJ_PTR_FROM_POOL(pop, ptr)\
((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\
(uintptr_t)(ptr) < (uintptr_t)(pop) +\
(pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_IS_VALID(pop, off)\
(OBJ_OFF_FROM_HEAP(pop, off) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\
(OBJ_OFF_FROM_LANES(pop, off)))
#define OBJ_PTR_IS_VALID(pop, ptr)\
OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr))
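/*
 * Illustrative round trip (assuming 'pop' is a valid pool and 'oid' belongs
 * to it): OBJ_OFF_TO_PTR(pop, oid.off) yields the direct pointer and
 * OBJ_PTR_TO_OFF(pop, ptr) recovers the offset, so
 * OBJ_PTR_TO_OFF(pop, OBJ_OFF_TO_PTR(pop, oid.off)) == oid.off; the
 * *_IS_VALID macros additionally require the offset to land inside the heap,
 * the lane array, or the root object bookkeeping fields.
 */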
typedef void (*persist_local_fn)(const void *, size_t);
typedef void (*flush_local_fn)(const void *, size_t);
typedef void (*drain_local_fn)(void);
typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags);
typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr,
size_t len, unsigned lane, unsigned flags);
typedef uint64_t type_num_t;
#define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0)
/* PMEM_OBJ_POOL_HEAD_SIZE Without the unused and unused2 arrays */
#define PMEM_OBJ_POOL_HEAD_SIZE 2196
#define PMEM_OBJ_POOL_UNUSED2_SIZE (PMEM_PAGESIZE \
- OBJ_DSC_P_UNUSED\
- PMEM_OBJ_POOL_HEAD_SIZE)
/*
//NEW
//#define _GNU_SOURCE
//#include <sys/types.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
//int __real_open(const char *__path, int __oflag);
//int __wrap_open(const char *__path, int __oflag);
void* open_device(void);
//END NEW
*/
struct pmemobjpool {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMOBJ pool descriptor (2kB) */
char layout[PMEMOBJ_MAX_LAYOUT];
uint64_t lanes_offset;
uint64_t nlanes;
uint64_t heap_offset;
uint64_t unused3;
unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */
uint64_t checksum; /* checksum of above fields */
uint64_t root_offset;
/* unique runID for this program run - persistent but not checksummed */
uint64_t run_id;
uint64_t root_size;
/*
* These flags can be set from a conversion tool and are set only for
* the first recovery of the pool.
*/
uint64_t conversion_flags;
uint64_t heap_size;
struct stats_persistent stats_persistent;
char pmem_reserved[496]; /* must be zeroed */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
struct palloc_heap heap;
struct lane_descriptor lanes_desc;
uint64_t uuid_lo;
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct stats *stats;
struct pool_set *set; /* pool set info */
struct pmemobjpool *replica; /* next replica */
/* per-replica functions: pmem or non-pmem */
persist_local_fn persist_local; /* persist function */
flush_local_fn flush_local; /* flush function */
drain_local_fn drain_local; /* drain function */
memcpy_local_fn memcpy_local; /* persistent memcpy function */
memmove_local_fn memmove_local; /* persistent memmove function */
memset_local_fn memset_local; /* persistent memset function */
/* for 'master' replica: with or without data replication */
struct pmem_ops p_ops;
PMEMmutex rootlock; /* root object lock */
int is_master_replica;
int has_remote_replicas;
/* remote replica section */
void *rpp; /* RPMEMpool opaque handle if it is a remote replica */
uintptr_t remote_base; /* beginning of the remote pool */
char *node_addr; /* address of a remote node */
char *pool_desc; /* descriptor of a poolset */
persist_remote_fn persist_remote; /* remote persist function */
int vg_boot;
int tx_debug_skip_expensive_checks;
struct tx_parameters *tx_params;
/*
* Locks are dynamically allocated on FreeBSD. Keep track so
* we can free them on pmemobj_close.
*/
PMEMmutex_internal *mutex_head;
PMEMrwlock_internal *rwlock_head;
PMEMcond_internal *cond_head;
struct {
struct ravl *map;
os_mutex_t lock;
int verify;
} ulog_user_buffers;
void *user_data;
//New
//void *device;
/* padding to align size of this structure to page boundary */
/* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */
char unused2[PMEM_OBJ_POOL_UNUSED2_SIZE - 28];
};
/*
* Stored in the 'size' field of oobh header, determines whether the object
* is internal or not. Internal objects are skipped in pmemobj iteration
* functions.
*/
#define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15)
#define CLASS_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 48))
#define ARENA_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 32))
/*
* pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant
* 8 bytes with most significant 8 bytes.
*/
static inline uint64_t
pmemobj_get_uuid_lo(PMEMobjpool *pop)
{
uint64_t uuid_lo = 0;
for (int i = 0; i < 8; i++) {
uuid_lo = (uuid_lo << 8) |
(pop->hdr.poolset_uuid[i] ^
pop->hdr.poolset_uuid[8 + i]);
}
return uuid_lo;
}
/*
* OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid
*/
static inline int
OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid)
{
return OBJ_OID_IS_NULL(oid) ||
(oid.pool_uuid_lo == pop->uuid_lo &&
oid.off >= pop->heap_offset &&
oid.off < pop->heap_offset + pop->heap_size);
}
static inline int
OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
return OBJ_OFF_IS_VALID(pop, offset);
}
void obj_init(void);
void obj_fini(void);
int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
/*
* (debug helper macro) logs notice message if used inside a transaction
*/
#ifdef DEBUG
#define _POBJ_DEBUG_NOTICE_IN_TX()\
_pobj_debug_notice(__func__, NULL, 0)
#else
#define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0)
#endif
#if FAULT_INJECTION
void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemobj_fault_injection_enabled(void);
#else
static inline void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemobj_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/obj.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* obj.c -- transactional object store implementation
*/
#include <inttypes.h>
#include <limits.h>
#include <wchar.h>
#include <stdbool.h>
#include "valgrind_internal.h"
#include "libpmem.h"
#include "memblock.h"
#include "critnib.h"
#include "list.h"
#include "mmap.h"
#include "obj.h"
#include "ctl_global.h"
#include "ravl.h"
#include "heap_layout.h"
#include "os.h"
#include "os_thread.h"
#include "pmemops.h"
#include "set.h"
#include "sync.h"
#include "tx.h"
#include "sys_util.h"
/*
* The variable from which the config is directly loaded. The string
* cannot contain any comments or extraneous white characters.
*/
#define OBJ_CONFIG_ENV_VARIABLE "PMEMOBJ_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define OBJ_CONFIG_FILE_ENV_VARIABLE "PMEMOBJ_CONF_FILE"
/*
* The variable which overwrites a number of lanes available at runtime.
*/
#define OBJ_NLANES_ENV_VARIABLE "PMEMOBJ_NLANES"
#define OBJ_X_VALID_FLAGS PMEMOBJ_F_RELAXED
static const struct pool_attr Obj_create_attr = {
OBJ_HDR_SIG,
OBJ_FORMAT_MAJOR,
OBJ_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Obj_open_attr = {
OBJ_HDR_SIG,
OBJ_FORMAT_MAJOR,
OBJ_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
static struct critnib *pools_ht; /* hash table used for searching by UUID */
static struct critnib *pools_tree; /* tree used for searching by address */
int _pobj_cache_invalidate;
#ifndef _WIN32
__thread struct _pobj_pcache _pobj_cached_pool;
/*
* pmemobj_direct -- returns the direct pointer of an object
*/
void *
pmemobj_direct(PMEMoid oid)
{
return pmemobj_direct_inline(oid);
}
#else /* _WIN32 */
/*
* XXX - this is a temporary implementation
*
* Seems like we could still use TLS and simply substitute "__thread" with
* "__declspec(thread)", however it's not clear if it would work correctly
* with Windows DLL's.
* Need to verify that once we have the multi-threaded tests ported.
*/
struct _pobj_pcache {
PMEMobjpool *pop;
uint64_t uuid_lo;
int invalidate;
};
static os_once_t Cached_pool_key_once = OS_ONCE_INIT;
static os_tls_key_t Cached_pool_key;
/*
* _Cached_pool_key_alloc -- (internal) allocate pool cache pthread key
*/
static void
_Cached_pool_key_alloc(void)
{
int pth_ret = os_tls_key_create(&Cached_pool_key, free);
if (pth_ret)
FATAL("!os_tls_key_create");
}
/*
* pmemobj_direct -- returns the direct pointer of an object
*/
void *
pmemobj_direct(PMEMoid oid)
{
if (oid.off == 0 || oid.pool_uuid_lo == 0)
return NULL;
struct _pobj_pcache *pcache = os_tls_get(Cached_pool_key);
if (pcache == NULL) {
pcache = calloc(sizeof(struct _pobj_pcache), 1);
if (pcache == NULL)
FATAL("!pcache malloc");
int ret = os_tls_set(Cached_pool_key, pcache);
if (ret)
FATAL("!os_tls_set");
}
if (_pobj_cache_invalidate != pcache->invalidate ||
pcache->uuid_lo != oid.pool_uuid_lo) {
pcache->invalidate = _pobj_cache_invalidate;
if ((pcache->pop = pmemobj_pool_by_oid(oid)) == NULL) {
pcache->uuid_lo = 0;
return NULL;
}
pcache->uuid_lo = oid.pool_uuid_lo;
}
return (void *)((uintptr_t)pcache->pop + oid.off);
}
#endif /* _WIN32 */
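/*
 * Illustrative sketch (not part of the original source): typical use of
 * pmemobj_direct() to turn a persistent object id into a usable pointer.
 * The root object size below is arbitrary.
 *
 *	PMEMoid root = pmemobj_root(pop, sizeof(uint64_t));
 *	uint64_t *valp = pmemobj_direct(root);
 *	if (valp != NULL) {
 *		*valp = 42;
 *		pmemobj_persist(pop, valp, sizeof(*valp));
 *	}
 */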
/*
* obj_ctl_init_and_load -- (static) initializes CTL and loads configuration
* from env variable and file
*/
static int
obj_ctl_init_and_load(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
if (pop != NULL && (pop->ctl = ctl_new()) == NULL) {
LOG(2, "!ctl_new");
return -1;
}
if (pop) {
tx_ctl_register(pop);
pmalloc_ctl_register(pop);
stats_ctl_register(pop);
debug_ctl_register(pop);
}
char *env_config = os_getenv(OBJ_CONFIG_ENV_VARIABLE);
if (env_config != NULL) {
if (ctl_load_config_from_string(pop ? pop->ctl : NULL,
pop, env_config) != 0) {
LOG(2, "unable to parse config stored in %s "
"environment variable",
OBJ_CONFIG_ENV_VARIABLE);
goto err;
}
}
char *env_config_file = os_getenv(OBJ_CONFIG_FILE_ENV_VARIABLE);
if (env_config_file != NULL && env_config_file[0] != '\0') {
if (ctl_load_config_from_file(pop ? pop->ctl : NULL,
pop, env_config_file) != 0) {
LOG(2, "unable to parse config stored in %s "
"file (from %s environment variable)",
env_config_file,
OBJ_CONFIG_FILE_ENV_VARIABLE);
goto err;
}
}
return 0;
err:
if (pop)
ctl_delete(pop->ctl);
return -1;
}
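/*
 * Illustrative sketch (not part of the original source): the same CTL
 * machinery can be driven from the environment before a pool is created
 * or opened. The config entry and pool path below are only examples.
 *
 *	setenv("PMEMOBJ_CONF", "stats.enabled=1", 1);
 *	PMEMobjpool *pop = pmemobj_open("/mnt/pmem/pool", "mylayout");
 */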
/*
* obj_pool_init -- (internal) allocate global structs holding all opened pools
*
* This is invoked on a first call to pmemobj_open() or pmemobj_create().
* Memory is released in library destructor.
*
* This function needs to be threadsafe.
*/
static void
obj_pool_init(void)
{
LOG(3, NULL);
struct critnib *c;
if (pools_ht == NULL) {
c = critnib_new();
if (c == NULL)
FATAL("!critnib_new for pools_ht");
if (!util_bool_compare_and_swap64(&pools_ht, NULL, c))
critnib_delete(c);
}
if (pools_tree == NULL) {
c = critnib_new();
if (c == NULL)
FATAL("!critnib_new for pools_tree");
if (!util_bool_compare_and_swap64(&pools_tree, NULL, c))
critnib_delete(c);
}
}
/*
* pmemobj_oid -- return a PMEMoid based on the virtual address
*
* If the address does not belong to any pool OID_NULL is returned.
*/
PMEMoid
pmemobj_oid(const void *addr)
{
PMEMobjpool *pop = pmemobj_pool_by_ptr(addr);
if (pop == NULL)
return OID_NULL;
PMEMoid oid = {pop->uuid_lo, (uintptr_t)addr - (uintptr_t)pop};
return oid;
}
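/*
 * Illustrative sketch (not part of the original source): recovering an
 * OID from a direct pointer, e.g. inside a callback that only receives
 * the raw address. 'some_oid' is a hypothetical, valid object id.
 *
 *	void *ptr = pmemobj_direct(some_oid);
 *	PMEMoid back = pmemobj_oid(ptr);
 *	// back.off == some_oid.off, back.pool_uuid_lo == some_oid.pool_uuid_lo
 */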
/*
* obj_init -- initialization of obj
*
* Called by constructor.
*/
void
obj_init(void)
{
LOG(3, NULL);
COMPILE_ERROR_ON(sizeof(struct pmemobjpool) !=
POOL_HDR_SIZE + POOL_DESC_SIZE);
COMPILE_ERROR_ON(PMEMOBJ_F_MEM_NODRAIN != PMEM_F_MEM_NODRAIN);
COMPILE_ERROR_ON(PMEMOBJ_F_MEM_NONTEMPORAL != PMEM_F_MEM_NONTEMPORAL);
COMPILE_ERROR_ON(PMEMOBJ_F_MEM_TEMPORAL != PMEM_F_MEM_TEMPORAL);
COMPILE_ERROR_ON(PMEMOBJ_F_MEM_WC != PMEM_F_MEM_WC);
COMPILE_ERROR_ON(PMEMOBJ_F_MEM_WB != PMEM_F_MEM_WB);
COMPILE_ERROR_ON(PMEMOBJ_F_MEM_NOFLUSH != PMEM_F_MEM_NOFLUSH);
#ifdef _WIN32
/* XXX - temporary implementation (see above) */
os_once(&Cached_pool_key_once, _Cached_pool_key_alloc);
#endif
/*
* Load global config, ignore any issues. They will be caught on the
* subsequent call to this function for individual pools.
*/
ctl_global_register();
if (obj_ctl_init_and_load(NULL))
FATAL("error: %s", pmemobj_errormsg());
lane_info_boot();
util_remote_init();
}
/*
* obj_fini -- cleanup of obj
*
* Called by destructor.
*/
void
obj_fini(void)
{
LOG(3, NULL);
if (pools_ht)
critnib_delete(pools_ht);
if (pools_tree)
critnib_delete(pools_tree);
lane_info_destroy();
util_remote_fini();
#ifdef _WIN32
(void) os_tls_key_delete(Cached_pool_key);
#endif
}
/*
* obj_drain_empty -- (internal) empty function for drain on non-pmem memory
*/
static void
obj_drain_empty(void)
{
/* do nothing */
}
/*
* obj_msync_nofail -- (internal) pmem_msync wrapper that never fails from
* caller's perspective
*/
static void
obj_msync_nofail(const void *addr, size_t size)
{
if (pmem_msync(addr, size))
FATAL("!pmem_msync");
}
/*
* obj_nopmem_memcpy -- (internal) memcpy followed by an msync
*/
static void *
obj_nopmem_memcpy(void *dest, const void *src, size_t len, unsigned flags)
{
LOG(15, "dest %p src %p len %zu flags 0x%x", dest, src, len, flags);
/*
* Use pmem_memcpy instead of memcpy, because pmemobj_memcpy is supposed
* to guarantee that multiple of 8 byte stores to 8 byte aligned
* addresses are fail safe atomic. pmem_memcpy guarantees that, while
* libc memcpy does not.
*/
pmem_memcpy(dest, src, len, PMEM_F_MEM_NOFLUSH);
obj_msync_nofail(dest, len);
return dest;
}
/*
* obj_nopmem_memmove -- (internal) memmove followed by an msync
*/
static void *
obj_nopmem_memmove(void *dest, const void *src, size_t len, unsigned flags)
{
LOG(15, "dest %p src %p len %zu flags 0x%x", dest, src, len, flags);
/* see comment in obj_nopmem_memcpy */
pmem_memmove(dest, src, len, PMEM_F_MEM_NOFLUSH);
obj_msync_nofail(dest, len);
return dest;
}
/*
* obj_nopmem_memset -- (internal) memset followed by an msync
*/
static void *
obj_nopmem_memset(void *dest, int c, size_t len, unsigned flags)
{
LOG(15, "dest %p c 0x%02x len %zu flags 0x%x", dest, c, len, flags);
/* see comment in obj_nopmem_memcpy */
pmem_memset(dest, c, len, PMEM_F_MEM_NOFLUSH);
obj_msync_nofail(dest, len);
return dest;
}
/*
* obj_remote_persist -- (internal) remote persist function
*/
static int
obj_remote_persist(PMEMobjpool *pop, const void *addr, size_t len,
unsigned lane, unsigned flags)
{
LOG(15, "pop %p addr %p len %zu lane %u flags %u",
pop, addr, len, lane, flags);
ASSERTne(pop->rpp, NULL);
uintptr_t offset = (uintptr_t)addr - pop->remote_base;
unsigned rpmem_flags = 0;
if (flags & PMEMOBJ_F_RELAXED)
rpmem_flags |= RPMEM_PERSIST_RELAXED;
int rv = Rpmem_persist(pop->rpp, offset, len, lane, rpmem_flags);
if (rv) {
ERR("!rpmem_persist(rpp %p offset %zu length %zu lane %u)"
" FATAL ERROR (returned value %i)",
pop->rpp, offset, len, lane, rv);
return -1;
}
return 0;
}
/*
* XXX - Consider removing obj_norep_*() wrappers to call *_local()
* functions directly. Alternatively, always use obj_rep_*(), even
* if there are no replicas. Verify the performance penalty.
*/
/*
* obj_norep_memcpy -- (internal) memcpy w/o replication
*/
static void *
obj_norep_memcpy(void *ctx, void *dest, const void *src, size_t len,
unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
flags);
return pop->memcpy_local(dest, src, len,
flags & PMEM_F_MEM_VALID_FLAGS);
}
/*
* obj_norep_memmove -- (internal) memmove w/o replication
*/
static void *
obj_norep_memmove(void *ctx, void *dest, const void *src, size_t len,
unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
flags);
return pop->memmove_local(dest, src, len,
flags & PMEM_F_MEM_VALID_FLAGS);
}
/*
* obj_norep_memset -- (internal) memset w/o replication
*/
static void *
obj_norep_memset(void *ctx, void *dest, int c, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p dest %p c 0x%02x len %zu flags 0x%x", pop, dest, c, len,
flags);
return pop->memset_local(dest, c, len, flags & PMEM_F_MEM_VALID_FLAGS);
}
/*
* obj_norep_persist -- (internal) persist w/o replication
*/
static int
obj_norep_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
pop->persist_local(addr, len);
return 0;
}
/*
* obj_norep_flush -- (internal) flush w/o replication
*/
static int
obj_norep_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
pop->flush_local(addr, len);
return 0;
}
/*
* obj_norep_drain -- (internal) drain w/o replication
*/
static void
obj_norep_drain(void *ctx)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p", pop);
pop->drain_local();
}
static void obj_pool_cleanup(PMEMobjpool *pop);
/*
* obj_handle_remote_persist_error -- (internal) handle remote persist
* fatal error
*/
static void
obj_handle_remote_persist_error(PMEMobjpool *pop)
{
LOG(1, "pop %p", pop);
ERR("error clean up...");
obj_pool_cleanup(pop);
FATAL("Fatal error of remote persist. Aborting...");
}
/*
* obj_rep_memcpy -- (internal) memcpy with replication
*/
static void *
obj_rep_memcpy(void *ctx, void *dest, const void *src, size_t len,
unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
flags);
unsigned lane = UINT_MAX;
if (pop->has_remote_replicas)
lane = lane_hold(pop, NULL);
void *ret = pop->memcpy_local(dest, src, len, flags);
PMEMobjpool *rep = pop->replica;
while (rep) {
void *rdest = (char *)rep + (uintptr_t)dest - (uintptr_t)pop;
if (rep->rpp == NULL) {
rep->memcpy_local(rdest, src, len,
flags & PMEM_F_MEM_VALID_FLAGS);
} else {
if (rep->persist_remote(rep, rdest, len, lane, flags))
obj_handle_remote_persist_error(pop);
}
rep = rep->replica;
}
if (pop->has_remote_replicas)
lane_release(pop);
return ret;
}
/*
* obj_rep_memmove -- (internal) memmove with replication
*/
static void *
obj_rep_memmove(void *ctx, void *dest, const void *src, size_t len,
unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
flags);
unsigned lane = UINT_MAX;
if (pop->has_remote_replicas)
lane = lane_hold(pop, NULL);
void *ret = pop->memmove_local(dest, src, len, flags);
PMEMobjpool *rep = pop->replica;
while (rep) {
void *rdest = (char *)rep + (uintptr_t)dest - (uintptr_t)pop;
if (rep->rpp == NULL) {
rep->memmove_local(rdest, src, len,
flags & PMEM_F_MEM_VALID_FLAGS);
} else {
if (rep->persist_remote(rep, rdest, len, lane, flags))
obj_handle_remote_persist_error(pop);
}
rep = rep->replica;
}
if (pop->has_remote_replicas)
lane_release(pop);
return ret;
}
/*
* obj_rep_memset -- (internal) memset with replication
*/
static void *
obj_rep_memset(void *ctx, void *dest, int c, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p dest %p c 0x%02x len %zu flags 0x%x", pop, dest, c, len,
flags);
unsigned lane = UINT_MAX;
if (pop->has_remote_replicas)
lane = lane_hold(pop, NULL);
void *ret = pop->memset_local(dest, c, len, flags);
PMEMobjpool *rep = pop->replica;
while (rep) {
void *rdest = (char *)rep + (uintptr_t)dest - (uintptr_t)pop;
if (rep->rpp == NULL) {
rep->memset_local(rdest, c, len,
flags & PMEM_F_MEM_VALID_FLAGS);
} else {
if (rep->persist_remote(rep, rdest, len, lane, flags))
obj_handle_remote_persist_error(pop);
}
rep = rep->replica;
}
if (pop->has_remote_replicas)
lane_release(pop);
return ret;
}
/*
* obj_rep_persist -- (internal) persist with replication
*/
static int
obj_rep_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
unsigned lane = UINT_MAX;
if (pop->has_remote_replicas)
lane = lane_hold(pop, NULL);
pop->persist_local(addr, len);
PMEMobjpool *rep = pop->replica;
while (rep) {
void *raddr = (char *)rep + (uintptr_t)addr - (uintptr_t)pop;
if (rep->rpp == NULL) {
rep->memcpy_local(raddr, addr, len, 0);
} else {
if (rep->persist_remote(rep, raddr, len, lane, flags))
obj_handle_remote_persist_error(pop);
}
rep = rep->replica;
}
if (pop->has_remote_replicas)
lane_release(pop);
return 0;
}
/*
* obj_rep_flush -- (internal) flush with replication
*/
static int
obj_rep_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
unsigned lane = UINT_MAX;
if (pop->has_remote_replicas)
lane = lane_hold(pop, NULL);
pop->flush_local(addr, len);
PMEMobjpool *rep = pop->replica;
while (rep) {
void *raddr = (char *)rep + (uintptr_t)addr - (uintptr_t)pop;
if (rep->rpp == NULL) {
rep->memcpy_local(raddr, addr, len,
PMEM_F_MEM_NODRAIN);
} else {
if (rep->persist_remote(rep, raddr, len, lane, flags))
obj_handle_remote_persist_error(pop);
}
rep = rep->replica;
}
if (pop->has_remote_replicas)
lane_release(pop);
return 0;
}
/*
* obj_rep_drain -- (internal) drain with replication
*/
static void
obj_rep_drain(void *ctx)
{
PMEMobjpool *pop = ctx;
LOG(15, "pop %p", pop);
pop->drain_local();
PMEMobjpool *rep = pop->replica;
while (rep) {
if (rep->rpp == NULL)
rep->drain_local();
rep = rep->replica;
}
}
#if VG_MEMCHECK_ENABLED
/*
* Arbitrary value. When there's more undefined regions than MAX_UNDEFS, it's
* not worth reporting everything - developer should fix the code.
*/
#define MAX_UNDEFS 1000
/*
* obj_vg_check_no_undef -- (internal) check whether there are any undefined
* regions
*/
static void
obj_vg_check_no_undef(struct pmemobjpool *pop)
{
LOG(4, "pop %p", pop);
struct {
void *start, *end;
} undefs[MAX_UNDEFS];
int num_undefs = 0;
VALGRIND_DO_DISABLE_ERROR_REPORTING;
char *addr_start = pop->addr;
char *addr_end = addr_start + pop->set->poolsize;
while (addr_start < addr_end) {
char *noaccess = (char *)VALGRIND_CHECK_MEM_IS_ADDRESSABLE(
addr_start, addr_end - addr_start);
if (noaccess == NULL)
noaccess = addr_end;
while (addr_start < noaccess) {
char *undefined =
(char *)VALGRIND_CHECK_MEM_IS_DEFINED(
addr_start, noaccess - addr_start);
if (undefined) {
addr_start = undefined;
#ifdef VALGRIND_CHECK_MEM_IS_UNDEFINED
addr_start = (char *)
VALGRIND_CHECK_MEM_IS_UNDEFINED(
addr_start, noaccess - addr_start);
if (addr_start == NULL)
addr_start = noaccess;
#else
while (addr_start < noaccess &&
VALGRIND_CHECK_MEM_IS_DEFINED(
addr_start, 1))
addr_start++;
#endif
if (num_undefs < MAX_UNDEFS) {
undefs[num_undefs].start = undefined;
undefs[num_undefs].end = addr_start - 1;
num_undefs++;
}
} else
addr_start = noaccess;
}
#ifdef VALGRIND_CHECK_MEM_IS_UNADDRESSABLE
addr_start = (char *)VALGRIND_CHECK_MEM_IS_UNADDRESSABLE(
addr_start, addr_end - addr_start);
if (addr_start == NULL)
addr_start = addr_end;
#else
while (addr_start < addr_end &&
(char *)VALGRIND_CHECK_MEM_IS_ADDRESSABLE(
addr_start, 1) == addr_start)
addr_start++;
#endif
}
VALGRIND_DO_ENABLE_ERROR_REPORTING;
if (num_undefs) {
/*
* How to resolve this error:
* If it's part of the free space Valgrind should be told about
* it by VALGRIND_DO_MAKE_MEM_NOACCESS request. If it's
* allocated - initialize it or use VALGRIND_DO_MAKE_MEM_DEFINED
* request.
*/
VALGRIND_PRINTF("Part of the pool is left in undefined state on"
" boot. This is pmemobj's bug.\nUndefined"
" regions: [pool address: %p]\n", pop);
for (int i = 0; i < num_undefs; ++i)
VALGRIND_PRINTF(" [%p, %p]\n", undefs[i].start,
undefs[i].end);
if (num_undefs == MAX_UNDEFS)
VALGRIND_PRINTF(" ...\n");
/* Trigger error. */
VALGRIND_CHECK_MEM_IS_DEFINED(undefs[0].start, 1);
}
}
/*
* obj_vg_boot -- (internal) notify Valgrind about pool objects
*/
static void
obj_vg_boot(struct pmemobjpool *pop)
{
if (!On_memcheck)
return;
LOG(4, "pop %p", pop);
if (os_getenv("PMEMOBJ_VG_CHECK_UNDEF"))
obj_vg_check_no_undef(pop);
}
#endif
/*
* obj_runtime_init_common -- (internal) runtime initialization
*
* Common routine for create/open and check.
*/
static int
obj_runtime_init_common(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
if ((errno = lane_boot(pop)) != 0) {
ERR("!lane_boot");
return errno;
}
if ((errno = lane_recover_and_section_boot(pop)) != 0) {
ERR("!lane_recover_and_section_boot");
return errno;
}
pop->conversion_flags = 0;
pmemops_persist(&pop->p_ops,
&pop->conversion_flags, sizeof(pop->conversion_flags));
return 0;
}
/*
* obj_runtime_cleanup_common -- (internal) runtime cleanup
*
* Common routine for create/open and check
*/
static void
obj_runtime_cleanup_common(PMEMobjpool *pop)
{
lane_section_cleanup(pop);
lane_cleanup(pop);
}
/*
* obj_descr_create -- (internal) create obj pool descriptor
*/
static int
obj_descr_create(PMEMobjpool *pop, const char *layout, size_t poolsize)
{
LOG(3, "pop %p layout %s poolsize %zu", pop, layout, poolsize);
ASSERTeq(poolsize % Pagesize, 0);
/* opaque info lives at the beginning of mapped memory pool */
void *dscp = (void *)((uintptr_t)pop + sizeof(struct pool_hdr));
/* create the persistent part of pool's descriptor */
memset(dscp, 0, OBJ_DSC_P_SIZE);
if (layout)
strncpy(pop->layout, layout, PMEMOBJ_MAX_LAYOUT - 1);
struct pmem_ops *p_ops = &pop->p_ops;
pop->lanes_offset = OBJ_LANES_OFFSET;
pop->nlanes = OBJ_NLANES;
/* zero all lanes */
lane_init_data(pop);
pop->heap_offset = pop->lanes_offset +
pop->nlanes * sizeof(struct lane_layout);
pop->heap_offset = (pop->heap_offset + Pagesize - 1) & ~(Pagesize - 1);
size_t heap_size = pop->set->poolsize - pop->heap_offset;
/* initialize heap prior to storing the checksum */
errno = palloc_init((char *)pop + pop->heap_offset, heap_size,
&pop->heap_size, p_ops);
if (errno != 0) {
ERR("!palloc_init");
return -1;
}
util_checksum(dscp, OBJ_DSC_P_SIZE, &pop->checksum, 1, 0);
/*
* store the persistent part of pool's descriptor (2kB)
*
* It's safe to use PMEMOBJ_F_RELAXED flag because the entire
* structure is protected by checksum.
*/
pmemops_xpersist(p_ops, dscp, OBJ_DSC_P_SIZE, PMEMOBJ_F_RELAXED);
/* initialize run_id, it will be incremented later */
pop->run_id = 0;
pmemops_persist(p_ops, &pop->run_id, sizeof(pop->run_id));
pop->root_offset = 0;
pmemops_persist(p_ops, &pop->root_offset, sizeof(pop->root_offset));
pop->root_size = 0;
pmemops_persist(p_ops, &pop->root_size, sizeof(pop->root_size));
pop->conversion_flags = 0;
pmemops_persist(p_ops, &pop->conversion_flags,
sizeof(pop->conversion_flags));
/*
* It's safe to use PMEMOBJ_F_RELAXED flag because the reserved
* area must be entirely zeroed.
*/
pmemops_memset(p_ops, pop->pmem_reserved, 0,
sizeof(pop->pmem_reserved), PMEMOBJ_F_RELAXED);
return 0;
}
/*
* obj_descr_check -- (internal) validate obj pool descriptor
*/
static int
obj_descr_check(PMEMobjpool *pop, const char *layout, size_t poolsize)
{
LOG(3, "pop %p layout %s poolsize %zu", pop, layout, poolsize);
void *dscp = (void *)((uintptr_t)pop + sizeof(struct pool_hdr));
if (pop->rpp) {
/* read remote descriptor */
if (obj_read_remote(pop->rpp, pop->remote_base, dscp, dscp,
OBJ_DSC_P_SIZE)) {
ERR("!obj_read_remote");
return -1;
}
}
if (!util_checksum(dscp, OBJ_DSC_P_SIZE, &pop->checksum, 0, 0)) {
ERR("invalid checksum of pool descriptor");
errno = EINVAL;
return -1;
}
if (layout &&
strncmp(pop->layout, layout, PMEMOBJ_MAX_LAYOUT)) {
ERR("wrong layout (\"%s\"), "
"pool created with layout \"%s\"",
layout, pop->layout);
errno = EINVAL;
return -1;
}
if (pop->heap_offset % Pagesize) {
ERR("unaligned heap: off %" PRIu64, pop->heap_offset);
errno = EINVAL;
return -1;
}
return 0;
}
/*
* obj_replica_init_local -- (internal) initialize runtime part
* of the local replicas
*/
static int
obj_replica_init_local(PMEMobjpool *rep, int is_pmem, size_t resvsize)
{
LOG(3, "rep %p is_pmem %d resvsize %zu", rep, is_pmem, resvsize);
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file, it is always
* created here, so no need to worry about byte-order.
*/
rep->is_pmem = is_pmem;
/* init hooks */
rep->persist_remote = NULL;
/*
* All replicas, except for master, are ignored as far as valgrind is
* concerned. This is to save CPU time and lessen the complexity of
* instrumentation.
*/
if (!rep->is_master_replica)
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(rep, resvsize);
if (rep->is_pmem) {
rep->persist_local = pmem_persist;
rep->flush_local = pmem_flush;
rep->drain_local = pmem_drain;
rep->memcpy_local = pmem_memcpy;
rep->memmove_local = pmem_memmove;
rep->memset_local = pmem_memset;
} else {
rep->persist_local = obj_msync_nofail;
rep->flush_local = obj_msync_nofail;
rep->drain_local = obj_drain_empty;
rep->memcpy_local = obj_nopmem_memcpy;
rep->memmove_local = obj_nopmem_memmove;
rep->memset_local = obj_nopmem_memset;
}
return 0;
}
/*
* obj_replica_init_remote -- (internal) initialize runtime part
* of a remote replica
*/
static int
obj_replica_init_remote(PMEMobjpool *rep, struct pool_set *set,
unsigned repidx, int create)
{
LOG(3, "rep %p set %p repidx %u", rep, set, repidx);
struct pool_replica *repset = set->replica[repidx];
ASSERTne(repset->remote->rpp, NULL);
ASSERTne(repset->remote->node_addr, NULL);
ASSERTne(repset->remote->pool_desc, NULL);
rep->node_addr = Strdup(repset->remote->node_addr);
if (rep->node_addr == NULL)
return -1;
rep->pool_desc = Strdup(repset->remote->pool_desc);
if (rep->pool_desc == NULL) {
Free(rep->node_addr);
return -1;
}
rep->rpp = repset->remote->rpp;
/* remote_base - beginning of the remote pool */
rep->remote_base = (uintptr_t)rep->addr;
/* init hooks */
rep->persist_remote = obj_remote_persist;
rep->persist_local = NULL;
rep->flush_local = NULL;
rep->drain_local = NULL;
rep->memcpy_local = NULL;
rep->memmove_local = NULL;
rep->memset_local = NULL;
rep->p_ops.remote.read = obj_read_remote;
rep->p_ops.remote.ctx = rep->rpp;
rep->p_ops.remote.base = rep->remote_base;
return 0;
}
/*
* obj_cleanup_remote -- (internal) clean up the remote pools data
*/
static void
obj_cleanup_remote(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
for (; pop != NULL; pop = pop->replica) {
if (pop->rpp != NULL) {
Free(pop->node_addr);
Free(pop->pool_desc);
pop->rpp = NULL;
}
}
}
/*
* obj_replica_init -- (internal) initialize runtime part of the replica
*/
static int
obj_replica_init(PMEMobjpool *rep, struct pool_set *set, unsigned repidx,
int create)
{
struct pool_replica *repset = set->replica[repidx];
if (repidx == 0) {
/* master replica */
rep->is_master_replica = 1;
rep->has_remote_replicas = set->remote;
if (set->nreplicas > 1) {
rep->p_ops.persist = obj_rep_persist;
rep->p_ops.flush = obj_rep_flush;
rep->p_ops.drain = obj_rep_drain;
rep->p_ops.memcpy = obj_rep_memcpy;
rep->p_ops.memmove = obj_rep_memmove;
rep->p_ops.memset = obj_rep_memset;
} else {
rep->p_ops.persist = obj_norep_persist;
rep->p_ops.flush = obj_norep_flush;
rep->p_ops.drain = obj_norep_drain;
rep->p_ops.memcpy = obj_norep_memcpy;
rep->p_ops.memmove = obj_norep_memmove;
rep->p_ops.memset = obj_norep_memset;
}
rep->p_ops.base = rep;
} else {
/* non-master replicas */
rep->is_master_replica = 0;
rep->has_remote_replicas = 0;
rep->p_ops.persist = NULL;
rep->p_ops.flush = NULL;
rep->p_ops.drain = NULL;
rep->p_ops.memcpy = NULL;
rep->p_ops.memmove = NULL;
rep->p_ops.memset = NULL;
rep->p_ops.base = NULL;
}
rep->is_dev_dax = set->replica[repidx]->part[0].is_dev_dax;
int ret;
if (repset->remote)
ret = obj_replica_init_remote(rep, set, repidx, create);
else
ret = obj_replica_init_local(rep, repset->is_pmem,
set->resvsize);
if (ret)
return ret;
return 0;
}
/*
* obj_replica_fini -- (internal) deinitialize replica
*/
static void
obj_replica_fini(struct pool_replica *repset)
{
PMEMobjpool *rep = repset->part[0].addr;
if (repset->remote)
obj_cleanup_remote(rep);
}
/*
* obj_runtime_init -- (internal) initialize runtime part of the pool header
*/
static int
obj_runtime_init(PMEMobjpool *pop, int rdonly, int boot, unsigned nlanes)
{
LOG(3, "pop %p rdonly %d boot %d", pop, rdonly, boot);
struct pmem_ops *p_ops = &pop->p_ops;
/* run_id is made unique by incrementing the previous value */
pop->run_id += 2;
if (pop->run_id == 0)
pop->run_id += 2;
pmemops_persist(p_ops, &pop->run_id, sizeof(pop->run_id));
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file, it is always
* created here, so no need to worry about byte-order.
*/
pop->rdonly = rdonly;
pop->uuid_lo = pmemobj_get_uuid_lo(pop);
pop->lanes_desc.runtime_nlanes = nlanes;
pop->tx_params = tx_params_new();
if (pop->tx_params == NULL)
goto err_tx_params;
pop->stats = stats_new(pop);
if (pop->stats == NULL)
goto err_stat;
pop->user_data = NULL;
VALGRIND_REMOVE_PMEM_MAPPING(&pop->mutex_head,
sizeof(pop->mutex_head));
VALGRIND_REMOVE_PMEM_MAPPING(&pop->rwlock_head,
sizeof(pop->rwlock_head));
VALGRIND_REMOVE_PMEM_MAPPING(&pop->cond_head,
sizeof(pop->cond_head));
pop->mutex_head = NULL;
pop->rwlock_head = NULL;
pop->cond_head = NULL;
if (boot) {
if ((errno = obj_runtime_init_common(pop)) != 0)
goto err_boot;
#if VG_MEMCHECK_ENABLED
if (On_memcheck) {
/* mark unused part of the pool as not accessible */
void *end = palloc_heap_end(&pop->heap);
VALGRIND_DO_MAKE_MEM_NOACCESS(end,
(char *)pop + pop->set->poolsize - (char *)end);
}
#endif
obj_pool_init();
if ((errno = critnib_insert(pools_ht, pop->uuid_lo, pop))) {
ERR("!critnib_insert to pools_ht");
goto err_critnib_insert;
}
if ((errno = critnib_insert(pools_tree, (uint64_t)pop, pop))) {
ERR("!critnib_insert to pools_tree");
goto err_tree_insert;
}
}
if (obj_ctl_init_and_load(pop) != 0) {
errno = EINVAL;
goto err_ctl;
}
util_mutex_init(&pop->ulog_user_buffers.lock);
pop->ulog_user_buffers.map = ravl_new_sized(
operation_user_buffer_range_cmp,
sizeof(struct user_buffer_def));
if (pop->ulog_user_buffers.map == NULL) {
ERR("!ravl_new_sized");
goto err_user_buffers_map;
}
pop->ulog_user_buffers.verify = 0;
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE(pop->addr, sizeof(struct pool_hdr), pop->is_dev_dax);
return 0;
err_user_buffers_map:
util_mutex_destroy(&pop->ulog_user_buffers.lock);
ctl_delete(pop->ctl);
err_ctl:;
void *n = critnib_remove(pools_tree, (uint64_t)pop);
ASSERTne(n, NULL);
err_tree_insert:
critnib_remove(pools_ht, pop->uuid_lo);
err_critnib_insert:
obj_runtime_cleanup_common(pop);
err_boot:
stats_delete(pop, pop->stats);
err_stat:
tx_params_delete(pop->tx_params);
err_tx_params:
return -1;
}
/*
* obj_get_nlanes -- get a number of lanes available at runtime. If the value
* provided with the PMEMOBJ_NLANES environment variable is greater than 0 and
* smaller than OBJ_NLANES constant it returns PMEMOBJ_NLANES. Otherwise it
* returns OBJ_NLANES.
*/
static unsigned
obj_get_nlanes(void)
{
LOG(3, NULL);
char *env_nlanes = os_getenv(OBJ_NLANES_ENV_VARIABLE);
if (env_nlanes) {
int nlanes = atoi(env_nlanes);
if (nlanes <= 0) {
ERR("%s variable must be a positive integer",
OBJ_NLANES_ENV_VARIABLE);
errno = EINVAL;
goto no_valid_env;
}
return (unsigned)(OBJ_NLANES < nlanes ? OBJ_NLANES : nlanes);
}
no_valid_env:
return OBJ_NLANES;
}
/*
* pmemobj_createU -- create a transactional memory pool (set)
*/
#ifndef _WIN32
static inline
#endif
PMEMobjpool *
pmemobj_createU(const char *path, const char *layout,
size_t poolsize, mode_t mode)
{
LOG(3, "path %s layout %s poolsize %zu mode %o",
path, layout, poolsize, mode);
PMEMobjpool *pop;
struct pool_set *set;
/* check length of layout */
if (layout && (strlen(layout) >= PMEMOBJ_MAX_LAYOUT)) {
ERR("Layout too long");
errno = EINVAL;
return NULL;
}
/*
* A number of lanes available at runtime equals the lowest value
* from all reported by remote replicas hosts. In the single host mode
* the runtime number of lanes is equal to the total number of lanes
* available in the pool or the value provided with PMEMOBJ_NLANES
* environment variable whichever is lower.
*/
unsigned runtime_nlanes = obj_get_nlanes();
struct pool_attr adj_pool_attr = Obj_create_attr;
/* force set SDS feature */
if (SDS_at_create)
adj_pool_attr.features.incompat |= POOL_FEAT_SDS;
else
adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS;
if (util_pool_create(&set, path, poolsize, PMEMOBJ_MIN_POOL,
PMEMOBJ_MIN_PART, &adj_pool_attr, &runtime_nlanes,
REPLICAS_ENABLED) != 0) {
LOG(2, "cannot create pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
/* pop is master replica from now on */
pop = set->replica[0]->part[0].addr;
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *repset = set->replica[r];
PMEMobjpool *rep = repset->part[0].addr;
size_t rt_size = (uintptr_t)(rep + 1) - (uintptr_t)&rep->addr;
VALGRIND_REMOVE_PMEM_MAPPING(&rep->addr, rt_size);
memset(&rep->addr, 0, rt_size);
rep->addr = rep;
rep->replica = NULL;
rep->rpp = NULL;
/* initialize replica runtime - is_pmem, funcs, ... */
if (obj_replica_init(rep, set, r, 1 /* create */) != 0) {
ERR("initialization of replica #%u failed", r);
goto err;
}
/* link replicas */
if (r < set->nreplicas - 1)
rep->replica = set->replica[r + 1]->part[0].addr;
}
pop->set = set;
/* create pool descriptor */
if (obj_descr_create(pop, layout, set->poolsize) != 0) {
LOG(2, "creation of pool descriptor failed");
goto err;
}
/* initialize runtime parts - lanes, obj stores, ... */
if (obj_runtime_init(pop, 0, 1 /* boot */,
runtime_nlanes) != 0) {
ERR("pool initialization failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pop %p", pop);
return pop;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (set->remote)
obj_cleanup_remote(pop);
util_poolset_close(set, DELETE_CREATED_PARTS);
errno = oerrno;
return NULL;
}
//NEW
/*int __wrap_open(const char *__path, int __oflag){
return __real_open(__path, __oflag);
}
*/
void *
open_device(const char *pathname)
{
	/* e.g. the PCI BAR resource of the NDP device:
	 * /sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0 */
	int fd = os_open(pathname, O_RDWR | O_SYNC);
	if (fd == -1) {
		printf("could not open device file %s\n", pathname);
		exit(1);
	}
	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED) {
		printf("could not map device memory\n");
		exit(1);
	}
	printf("opened device without error\n");
	return ptr;
}
///END NEW
#ifndef _WIN32
/*
* pmemobj_create -- create a transactional memory pool (set)
*/
PMEMobjpool *
pmemobj_create(const char *path, const char *layout,
size_t poolsize, mode_t mode)
{
PMEMOBJ_API_START();
	PMEMobjpool *pop = pmemobj_createU(path, layout, poolsize, mode);
	if (pop == NULL) {
		PMEMOBJ_API_END();
		return NULL;
	}
	/* NearPM: map the NDP device registers and tag the pool */
	pop->p_ops.device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0");
	pop->p_ops.objid = (uint16_t)pop->run_id;
	PMEMoid root = pmemobj_root(pop, sizeof(uint64_t));
	uint64_t *tmp = pmemobj_direct(root);
	printf("vaddr %p pmemobjid %" PRIx64 "\n", tmp, pop->run_id);
	/* write a few test patterns through the root object */
	*tmp = 0xdeadbeefdeadbeef;
	pmem_persist(tmp, 64);
	*tmp = (uint64_t)tmp;
	pmem_persist(tmp, 64);
	uint32_t tid;
	tid = 0;		/* gettid() */
	tid = tid & 0x3f;
	tid = (tid << 4) | 0;	/* | pop->run_id */
	*tmp = tid;
	pmem_persist(tmp, 64);
/*
PMEMoid root1 = pmemobj_root(pop, 256*1024*1024);
int a[64*1024*1024];
void* tmp1 = pmemobj_direct(root1);
clock_t start, end;
double cpu_time_used;
start = clock();
for(int i=0; i<10; i++) {
memcpy(tmp1, a, 256*1024*1024);
}
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n",cpu_time_used);
*/
//*tmp = 0xdeadbeefdeadbeee;
//pmem_persist(tmp,64);
//*((uint64_t*)pop->p_ops.device) = (uint64_t)(tmp);//dst;
//*((uint64_t*)(pop->p_ops.device)+2) = (uint64_t)(tmp);
//*((uint64_t*)(pop->p_ops.device)+3) = ((uint64_t)(((pop->p_ops.objid) << 16)| 9) <<32);
//*(((uint32_t*)(pop->p_ops.device))+255) = (uint32_t)(((pop->p_ops.objid) << 16)| 9);
#ifdef USE_NDP_REDO
use_ndp_redo = 1;
#endif
PMEMOBJ_API_END();
return pop;
}
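/*
 * Illustrative sketch (not part of the original source): how an
 * application typically calls this entry point. Path, layout name and
 * size are placeholders.
 *
 *	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/testpool", "mylayout",
 *			PMEMOBJ_MIN_POOL, 0666);
 *	if (pop == NULL) {
 *		perror("pmemobj_create");
 *		exit(1);
 *	}
 */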
#else
/*
* pmemobj_createW -- create a transactional memory pool (set)
*/
PMEMobjpool *
pmemobj_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
PMEMobjpool *ret = pmemobj_createU(upath, ulayout, poolsize, mode);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* obj_check_basic_local -- (internal) basic pool consistency check
* of a local replica
*/
static int
obj_check_basic_local(PMEMobjpool *pop, size_t mapped_size)
{
LOG(3, "pop %p mapped_size %zu", pop, mapped_size);
ASSERTeq(pop->rpp, NULL);
int consistent = 1;
if (pop->run_id % 2) {
ERR("invalid run_id %" PRIu64, pop->run_id);
consistent = 0;
}
if ((errno = lane_check(pop)) != 0) {
LOG(2, "!lane_check");
consistent = 0;
}
/* pop->heap_size can still be 0 at this point */
size_t heap_size = mapped_size - pop->heap_offset;
errno = palloc_heap_check((char *)pop + pop->heap_offset,
heap_size);
if (errno != 0) {
LOG(2, "!heap_check");
consistent = 0;
}
return consistent;
}
/*
* obj_read_remote -- read data from remote replica
*
* It reads data of size 'length' from the remote replica 'pop'
* from address 'addr' and saves it at address 'dest'.
*/
int
obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length)
{
LOG(3, "ctx %p base 0x%lx dest %p addr %p length %zu", ctx, base, dest,
addr, length);
ASSERTne(ctx, NULL);
ASSERT((uintptr_t)addr >= base);
uintptr_t offset = (uintptr_t)addr - base;
if (Rpmem_read(ctx, dest, offset, length, RLANE_DEFAULT)) {
ERR("!rpmem_read");
return -1;
}
return 0;
}
/*
* obj_check_basic_remote -- (internal) basic pool consistency check
* of a remote replica
*/
static int
obj_check_basic_remote(PMEMobjpool *pop, size_t mapped_size)
{
LOG(3, "pop %p mapped_size %zu", pop, mapped_size);
ASSERTne(pop->rpp, NULL);
int consistent = 1;
/* read pop->run_id */
if (obj_read_remote(pop->rpp, pop->remote_base, &pop->run_id,
&pop->run_id, sizeof(pop->run_id))) {
ERR("!obj_read_remote");
return -1;
}
if (pop->run_id % 2) {
ERR("invalid run_id %" PRIu64, pop->run_id);
consistent = 0;
}
/* XXX add lane_check_remote */
/* pop->heap_size can still be 0 at this point */
size_t heap_size = mapped_size - pop->heap_offset;
if (palloc_heap_check_remote((char *)pop + pop->heap_offset,
heap_size, &pop->p_ops.remote)) {
LOG(2, "!heap_check_remote");
consistent = 0;
}
return consistent;
}
/*
* obj_check_basic -- (internal) basic pool consistency check
*
* Used to check if all the replicas are consistent prior to pool recovery.
*/
static int
obj_check_basic(PMEMobjpool *pop, size_t mapped_size)
{
LOG(3, "pop %p mapped_size %zu", pop, mapped_size);
if (pop->rpp == NULL)
return obj_check_basic_local(pop, mapped_size);
else
return obj_check_basic_remote(pop, mapped_size);
}
/*
* obj_pool_close -- (internal) close the pool set
*/
static void
obj_pool_close(struct pool_set *set)
{
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
}
/*
* obj_pool_open -- (internal) open the given pool
*/
static int
obj_pool_open(struct pool_set **set, const char *path, unsigned flags,
unsigned *nlanes)
{
if (util_pool_open(set, path, PMEMOBJ_MIN_PART, &Obj_open_attr,
nlanes, NULL, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return -1;
}
ASSERT((*set)->nreplicas > 0);
/* read-only mode is not supported in libpmemobj */
if ((*set)->rdonly) {
ERR("read-only mode is not supported");
errno = EINVAL;
goto err_rdonly;
}
return 0;
err_rdonly:
obj_pool_close(*set);
return -1;
}
/*
* obj_replicas_init -- (internal) initialize all replicas
*/
static int
obj_replicas_init(struct pool_set *set)
{
unsigned r;
for (r = 0; r < set->nreplicas; r++) {
struct pool_replica *repset = set->replica[r];
PMEMobjpool *rep = repset->part[0].addr;
size_t rt_size = (uintptr_t)(rep + 1) - (uintptr_t)&rep->addr;
VALGRIND_REMOVE_PMEM_MAPPING(&rep->addr, rt_size);
memset(&rep->addr, 0, rt_size);
rep->addr = rep;
rep->replica = NULL;
rep->rpp = NULL;
/* initialize replica runtime - is_pmem, funcs, ... */
if (obj_replica_init(rep, set, r, 0 /* open */) != 0) {
ERR("initialization of replica #%u failed", r);
goto err;
}
/* link replicas */
if (r < set->nreplicas - 1)
rep->replica = set->replica[r + 1]->part[0].addr;
}
return 0;
err:
for (unsigned p = 0; p < r; p++)
obj_replica_fini(set->replica[p]);
return -1;
}
/*
* obj_replicas_fini -- (internal) deinitialize all replicas
*/
static void
obj_replicas_fini(struct pool_set *set)
{
int oerrno = errno;
for (unsigned r = 0; r < set->nreplicas; r++)
obj_replica_fini(set->replica[r]);
errno = oerrno;
}
/*
* obj_replicas_check_basic -- (internal) perform basic consistency check
* for all replicas
*/
static int
obj_replicas_check_basic(PMEMobjpool *pop)
{
PMEMobjpool *rep;
for (unsigned r = 0; r < pop->set->nreplicas; r++) {
rep = pop->set->replica[r]->part[0].addr;
if (obj_check_basic(rep, pop->set->poolsize) == 0) {
ERR("inconsistent replica #%u", r);
return -1;
}
}
/* copy lanes */
void *src = (void *)((uintptr_t)pop + pop->lanes_offset);
size_t len = pop->nlanes * sizeof(struct lane_layout);
for (unsigned r = 1; r < pop->set->nreplicas; r++) {
rep = pop->set->replica[r]->part[0].addr;
void *dst = (void *)((uintptr_t)rep + pop->lanes_offset);
if (rep->rpp == NULL) {
rep->memcpy_local(dst, src, len, 0);
} else {
if (rep->persist_remote(rep, dst, len,
RLANE_DEFAULT, 0))
obj_handle_remote_persist_error(pop);
}
}
return 0;
}
/*
* obj_open_common -- open a transactional memory pool (set)
*
* This routine takes flags and does all the work
* (flag POOL_OPEN_COW - internal calls can map a read-only pool if required).
*/
static PMEMobjpool *
obj_open_common(const char *path, const char *layout, unsigned flags, int boot)
{
LOG(3, "path %s layout %s flags 0x%x", path, layout, flags);
PMEMobjpool *pop = NULL;
struct pool_set *set;
//pop->device = open_device();
/*
* A number of lanes available at runtime equals the lowest value
* from all reported by remote replicas hosts. In the single host mode
* the runtime number of lanes is equal to the total number of lanes
* available in the pool or the value provided with PMEMOBJ_NLANES
* environment variable whichever is lower.
*/
unsigned runtime_nlanes = obj_get_nlanes();
if (obj_pool_open(&set, path, flags, &runtime_nlanes))
return NULL;
/* pop is master replica from now on */
pop = set->replica[0]->part[0].addr;
if (obj_replicas_init(set))
goto replicas_init;
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *repset = set->replica[r];
PMEMobjpool *rep = repset->part[0].addr;
/* check descriptor */
if (obj_descr_check(rep, layout, set->poolsize) != 0) {
LOG(2, "descriptor check of replica #%u failed", r);
goto err_descr_check;
}
}
pop->set = set;
if (boot) {
/* check consistency of 'master' replica */
if (obj_check_basic(pop, pop->set->poolsize) == 0) {
goto err_check_basic;
}
}
if (set->nreplicas > 1) {
if (obj_replicas_check_basic(pop))
goto err_replicas_check_basic;
}
/*
* before runtime initialization lanes are unavailable, remote persists
* should use RLANE_DEFAULT
*/
pop->lanes_desc.runtime_nlanes = 0;
pop->p_ops.device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0");
// printf("orig addr %lx\n",(uint64_t)pop->p_ops.device);
#if VG_MEMCHECK_ENABLED
pop->vg_boot = boot;
#endif
/* initialize runtime parts - lanes, obj stores, ... */
if (obj_runtime_init(pop, 0, boot, runtime_nlanes) != 0) {
ERR("pool initialization failed");
goto err_runtime_init;
}
#if VG_MEMCHECK_ENABLED
if (boot)
obj_vg_boot(pop);
#endif
util_poolset_fdclose(set);
LOG(3, "pop %p", pop);
return pop;
err_runtime_init:
err_replicas_check_basic:
err_check_basic:
err_descr_check:
obj_replicas_fini(set);
replicas_init:
obj_pool_close(set);
return NULL;
}
/*
* pmemobj_openU -- open a transactional memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMobjpool *
pmemobj_openU(const char *path, const char *layout)
{
LOG(3, "path %s layout %s", path, layout);
return obj_open_common(path, layout,
COW_at_open ? POOL_OPEN_COW : 0, 1);
}
#ifndef _WIN32
/*
* pmemobj_open -- open a transactional memory pool
*/
PMEMobjpool *
pmemobj_open(const char *path, const char *layout)
{
PMEMOBJ_API_START();
PMEMobjpool *pop = pmemobj_openU(path, layout);
PMEMOBJ_API_END();
return pop;
}
#else
/*
* pmemobj_openW -- open a transactional memory pool
*/
PMEMobjpool *
pmemobj_openW(const wchar_t *path, const wchar_t *layout)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
PMEMobjpool *ret = pmemobj_openU(upath, ulayout);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
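/*
 * Illustrative sketch (not part of the original source): reopening an
 * existing pool and closing it. The layout string must match the one
 * used at creation time.
 *
 *	PMEMobjpool *pop = pmemobj_open("/mnt/pmem/testpool", "mylayout");
 *	if (pop == NULL) {
 *		perror("pmemobj_open");
 *		exit(1);
 *	}
 *	// ... use the pool ...
 *	pmemobj_close(pop);
 */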
/*
* obj_replicas_cleanup -- (internal) free resources allocated for replicas
*/
static void
obj_replicas_cleanup(struct pool_set *set)
{
LOG(3, "set %p", set);
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *rep = set->replica[r];
PMEMobjpool *pop = rep->part[0].addr;
if (pop->rpp != NULL) {
/*
* remote replica will be closed in util_poolset_close
*/
pop->rpp = NULL;
Free(pop->node_addr);
Free(pop->pool_desc);
}
}
}
/*
* obj_pool_lock_cleanup -- (internal) Destroy any locks or condition
* variables that were allocated at run time
*/
static void
obj_pool_lock_cleanup(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
PMEMmutex_internal *nextm;
for (PMEMmutex_internal *m = pop->mutex_head; m != NULL; m = nextm) {
nextm = m->PMEMmutex_next;
LOG(4, "mutex %p *mutex %p", &m->PMEMmutex_lock,
m->PMEMmutex_bsd_mutex_p);
os_mutex_destroy(&m->PMEMmutex_lock);
m->PMEMmutex_next = NULL;
m->PMEMmutex_bsd_mutex_p = NULL;
}
pop->mutex_head = NULL;
PMEMrwlock_internal *nextr;
for (PMEMrwlock_internal *r = pop->rwlock_head; r != NULL; r = nextr) {
nextr = r->PMEMrwlock_next;
LOG(4, "rwlock %p *rwlock %p", &r->PMEMrwlock_lock,
r->PMEMrwlock_bsd_rwlock_p);
os_rwlock_destroy(&r->PMEMrwlock_lock);
r->PMEMrwlock_next = NULL;
r->PMEMrwlock_bsd_rwlock_p = NULL;
}
pop->rwlock_head = NULL;
PMEMcond_internal *nextc;
for (PMEMcond_internal *c = pop->cond_head; c != NULL; c = nextc) {
nextc = c->PMEMcond_next;
LOG(4, "cond %p *cond %p", &c->PMEMcond_cond,
c->PMEMcond_bsd_cond_p);
os_cond_destroy(&c->PMEMcond_cond);
c->PMEMcond_next = NULL;
c->PMEMcond_bsd_cond_p = NULL;
}
pop->cond_head = NULL;
}
/*
* obj_pool_cleanup -- (internal) cleanup the pool and unmap
*/
static void
obj_pool_cleanup(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
ravl_delete(pop->ulog_user_buffers.map);
util_mutex_destroy(&pop->ulog_user_buffers.lock);
stats_delete(pop, pop->stats);
tx_params_delete(pop->tx_params);
ctl_delete(pop->ctl);
obj_pool_lock_cleanup(pop);
lane_section_cleanup(pop);
lane_cleanup(pop);
/* unmap all the replicas */
obj_replicas_cleanup(pop->set);
util_poolset_close(pop->set, DO_NOT_DELETE_PARTS);
}
/*
* pmemobj_close -- close a transactional memory pool
*/
void
pmemobj_close(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
PMEMOBJ_API_START();
_pobj_cache_invalidate++;
if (critnib_remove(pools_ht, pop->uuid_lo) != pop) {
ERR("critnib_remove for pools_ht");
}
if (critnib_remove(pools_tree, (uint64_t)pop) != pop)
ERR("critnib_remove for pools_tree");
#ifndef _WIN32
if (_pobj_cached_pool.pop == pop) {
_pobj_cached_pool.pop = NULL;
_pobj_cached_pool.uuid_lo = 0;
}
#else /* _WIN32 */
struct _pobj_pcache *pcache = os_tls_get(Cached_pool_key);
if (pcache != NULL) {
if (pcache->pop == pop) {
pcache->pop = NULL;
pcache->uuid_lo = 0;
}
}
#endif /* _WIN32 */
obj_pool_cleanup(pop);
PMEMOBJ_API_END();
}
/*
* pmemobj_checkU -- transactional memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemobj_checkU(const char *path, const char *layout)
{
LOG(3, "path %s layout %s", path, layout);
PMEMobjpool *pop = obj_open_common(path, layout, POOL_OPEN_COW, 0);
if (pop == NULL)
return -1; /* errno set by obj_open_common() */
int consistent = 1;
/*
* For replicated pools, basic consistency check is performed
* in obj_open_common().
*/
if (pop->replica == NULL)
consistent = obj_check_basic(pop, pop->set->poolsize);
if (consistent && (errno = obj_runtime_init_common(pop)) != 0) {
LOG(3, "!obj_boot");
consistent = 0;
}
if (consistent) {
obj_pool_cleanup(pop);
} else {
stats_delete(pop, pop->stats);
tx_params_delete(pop->tx_params);
ctl_delete(pop->ctl);
/* unmap all the replicas */
obj_replicas_cleanup(pop->set);
util_poolset_close(pop->set, DO_NOT_DELETE_PARTS);
}
if (consistent)
LOG(4, "pool consistency check OK");
return consistent;
}
#ifndef _WIN32
/*
* pmemobj_check -- transactional memory pool consistency check
*/
int
pmemobj_check(const char *path, const char *layout)
{
PMEMOBJ_API_START();
int ret = pmemobj_checkU(path, layout);
PMEMOBJ_API_END();
return ret;
}
#else
/*
* pmemobj_checkW -- transactional memory pool consistency check
*/
int
pmemobj_checkW(const wchar_t *path, const wchar_t *layout)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return -1;
}
}
int ret = pmemobj_checkU(upath, ulayout);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* pmemobj_pool_by_oid -- returns the pool handle associated with the oid
*/
PMEMobjpool *
pmemobj_pool_by_oid(PMEMoid oid)
{
LOG(3, "oid.off 0x%016" PRIx64, oid.off);
/* XXX this is a temporary fix, to be fixed properly later */
if (pools_ht == NULL)
return NULL;
return critnib_get(pools_ht, oid.pool_uuid_lo);
}
/*
* pmemobj_pool_by_ptr -- returns the pool handle associated with the address
*/
PMEMobjpool *
pmemobj_pool_by_ptr(const void *addr)
{
LOG(3, "addr %p", addr);
/* fast path for transactions */
PMEMobjpool *pop = tx_get_pop();
if ((pop != NULL) && OBJ_PTR_FROM_POOL(pop, addr))
return pop;
/* XXX this is a temporary fix, to be fixed properly later */
if (pools_tree == NULL)
return NULL;
pop = critnib_find_le(pools_tree, (uint64_t)addr);
if (pop == NULL)
return NULL;
size_t pool_size = pop->heap_offset + pop->heap_size;
if ((char *)addr >= (char *)pop + pool_size)
return NULL;
return pop;
}
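/*
 * Illustrative sketch (not part of the original source): both lookup
 * helpers answer "which pool does this belong to"; 'oid' is a
 * hypothetical, valid object id.
 *
 *	PMEMobjpool *by_oid = pmemobj_pool_by_oid(oid);
 *	PMEMobjpool *by_ptr = pmemobj_pool_by_ptr(pmemobj_direct(oid));
 *	// for a valid, non-NULL oid both return the same pool handle
 */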
/*
* pmemobj_set_user_data -- sets volatile pointer to the user data for specified
* pool
*/
void
pmemobj_set_user_data(PMEMobjpool *pop, void *data)
{
LOG(3, "pop %p data %p", pop, data);
pop->user_data = data;
}
/*
* pmemobj_get_user_data -- gets volatile pointer to the user data associated
* with the specified pool
*/
void *
pmemobj_get_user_data(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
return pop->user_data;
}
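/*
 * Illustrative sketch (not part of the original source): attaching a
 * volatile, per-pool context to the handle. The struct is hypothetical
 * and lives in normal (non-persistent) memory.
 *
 *	struct app_ctx *ctx = malloc(sizeof(*ctx));
 *	pmemobj_set_user_data(pop, ctx);
 *	// ... later, anywhere the pool handle is available ...
 *	struct app_ctx *again = pmemobj_get_user_data(pop);
 */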
/* arguments for constructor_alloc */
struct constr_args {
int zero_init;
pmemobj_constr constructor;
void *arg;
};
/*
* constructor_alloc -- (internal) constructor for obj_alloc_construct
*/
static int
constructor_alloc(void *ctx, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = ctx;
LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);
struct pmem_ops *p_ops = &pop->p_ops;
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
struct constr_args *carg = arg;
if (carg->zero_init)
pmemops_memset(p_ops, ptr, 0, usable_size, 0);
int ret = 0;
if (carg->constructor)
ret = carg->constructor(pop, ptr, carg->arg);
return ret;
}
/*
* obj_alloc_construct -- (internal) allocates a new object with constructor
*/
static int
obj_alloc_construct(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
type_num_t type_num, uint64_t flags,
pmemobj_constr constructor, void *arg)
{
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
errno = ENOMEM;
return -1;
}
struct constr_args carg;
carg.zero_init = flags & POBJ_FLAG_ZERO;
carg.constructor = constructor;
carg.arg = arg;
struct operation_context *ctx = pmalloc_operation_hold(pop);
if (oidp)
operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo,
ULOG_OPERATION_SET);
int ret = palloc_operation(&pop->heap, 0,
oidp != NULL ? &oidp->off : NULL, size,
constructor_alloc, &carg, type_num, 0,
CLASS_ID_FROM_FLAG(flags), ARENA_ID_FROM_FLAG(flags),
ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pmemobj_alloc -- allocates a new object
*/
int
pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num, pmemobj_constr constructor, void *arg)
{
LOG(3, "pop %p oidp %p size %zu type_num %llx constructor %p arg %p",
pop, oidp, size, (unsigned long long)type_num,
constructor, arg);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
if (size == 0) {
ERR("allocation with size 0");
errno = EINVAL;
return -1;
}
PMEMOBJ_API_START();
int ret = obj_alloc_construct(pop, oidp, size, type_num,
0, constructor, arg);
PMEMOBJ_API_END();
return ret;
}
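/*
 * Illustrative sketch (not part of the original source): atomic
 * allocation with a constructor that initializes the object before it
 * becomes reachable. The element type and constructor are hypothetical.
 *
 *	static int
 *	init_elem(PMEMobjpool *pop, void *ptr, void *arg)
 *	{
 *		struct elem *e = ptr;
 *		e->value = *(int *)arg;
 *		pmemobj_persist(pop, e, sizeof(*e));
 *		return 0;
 *	}
 *
 *	int v = 7;
 *	PMEMoid oid;
 *	if (pmemobj_alloc(pop, &oid, sizeof(struct elem), 1, init_elem, &v))
 *		perror("pmemobj_alloc");
 */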
/*
* pmemobj_xalloc -- allocates with flags
*/
int
pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num, uint64_t flags,
pmemobj_constr constructor, void *arg)
{
LOG(3, "pop %p oidp %p size %zu type_num %llx flags %llx "
"constructor %p arg %p",
pop, oidp, size, (unsigned long long)type_num,
(unsigned long long)flags,
constructor, arg);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
if (size == 0) {
ERR("allocation with size 0");
errno = EINVAL;
return -1;
}
if (flags & ~POBJ_TX_XALLOC_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_TX_XALLOC_VALID_FLAGS);
errno = EINVAL;
return -1;
}
PMEMOBJ_API_START();
int ret = obj_alloc_construct(pop, oidp, size, type_num,
flags, constructor, arg);
PMEMOBJ_API_END();
return ret;
}
/* arguments for constructor_realloc and constructor_zrealloc */
struct carg_realloc {
void *ptr;
size_t old_size;
size_t new_size;
int zero_init;
type_num_t user_type;
pmemobj_constr constructor;
void *arg;
};
/*
* pmemobj_zalloc -- allocates a new zeroed object
*/
int
pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num)
{
LOG(3, "pop %p oidp %p size %zu type_num %llx",
pop, oidp, size, (unsigned long long)type_num);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
if (size == 0) {
ERR("allocation with size 0");
errno = EINVAL;
return -1;
}
PMEMOBJ_API_START();
int ret = obj_alloc_construct(pop, oidp, size, type_num, POBJ_FLAG_ZERO,
NULL, NULL);
PMEMOBJ_API_END();
return ret;
}
/*
* obj_free -- (internal) free an object
*/
static void
obj_free(PMEMobjpool *pop, PMEMoid *oidp)
{
ASSERTne(oidp, NULL);
struct operation_context *ctx = pmalloc_operation_hold(pop);
operation_add_entry(ctx, &oidp->pool_uuid_lo, 0, ULOG_OPERATION_SET);
palloc_operation(&pop->heap, oidp->off, &oidp->off, 0, NULL, NULL,
0, 0, 0, 0, ctx);
pmalloc_operation_release(pop);
}
/*
* constructor_realloc -- (internal) constructor for pmemobj_realloc
*/
static int
constructor_realloc(void *ctx, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = ctx;
LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);
struct pmem_ops *p_ops = &pop->p_ops;
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
struct carg_realloc *carg = arg;
if (!carg->zero_init)
return 0;
if (usable_size > carg->old_size) {
size_t grow_len = usable_size - carg->old_size;
void *new_data_ptr = (void *)((uintptr_t)ptr + carg->old_size);
pmemops_memset(p_ops, new_data_ptr, 0, grow_len, 0);
}
return 0;
}
/*
* obj_realloc_common -- (internal) common routine for resizing
* existing objects
*/
static int
obj_realloc_common(PMEMobjpool *pop,
PMEMoid *oidp, size_t size, type_num_t type_num, int zero_init)
{
/* if OID is NULL just allocate memory */
if (OBJ_OID_IS_NULL(*oidp)) {
/* if size is 0 - do nothing */
if (size == 0)
return 0;
return obj_alloc_construct(pop, oidp, size, type_num,
POBJ_FLAG_ZERO, NULL, NULL);
}
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
errno = ENOMEM;
return -1;
}
/* if size is 0 just free */
if (size == 0) {
obj_free(pop, oidp);
return 0;
}
struct carg_realloc carg;
carg.ptr = OBJ_OFF_TO_PTR(pop, oidp->off);
carg.new_size = size;
carg.old_size = pmemobj_alloc_usable_size(*oidp);
carg.user_type = type_num;
carg.constructor = NULL;
carg.arg = NULL;
carg.zero_init = zero_init;
struct operation_context *ctx = pmalloc_operation_hold(pop);
int ret = palloc_operation(&pop->heap, oidp->off, &oidp->off,
size, constructor_realloc, &carg, type_num,
0, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* constructor_zrealloc_root -- (internal) constructor for pmemobj_root
*/
static int
constructor_zrealloc_root(void *ctx, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = ctx;
LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
VALGRIND_ADD_TO_TX(ptr, usable_size);
struct carg_realloc *carg = arg;
constructor_realloc(pop, ptr, usable_size, arg);
int ret = 0;
if (carg->constructor)
ret = carg->constructor(pop, ptr, carg->arg);
VALGRIND_REMOVE_FROM_TX(ptr, usable_size);
return ret;
}
/*
* pmemobj_realloc -- resizes an existing object
*/
int
pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num)
{
ASSERTne(oidp, NULL);
LOG(3, "pop %p oid.off 0x%016" PRIx64 " size %zu type_num %" PRIu64,
pop, oidp->off, size, type_num);
PMEMOBJ_API_START();
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
ASSERT(OBJ_OID_IS_VALID(pop, *oidp));
int ret = obj_realloc_common(pop, oidp, size, (type_num_t)type_num, 0);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_zrealloc -- resizes an existing object, any new space is zeroed.
*/
int
pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num)
{
ASSERTne(oidp, NULL);
LOG(3, "pop %p oid.off 0x%016" PRIx64 " size %zu type_num %" PRIu64,
pop, oidp->off, size, type_num);
PMEMOBJ_API_START();
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
ASSERT(OBJ_OID_IS_VALID(pop, *oidp));
int ret = obj_realloc_common(pop, oidp, size, (type_num_t)type_num, 1);
PMEMOBJ_API_END();
return ret;
}
/* arguments for constructor_strdup */
struct carg_strdup {
size_t size;
const char *s;
};
/*
* constructor_strdup -- (internal) constructor of pmemobj_strdup
*/
static int
constructor_strdup(PMEMobjpool *pop, void *ptr, void *arg)
{
LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
struct carg_strdup *carg = arg;
/* copy string */
pmemops_memcpy(&pop->p_ops, ptr, carg->s, carg->size, 0);
return 0;
}
/*
* pmemobj_strdup -- allocates a new object with duplicate of the string s.
*/
int
pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s,
uint64_t type_num)
{
LOG(3, "pop %p oidp %p string %s type_num %" PRIu64,
pop, oidp, s, type_num);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
if (NULL == s) {
errno = EINVAL;
return -1;
}
PMEMOBJ_API_START();
struct carg_strdup carg;
carg.size = (strlen(s) + 1) * sizeof(char);
carg.s = s;
int ret = obj_alloc_construct(pop, oidp, carg.size,
(type_num_t)type_num, 0, constructor_strdup, &carg);
PMEMOBJ_API_END();
return ret;
}
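/*
 * Illustrative sketch (not part of the original source): persisting a
 * C string in one call; the type number is arbitrary.
 *
 *	PMEMoid soid;
 *	if (pmemobj_strdup(pop, &soid, "hello pmem", 2) == 0)
 *		printf("%s\n", (char *)pmemobj_direct(soid));
 */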
/* arguments for constructor_wcsdup */
struct carg_wcsdup {
size_t size;
const wchar_t *s;
};
/*
* constructor_wcsdup -- (internal) constructor of pmemobj_wcsdup
*/
static int
constructor_wcsdup(PMEMobjpool *pop, void *ptr, void *arg)
{
LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
struct carg_wcsdup *carg = arg;
/* copy string */
pmemops_memcpy(&pop->p_ops, ptr, carg->s, carg->size, 0);
return 0;
}
/*
* pmemobj_wcsdup -- allocates a new object with duplicate of the wide character
* string s.
*/
int
pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s,
uint64_t type_num)
{
LOG(3, "pop %p oidp %p string %S type_num %" PRIu64,
pop, oidp, s, type_num);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
if (NULL == s) {
errno = EINVAL;
return -1;
}
PMEMOBJ_API_START();
struct carg_wcsdup carg;
carg.size = (wcslen(s) + 1) * sizeof(wchar_t);
carg.s = s;
int ret = obj_alloc_construct(pop, oidp, carg.size,
(type_num_t)type_num, 0, constructor_wcsdup, &carg);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_free -- frees an existing object
*/
void
pmemobj_free(PMEMoid *oidp)
{
ASSERTne(oidp, NULL);
LOG(3, "oid.off 0x%016" PRIx64, oidp->off);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
if (oidp->off == 0)
return;
PMEMOBJ_API_START();
PMEMobjpool *pop = pmemobj_pool_by_oid(*oidp);
ASSERTne(pop, NULL);
ASSERT(OBJ_OID_IS_VALID(pop, *oidp));
obj_free(pop, oidp);
PMEMOBJ_API_END();
}
/*
* pmemobj_alloc_usable_size -- returns usable size of object
*/
size_t
pmemobj_alloc_usable_size(PMEMoid oid)
{
LOG(3, "oid.off 0x%016" PRIx64, oid.off);
if (oid.off == 0)
return 0;
PMEMobjpool *pop = pmemobj_pool_by_oid(oid);
ASSERTne(pop, NULL);
ASSERT(OBJ_OID_IS_VALID(pop, oid));
return (palloc_usable_size(&pop->heap, oid.off));
}
/*
* pmemobj_memcpy_persist -- pmemobj version of memcpy
*/
void *
pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src,
size_t len)
{
LOG(15, "pop %p dest %p src %p len %zu", pop, dest, src, len);
PMEMOBJ_API_START();
void *ptr = pmemops_memcpy(&pop->p_ops, dest, src, len, 0);
PMEMOBJ_API_END();
return ptr;
}
/*
* pmemobj_memset_persist -- pmemobj version of memset
*/
void *
pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len)
{
LOG(15, "pop %p dest %p c 0x%02x len %zu", pop, dest, c, len);
PMEMOBJ_API_START();
void *ptr = pmemops_memset(&pop->p_ops, dest, c, len, 0);
PMEMOBJ_API_END();
return ptr;
}
/*
* pmemobj_memcpy -- pmemobj version of memcpy
*/
void *
pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len,
unsigned flags)
{
LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
flags);
PMEMOBJ_API_START();
void *ptr = pmemops_memcpy(&pop->p_ops, dest, src, len, flags);
PMEMOBJ_API_END();
return ptr;
}
/*
* pmemobj_memmove -- pmemobj version of memmove
*/
void *
pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len,
unsigned flags)
{
LOG(15, "pop %p dest %p src %p len %zu flags 0x%x", pop, dest, src, len,
flags);
PMEMOBJ_API_START();
void *ptr = pmemops_memmove(&pop->p_ops, dest, src, len, flags);
PMEMOBJ_API_END();
return ptr;
}
/*
* pmemobj_memset -- pmemobj version of memset
*/
void *
pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags)
{
LOG(15, "pop %p dest %p c 0x%02x len %zu flags 0x%x", pop, dest, c, len,
flags);
PMEMOBJ_API_START();
void *ptr = pmemops_memset(&pop->p_ops, dest, c, len, flags);
PMEMOBJ_API_END();
return ptr;
}
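/*
 * Illustrative sketch (not compiled): batching several flag-aware copies and
 * draining once at the end, using the flagged memcpy above.  The buffers and
 * the helper name are assumptions for this example.
 */
#if 0
static void
example_memcpy_nodrain(PMEMobjpool *pop, void *dst, const void *src, size_t len)
{
	/* flush the stores but postpone the drain */
	pmemobj_memcpy(pop, dst, src, len, PMEMOBJ_F_MEM_NODRAIN);
	/* ... further copies could go here ... */
	pmemobj_drain(pop); /* single drain for the whole batch */
}
#endif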
/*
* pmemobj_persist -- pmemobj version of pmem_persist
*/
void
pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len)
{
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
pmemops_persist(&pop->p_ops, addr, len);
}
/*
* pmemobj_flush -- pmemobj version of pmem_flush
*/
void
pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len)
{
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
pmemops_flush(&pop->p_ops, addr, len);
}
/*
* pmemobj_xpersist -- pmemobj version of pmem_persist with additional flags
* argument
*/
int
pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags)
{
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
if (flags & ~OBJ_X_VALID_FLAGS) {
errno = EINVAL;
ERR("invalid flags 0x%x", flags);
return -1;
}
return pmemops_xpersist(&pop->p_ops, addr, len, flags);
}
/*
* pmemobj_xflush -- pmemobj version of pmem_flush with additional flags
* argument
*/
int
pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags)
{
LOG(15, "pop %p addr %p len %zu", pop, addr, len);
if (flags & ~OBJ_X_VALID_FLAGS) {
errno = EINVAL;
ERR("invalid flags 0x%x", flags);
return -1;
}
return pmemops_xflush(&pop->p_ops, addr, len, flags);
}
/*
* pmemobj_drain -- pmemobj version of pmem_drain
*/
void
pmemobj_drain(PMEMobjpool *pop)
{
LOG(15, "pop %p", pop);
pmemops_drain(&pop->p_ops);
}
/*
* pmemobj_type_num -- returns type number of object
*/
uint64_t
pmemobj_type_num(PMEMoid oid)
{
LOG(3, "oid.off 0x%016" PRIx64, oid.off);
ASSERT(!OID_IS_NULL(oid));
PMEMobjpool *pop = pmemobj_pool_by_oid(oid);
ASSERTne(pop, NULL);
ASSERT(OBJ_OID_IS_VALID(pop, oid));
return palloc_extra(&pop->heap, oid.off);
}
/* arguments for constructor_alloc_root */
struct carg_root {
size_t size;
pmemobj_constr constructor;
void *arg;
};
/*
 * obj_alloc_root -- (internal) allocates or resizes the root object
*/
static int
obj_alloc_root(PMEMobjpool *pop, size_t size,
pmemobj_constr constructor, void *arg)
{
LOG(3, "pop %p size %zu", pop, size);
struct carg_realloc carg;
carg.ptr = OBJ_OFF_TO_PTR(pop, pop->root_offset);
carg.old_size = pop->root_size;
carg.new_size = size;
carg.user_type = POBJ_ROOT_TYPE_NUM;
carg.constructor = constructor;
carg.zero_init = 1;
carg.arg = arg;
struct operation_context *ctx = pmalloc_operation_hold(pop);
operation_add_entry(ctx, &pop->root_size, size, ULOG_OPERATION_SET);
int ret = palloc_operation(&pop->heap, pop->root_offset,
&pop->root_offset, size,
constructor_zrealloc_root, &carg,
POBJ_ROOT_TYPE_NUM, OBJ_INTERNAL_OBJECT_MASK,
0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pmemobj_root_size -- returns size of the root object
*/
size_t
pmemobj_root_size(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
	if (pop->root_offset && pop->root_size)
		return pop->root_size;
	else
		return 0;
}
/*
* pmemobj_root_construct -- returns root object
*/
PMEMoid
pmemobj_root_construct(PMEMobjpool *pop, size_t size,
pmemobj_constr constructor, void *arg)
{
LOG(3, "pop %p size %zu constructor %p args %p", pop, size, constructor,
arg);
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
errno = ENOMEM;
return OID_NULL;
}
if (size == 0 && pop->root_offset == 0) {
ERR("requested size cannot equals zero");
errno = EINVAL;
return OID_NULL;
}
PMEMOBJ_API_START();
PMEMoid root;
pmemobj_mutex_lock_nofail(pop, &pop->rootlock);
if (size > pop->root_size &&
obj_alloc_root(pop, size, constructor, arg)) {
pmemobj_mutex_unlock_nofail(pop, &pop->rootlock);
LOG(2, "obj_realloc_root failed");
PMEMOBJ_API_END();
return OID_NULL;
}
root.pool_uuid_lo = pop->uuid_lo;
root.off = pop->root_offset;
pmemobj_mutex_unlock_nofail(pop, &pop->rootlock);
PMEMOBJ_API_END();
return root;
}
/*
* pmemobj_root -- returns root object
*/
PMEMoid
pmemobj_root(PMEMobjpool *pop, size_t size)
{
LOG(3, "pop %p size %zu", pop, size);
PMEMOBJ_API_START();
PMEMoid oid = pmemobj_root_construct(pop, size, NULL, NULL);
PMEMOBJ_API_END();
return oid;
}
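/*
 * Illustrative sketch (not compiled): the usual root-object bootstrap built
 * on pmemobj_root() above.  struct example_root and its single counter are
 * assumptions for this example.
 */
#if 0
struct example_root {
	uint64_t counter;
};

static void
example_root_usage(PMEMobjpool *pop)
{
	PMEMoid root = pmemobj_root(pop, sizeof(struct example_root));
	if (OID_IS_NULL(root))
		return; /* allocation failed, errno is set */
	struct example_root *rp = pmemobj_direct(root);
	rp->counter++;
	pmemobj_persist(pop, &rp->counter, sizeof(rp->counter));
}
#endif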
/*
 * pmemobj_first - returns the first object in the pool
*/
PMEMoid
pmemobj_first(PMEMobjpool *pop)
{
LOG(3, "pop %p", pop);
PMEMoid ret = {0, 0};
uint64_t off = palloc_first(&pop->heap);
if (off != 0) {
ret.off = off;
ret.pool_uuid_lo = pop->uuid_lo;
if (palloc_flags(&pop->heap, off) & OBJ_INTERNAL_OBJECT_MASK) {
return pmemobj_next(ret);
}
}
return ret;
}
/*
 * pmemobj_next - returns the next object in the pool
*/
PMEMoid
pmemobj_next(PMEMoid oid)
{
LOG(3, "oid.off 0x%016" PRIx64, oid.off);
PMEMoid curr = oid;
if (curr.off == 0)
return OID_NULL;
PMEMobjpool *pop = pmemobj_pool_by_oid(curr);
ASSERTne(pop, NULL);
do {
ASSERT(OBJ_OID_IS_VALID(pop, curr));
uint64_t next_off = palloc_next(&pop->heap, curr.off);
if (next_off == 0)
return OID_NULL;
/* next object exists */
curr.off = next_off;
} while (palloc_flags(&pop->heap, curr.off) & OBJ_INTERNAL_OBJECT_MASK);
return curr;
}
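/*
 * Illustrative sketch (not compiled): walking every user object in the pool
 * with the pmemobj_first()/pmemobj_next() pair defined above.  The helper
 * name is an assumption for this example.
 */
#if 0
static size_t
example_count_objects(PMEMobjpool *pop)
{
	size_t n = 0;

	for (PMEMoid oid = pmemobj_first(pop); !OID_IS_NULL(oid);
			oid = pmemobj_next(oid))
		n++;
	return n;
}
#endif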
/*
* pmemobj_reserve -- reserves a single object
*/
PMEMoid
pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act,
size_t size, uint64_t type_num)
{
LOG(3, "pop %p act %p size %zu type_num %llx",
pop, act, size,
(unsigned long long)type_num);
PMEMOBJ_API_START();
PMEMoid oid = OID_NULL;
if (palloc_reserve(&pop->heap, size, NULL, NULL, type_num,
0, 0, 0, act) != 0) {
PMEMOBJ_API_END();
return oid;
}
oid.off = act->heap.offset;
oid.pool_uuid_lo = pop->uuid_lo;
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_xreserve -- reserves a single object
*/
PMEMoid
pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act,
size_t size, uint64_t type_num, uint64_t flags)
{
LOG(3, "pop %p act %p size %zu type_num %llx flags %llx",
pop, act, size,
(unsigned long long)type_num, (unsigned long long)flags);
PMEMoid oid = OID_NULL;
if (flags & ~POBJ_ACTION_XRESERVE_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_ACTION_XRESERVE_VALID_FLAGS);
errno = EINVAL;
return oid;
}
PMEMOBJ_API_START();
struct constr_args carg;
carg.zero_init = flags & POBJ_FLAG_ZERO;
carg.constructor = NULL;
carg.arg = NULL;
if (palloc_reserve(&pop->heap, size, constructor_alloc, &carg,
type_num, 0, CLASS_ID_FROM_FLAG(flags),
ARENA_ID_FROM_FLAG(flags), act) != 0) {
PMEMOBJ_API_END();
return oid;
}
oid.off = act->heap.offset;
oid.pool_uuid_lo = pop->uuid_lo;
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_set_value -- creates an action to set a value
*/
void
pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act,
uint64_t *ptr, uint64_t value)
{
palloc_set_value(&pop->heap, act, ptr, value);
}
/*
* pmemobj_defer_free -- creates a deferred free action
*/
void
pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act)
{
ASSERT(!OID_IS_NULL(oid));
palloc_defer_free(&pop->heap, oid.off, act);
}
/*
* pmemobj_publish -- publishes a collection of actions
*/
int
pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt)
{
PMEMOBJ_API_START();
struct operation_context *ctx = pmalloc_operation_hold(pop);
size_t entries_size = actvcnt * sizeof(struct ulog_entry_val);
if (operation_reserve(ctx, entries_size) != 0) {
PMEMOBJ_API_END();
return -1;
}
palloc_publish(&pop->heap, actv, actvcnt, ctx);
pmalloc_operation_release(pop);
PMEMOBJ_API_END();
return 0;
}
/*
* pmemobj_cancel -- cancels collection of actions
*/
void
pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt)
{
PMEMOBJ_API_START();
palloc_cancel(&pop->heap, actv, actvcnt);
PMEMOBJ_API_END();
}
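/*
 * Illustrative sketch (not compiled): the reserve/publish flow provided by
 * the functions above.  The 64-byte size, the type number 1 and the
 * destination offset pointer are assumptions for this example.
 */
#if 0
static int
example_reserve_publish(PMEMobjpool *pop, uint64_t *dst_off)
{
	struct pobj_action act[2];

	PMEMoid oid = pmemobj_reserve(pop, &act[0], 64, 1);
	if (OID_IS_NULL(oid))
		return -1;
	/* initialize the reserved object here; it is not yet reachable */

	/* link it in atomically together with the allocation */
	pmemobj_set_value(pop, &act[1], dst_off, oid.off);
	return pmemobj_publish(pop, act, 2);
}
#endif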
/*
* pmemobj_defrag -- reallocates provided PMEMoids so that the underlying memory
* is efficiently arranged.
*/
int
pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt,
struct pobj_defrag_result *result)
{
PMEMOBJ_API_START();
if (result) {
result->relocated = 0;
result->total = 0;
}
uint64_t **objv = Malloc(sizeof(uint64_t *) * oidcnt);
if (objv == NULL)
return -1;
int ret = 0;
size_t j = 0;
for (size_t i = 0; i < oidcnt; ++i) {
if (OID_IS_NULL(*oidv[i]))
continue;
if (oidv[i]->pool_uuid_lo != pop->uuid_lo) {
ret = -1;
ERR("Not all PMEMoids belong to the provided pool");
goto out;
}
objv[j++] = &oidv[i]->off;
}
struct operation_context *ctx = pmalloc_operation_hold(pop);
ret = palloc_defrag(&pop->heap, objv, j, ctx, result);
pmalloc_operation_release(pop);
out:
Free(objv);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_list_insert -- adds object to a list
*/
int
pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid dest, int before, PMEMoid oid)
{
LOG(3, "pop %p pe_offset %zu head %p dest.off 0x%016" PRIx64
" before %d oid.off 0x%016" PRIx64,
pop, pe_offset, head, dest.off, before, oid.off);
PMEMOBJ_API_START();
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
ASSERT(OBJ_OID_IS_VALID(pop, oid));
ASSERT(OBJ_OID_IS_VALID(pop, dest));
ASSERT(pe_offset <= pmemobj_alloc_usable_size(dest)
- sizeof(struct list_entry));
ASSERT(pe_offset <= pmemobj_alloc_usable_size(oid)
- sizeof(struct list_entry));
int ret = list_insert(pop, (ssize_t)pe_offset, head, dest, before, oid);
PMEMOBJ_API_END();
return ret;
}
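/*
 * Illustrative sketch (not compiled): appending an existing allocation to the
 * tail of an atomic list with pmemobj_list_insert() above.  struct
 * example_item and its embedded entry field are assumptions for this example.
 */
#if 0
struct example_item {
	uint64_t payload;
	struct list_entry entry;
};

static int
example_list_append(PMEMobjpool *pop, struct list_head *head, PMEMoid item)
{
	/* OID_NULL dest with before == 0 selects the tail of the list */
	return pmemobj_list_insert(pop, offsetof(struct example_item, entry),
			head, OID_NULL, 0, item);
}
#endif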
/*
* pmemobj_list_insert_new -- adds new object to a list
*/
PMEMoid
pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid dest, int before, size_t size,
uint64_t type_num,
pmemobj_constr constructor, void *arg)
{
LOG(3, "pop %p pe_offset %zu head %p dest.off 0x%016" PRIx64
" before %d size %zu type_num %" PRIu64,
pop, pe_offset, head, dest.off, before, size, type_num);
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
ASSERT(OBJ_OID_IS_VALID(pop, dest));
ASSERT(pe_offset <= pmemobj_alloc_usable_size(dest)
- sizeof(struct list_entry));
ASSERT(pe_offset <= size - sizeof(struct list_entry));
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
errno = ENOMEM;
return OID_NULL;
}
PMEMOBJ_API_START();
struct constr_args carg;
carg.constructor = constructor;
carg.arg = arg;
carg.zero_init = 0;
PMEMoid retoid = OID_NULL;
list_insert_new_user(pop, pe_offset, head, dest, before, size, type_num,
constructor_alloc, &carg, &retoid);
PMEMOBJ_API_END();
return retoid;
}
/*
* pmemobj_list_remove -- removes object from a list
*/
int
pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid oid, int free)
{
LOG(3, "pop %p pe_offset %zu head %p oid.off 0x%016" PRIx64 " free %d",
pop, pe_offset, head, oid.off, free);
PMEMOBJ_API_START();
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
ASSERT(OBJ_OID_IS_VALID(pop, oid));
ASSERT(pe_offset <= pmemobj_alloc_usable_size(oid)
- sizeof(struct list_entry));
int ret;
if (free)
ret = list_remove_free_user(pop, pe_offset, head, &oid);
else
ret = list_remove(pop, (ssize_t)pe_offset, head, oid);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_list_move -- moves object between lists
*/
int
pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset, void *head_old,
size_t pe_new_offset, void *head_new,
PMEMoid dest, int before, PMEMoid oid)
{
LOG(3, "pop %p pe_old_offset %zu pe_new_offset %zu"
" head_old %p head_new %p dest.off 0x%016" PRIx64
" before %d oid.off 0x%016" PRIx64 "",
pop, pe_old_offset, pe_new_offset,
head_old, head_new, dest.off, before, oid.off);
PMEMOBJ_API_START();
/* log notice message if used inside a transaction */
_POBJ_DEBUG_NOTICE_IN_TX();
ASSERT(OBJ_OID_IS_VALID(pop, oid));
ASSERT(OBJ_OID_IS_VALID(pop, dest));
ASSERT(pe_old_offset <= pmemobj_alloc_usable_size(oid)
- sizeof(struct list_entry));
ASSERT(pe_new_offset <= pmemobj_alloc_usable_size(oid)
- sizeof(struct list_entry));
ASSERT(pe_old_offset <= pmemobj_alloc_usable_size(dest)
- sizeof(struct list_entry));
ASSERT(pe_new_offset <= pmemobj_alloc_usable_size(dest)
- sizeof(struct list_entry));
int ret = list_move(pop, pe_old_offset, head_old,
pe_new_offset, head_new,
dest, before, oid);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_ctl_getU -- programmatically executes a read ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop == NULL ? NULL : pop->ctl, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
* pmemobj_ctl_setU -- programmatically executes a write ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop == NULL ? NULL : pop->ctl, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
* pmemobj_ctl_execU -- programmatically executes a runnable ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop == NULL ? NULL : pop->ctl, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
* pmemobj_ctl_get -- programmatically executes a read ctl query
*/
int
pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg)
{
return pmemobj_ctl_getU(pop, name, arg);
}
/*
* pmemobj_ctl_set -- programmatically executes a write ctl query
*/
int
pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg)
{
PMEMOBJ_API_START();
int ret = pmemobj_ctl_setU(pop, name, arg);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_ctl_exec -- programmatically executes a runnable ctl query
*/
int
pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg)
{
PMEMOBJ_API_START();
int ret = pmemobj_ctl_execU(pop, name, arg);
PMEMOBJ_API_END();
return ret;
}
#else
/*
* pmemobj_ctl_getW -- programmatically executes a read ctl query
*/
int
pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemobj_ctl_getU(pop, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemobj_ctl_setW -- programmatically executes a write ctl query
*/
int
pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemobj_ctl_setU(pop, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemobj_ctl_execW -- programmatically executes a runnable ctl query
*/
int
pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemobj_ctl_execU(pop, uname, arg);
util_free_UTF8(uname);
return ret;
}
#endif
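/*
 * Illustrative sketch (not compiled): reading a ctl entry point through the
 * wrappers above.  "heap.narenas.total" is a read-only query registered by
 * the heap module; the helper name is an assumption for this example.
 */
#if 0
static void
example_ctl_read(PMEMobjpool *pop)
{
	unsigned narenas;

	if (pmemobj_ctl_get(pop, "heap.narenas.total", &narenas) == 0) {
		/* narenas now holds the number of arenas in the heap */
		(void) narenas;
	}
}
#endif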
/*
* _pobj_debug_notice -- logs notice message if used inside a transaction
*/
void
_pobj_debug_notice(const char *api_name, const char *file, int line)
{
#ifdef DEBUG
if (pmemobj_tx_stage() != TX_STAGE_NONE) {
if (file)
LOG(4, "Notice: non-transactional API"
" used inside a transaction (%s in %s:%d)",
api_name, file, line);
else
LOG(4, "Notice: non-transactional API"
" used inside a transaction (%s)", api_name);
}
#endif /* DEBUG */
}
#if VG_PMEMCHECK_ENABLED
/*
* pobj_emit_log -- logs library and function names to pmemcheck store log
*/
void
pobj_emit_log(const char *func, int order)
{
util_emit_log("libpmemobj", func, order);
}
#endif
#if FAULT_INJECTION
void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmemobj_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/list.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* list.h -- internal definitions for persistent atomic lists module
*/
#ifndef LIBPMEMOBJ_LIST_H
#define LIBPMEMOBJ_LIST_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "lane.h"
#include "pmalloc.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
struct list_entry {
PMEMoid pe_next;
PMEMoid pe_prev;
};
struct list_head {
PMEMoid pe_first;
PMEMmutex lock;
};
int list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, palloc_constr constructor, void *arg,
PMEMoid *oidp);
int list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before,
PMEMoid oid);
int list_remove_free_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head,
PMEMoid *oidp);
int list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid);
int list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid);
void list_move_oob(PMEMobjpool *pop,
struct list_head *head_old, struct list_head *head_new,
PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memops.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memops.c -- aggregated memory operations helper implementation
*
* The operation collects all of the required memory modifications that
* need to happen in an atomic way (all of them or none), and abstracts
* away the storage type (transient/persistent) and the underlying
* implementation of how it's actually performed - in some cases using
* the redo log is unnecessary and the allocation process can be sped up
* a bit by completely omitting that whole machinery.
*
* The modifications are not visible until the context is processed.
*/
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "ravl.h"
#include "valgrind_internal.h"
#include "vecq.h"
#include "sys_util.h"
#include <x86intrin.h>
#define ULOG_BASE_SIZE 1024
#define OP_MERGE_SEARCH 64
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
enum operation_state {
OPERATION_IDLE,
OPERATION_IN_PROGRESS,
OPERATION_CLEANUP,
};
struct operation_log {
size_t capacity; /* capacity of the ulog log */
size_t offset; /* data offset inside of the log */
struct ulog *ulog; /* DRAM allocated log of modifications */
};
/*
* operation_context -- context of an ongoing palloc operation
*/
struct operation_context {
enum log_type type;
ulog_extend_fn extend; /* function to allocate next ulog */
ulog_free_fn ulog_free; /* function to free next ulogs */
const struct pmem_ops *p_ops;
struct pmem_ops t_ops; /* used for transient data processing */
struct pmem_ops s_ops; /* used for shadow copy data processing */
size_t ulog_curr_offset; /* offset in the log for buffer stores */
size_t ulog_curr_capacity; /* capacity of the current log */
size_t ulog_curr_gen_num; /* transaction counter in the current log */
struct ulog *ulog_curr; /* current persistent log */
size_t total_logged; /* total amount of buffer stores in the logs */
struct ulog *ulog; /* pointer to the persistent ulog log */
size_t ulog_base_nbytes; /* available bytes in initial ulog log */
size_t ulog_capacity; /* sum of capacity, incl all next ulog logs */
	int ulog_auto_reserve; /* whether to auto-reserve ulog space */
int ulog_any_user_buffer; /* set if any user buffer is added */
struct ulog_next next; /* vector of 'next' fields of persistent ulog */
enum operation_state state; /* operation sanity check */
struct operation_log pshadow_ops; /* shadow copy of persistent ulog */
struct operation_log transient_ops; /* log of transient changes */
/* collection used to look for potential merge candidates */
VECQ(, struct ulog_entry_val *) merge_entries;
};
/*
* operation_log_transient_init -- (internal) initialize operation log
* containing transient memory resident changes
*/
static int
operation_log_transient_init(struct operation_log *log)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ULOG_BASE_SIZE;
log->ulog = src;
return 0;
}
/*
* operation_log_persistent_init -- (internal) initialize operation log
* containing persistent memory resident changes
*/
static int
operation_log_persistent_init(struct operation_log *log,
size_t ulog_base_nbytes)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ulog_base_nbytes;
memset(src->unused, 0, sizeof(src->unused));
log->ulog = src;
return 0;
}
/*
* operation_transient_clean -- cleans pmemcheck address state
*/
static int
operation_transient_clean(void *base, const void *addr, size_t len,
unsigned flags)
{
VALGRIND_SET_CLEAN(addr, len);
return 0;
}
/*
* operation_transient_drain -- noop
*/
static void
operation_transient_drain(void *base)
{
}
/*
* operation_transient_memcpy -- transient memcpy wrapper
*/
static void *
operation_transient_memcpy(void *base, void *dest, const void *src, size_t len,
unsigned flags)
{
return memcpy(dest, src, len);
}
/*
* operation_new -- creates new operation context
*/
struct operation_context *
operation_new(struct ulog *ulog, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type)
{
struct operation_context *ctx = Zalloc(sizeof(*ctx));
if (ctx == NULL) {
ERR("!Zalloc");
goto error_ctx_alloc;
}
ctx->ulog = ulog;
ctx->ulog_base_nbytes = ulog_base_nbytes;
ctx->ulog_capacity = ulog_capacity(ulog,
ulog_base_nbytes, p_ops);
ctx->extend = extend;
ctx->ulog_free = ulog_free;
ctx->state = OPERATION_IDLE;
VEC_INIT(&ctx->next);
ulog_rebuild_next_vec(ulog, &ctx->next, p_ops);
ctx->p_ops = p_ops;
ctx->type = type;
ctx->ulog_any_user_buffer = 0;
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr = NULL;
ctx->t_ops.base = NULL;
ctx->t_ops.flush = operation_transient_clean;
ctx->t_ops.memcpy = operation_transient_memcpy;
ctx->t_ops.drain = operation_transient_drain;
ctx->s_ops.base = p_ops->base;
ctx->s_ops.flush = operation_transient_clean;
ctx->s_ops.memcpy = operation_transient_memcpy;
ctx->s_ops.drain = operation_transient_drain;
VECQ_INIT(&ctx->merge_entries);
if (operation_log_transient_init(&ctx->transient_ops) != 0)
goto error_ulog_alloc;
if (operation_log_persistent_init(&ctx->pshadow_ops,
ulog_base_nbytes) != 0)
goto error_ulog_alloc;
return ctx;
error_ulog_alloc:
operation_delete(ctx);
error_ctx_alloc:
return NULL;
}
/*
* operation_delete -- deletes operation context
*/
void
operation_delete(struct operation_context *ctx)
{
VECQ_DELETE(&ctx->merge_entries);
VEC_DELETE(&ctx->next);
Free(ctx->pshadow_ops.ulog);
Free(ctx->transient_ops.ulog);
Free(ctx);
}
/*
* operation_user_buffer_remove -- removes range from the tree and returns 0
*/
static int
operation_user_buffer_remove(void *base, void *addr)
{
PMEMobjpool *pop = base;
if (!pop->ulog_user_buffers.verify)
return 0;
util_mutex_lock(&pop->ulog_user_buffers.lock);
struct ravl *ravl = pop->ulog_user_buffers.map;
enum ravl_predicate predict = RAVL_PREDICATE_EQUAL;
struct user_buffer_def range;
range.addr = addr;
range.size = 0;
struct ravl_node *n = ravl_find(ravl, &range, predict);
ASSERTne(n, NULL);
ravl_remove(ravl, n);
util_mutex_unlock(&pop->ulog_user_buffers.lock);
return 0;
}
/*
* operation_free_logs -- free all logs except first
*/
void
operation_free_logs(struct operation_context *ctx, uint64_t flags)
{
int freed = ulog_free_next(ctx->ulog, ctx->p_ops, ctx->ulog_free,
operation_user_buffer_remove, flags);
if (freed) {
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
}
ASSERTeq(VEC_SIZE(&ctx->next), 0);
}
/*
* operation_merge -- (internal) performs operation on a field
*/
static inline void
operation_merge(struct ulog_entry_base *entry, uint64_t value,
ulog_operation_type type)
{
struct ulog_entry_val *e = (struct ulog_entry_val *)entry;
switch (type) {
case ULOG_OPERATION_AND:
e->value &= value;
break;
case ULOG_OPERATION_OR:
e->value |= value;
break;
case ULOG_OPERATION_SET:
e->value = value;
break;
default:
ASSERT(0); /* unreachable */
}
}
/*
* operation_try_merge_entry -- tries to merge the incoming log entry with
* existing entries
*
* Because this requires a reverse foreach, it cannot be implemented using
* the on-media ulog log structure since there's no way to find what's
* the previous entry in the log. Instead, the last N entries are stored
* in a collection and traversed backwards.
*/
static int
operation_try_merge_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type)
{
int ret = 0;
uint64_t offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, ptr);
struct ulog_entry_val *e;
VECQ_FOREACH_REVERSE(e, &ctx->merge_entries) {
if (ulog_entry_offset(&e->base) == offset) {
if (ulog_entry_type(&e->base) == type) {
operation_merge(&e->base, value, type);
return 1;
} else {
break;
}
}
}
return ret;
}
/*
* operation_merge_entry_add -- adds a new entry to the merge collection,
* keeps capacity at OP_MERGE_SEARCH. Removes old entries in FIFO fashion.
*/
static void
operation_merge_entry_add(struct operation_context *ctx,
struct ulog_entry_val *entry)
{
if (VECQ_SIZE(&ctx->merge_entries) == OP_MERGE_SEARCH)
(void) VECQ_DEQUEUE(&ctx->merge_entries);
if (VECQ_ENQUEUE(&ctx->merge_entries, entry) != 0) {
/* this is fine, only runtime perf will get slower */
LOG(2, "out of memory - unable to track entries");
}
}
/*
 * operation_add_typed_entry -- adds a new entry to the current operation; if an
 *	entry with the same ptr address and operation type already exists,
 *	the new value is merged into it instead of appending another entry.
*/
int
operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type)
{
struct operation_log *oplog = log_type == LOG_PERSISTENT ?
&ctx->pshadow_ops : &ctx->transient_ops;
/*
* Always make sure to have one extra spare cacheline so that the
* ulog log entry creation has enough room for zeroing.
*/
if (oplog->offset + CACHELINE_SIZE == oplog->capacity) {
size_t ncapacity = oplog->capacity + ULOG_BASE_SIZE;
struct ulog *ulog = Realloc(oplog->ulog,
SIZEOF_ULOG(ncapacity));
if (ulog == NULL)
return -1;
oplog->capacity += ULOG_BASE_SIZE;
oplog->ulog = ulog;
oplog->ulog->capacity = oplog->capacity;
/*
* Realloc invalidated the ulog entries that are inside of this
* vector, need to clear it to avoid use after free.
*/
VECQ_CLEAR(&ctx->merge_entries);
}
if (log_type == LOG_PERSISTENT &&
operation_try_merge_entry(ctx, ptr, value, type) != 0)
return 0;
struct ulog_entry_val *entry = ulog_entry_val_create(
oplog->ulog, oplog->offset, ptr, value, type,
log_type == LOG_TRANSIENT ? &ctx->t_ops : &ctx->s_ops);
if (log_type == LOG_PERSISTENT)
operation_merge_entry_add(ctx, entry);
oplog->offset += ulog_entry_size(&entry->base);
return 0;
}
/*
* operation_add_value -- adds new entry to the current operation with
* entry type autodetected based on the memory location
*/
int
operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value,
ulog_operation_type type)
{
const struct pmem_ops *p_ops = ctx->p_ops;
PMEMobjpool *pop = (PMEMobjpool *)p_ops->base;
int from_pool = OBJ_OFF_IS_VALID(pop,
(uintptr_t)ptr - (uintptr_t)p_ops->base);
return operation_add_typed_entry(ctx, ptr, value, type,
from_pool ? LOG_PERSISTENT : LOG_TRANSIENT);
}
/*
* operation_add_buffer -- adds a buffer operation to the log
*/
int
operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type)
{
size_t real_size = size + sizeof(struct ulog_entry_buf);
/* if there's no space left in the log, reserve some more */
if (ctx->ulog_curr_capacity == 0) {
ctx->ulog_curr_gen_num = ctx->ulog->gen_num;
if (operation_reserve(ctx, ctx->total_logged + real_size) != 0)
return -1;
ctx->ulog_curr = ctx->ulog_curr == NULL ? ctx->ulog :
ulog_next(ctx->ulog_curr, ctx->p_ops);
ASSERTne(ctx->ulog_curr, NULL);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = ctx->ulog_curr->capacity;
}
size_t curr_size = MIN(real_size, ctx->ulog_curr_capacity);
size_t data_size = curr_size - sizeof(struct ulog_entry_buf);
size_t entry_size = ALIGN_UP(curr_size, CACHELINE_SIZE);
/*
* To make sure that the log is consistent and contiguous, we need
* make sure that the header of the entry that would be located
* immediately after this one is zeroed.
*/
struct ulog_entry_base *next_entry = NULL;
if (entry_size == ctx->ulog_curr_capacity) {
struct ulog *u = ulog_next(ctx->ulog_curr, ctx->p_ops);
if (u != NULL)
next_entry = (struct ulog_entry_base *)u->data;
} else {
size_t next_entry_offset = ctx->ulog_curr_offset + entry_size;
next_entry = (struct ulog_entry_base *)(ctx->ulog_curr->data +
next_entry_offset);
}
#ifdef USE_NDP_CLOBBER
int clear_next_header = 0;
if (next_entry != NULL){
clear_next_header = 1;
}
#else
if (next_entry != NULL){
ulog_clobber_entry(next_entry, ctx->p_ops);
}
#endif
#ifdef GET_NDP_BREAKDOWN
uint64_t startCycles = getCycle();
#endif
//ulogcount++;
#ifdef USE_NDP_CLOBBER
ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops,
clear_next_header);
#else
ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops);
#endif
#ifdef GET_NDP_BREAKDOWN
uint64_t endCycles = getCycle();
ulogCycles += endCycles - startCycles;
#endif
/* create a persistent log entry */
/* struct ulog_entry_buf *e = ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops);
*/
// ASSERT(entry_size == ulog_entry_size(&e->base));
// ASSERT(entry_size <= ctx->ulog_curr_capacity);
ctx->total_logged += entry_size;
ctx->ulog_curr_offset += entry_size;
ctx->ulog_curr_capacity -= entry_size;
/*
* Recursively add the data to the log until the entire buffer is
* processed.
*/
return size - data_size == 0 ? 0 : operation_add_buffer(ctx,
(char *)dest + data_size,
(char *)src + data_size,
size - data_size, type);
}
/*
* operation_user_buffer_range_cmp -- compares addresses of
* user buffers
*/
int
operation_user_buffer_range_cmp(const void *lhs, const void *rhs)
{
const struct user_buffer_def *l = lhs;
const struct user_buffer_def *r = rhs;
if (l->addr > r->addr)
return 1;
else if (l->addr < r->addr)
return -1;
return 0;
}
/*
* operation_user_buffer_try_insert -- adds a user buffer range to the tree,
* if the buffer already exists in the tree function returns -1, otherwise
* it returns 0
*/
static int
operation_user_buffer_try_insert(PMEMobjpool *pop,
struct user_buffer_def *userbuf)
{
int ret = 0;
if (!pop->ulog_user_buffers.verify)
return ret;
util_mutex_lock(&pop->ulog_user_buffers.lock);
void *addr_end = (char *)userbuf->addr + userbuf->size;
struct user_buffer_def search;
search.addr = addr_end;
struct ravl_node *n = ravl_find(pop->ulog_user_buffers.map,
&search, RAVL_PREDICATE_LESS_EQUAL);
if (n != NULL) {
struct user_buffer_def *r = ravl_data(n);
void *r_end = (char *)r->addr + r->size;
if (r_end > userbuf->addr && r->addr < addr_end) {
/* what was found overlaps with what is being added */
ret = -1;
goto out;
}
}
if (ravl_emplace_copy(pop->ulog_user_buffers.map, userbuf) == -1) {
ASSERTne(errno, EEXIST);
ret = -1;
}
out:
util_mutex_unlock(&pop->ulog_user_buffers.lock);
return ret;
}
/*
* operation_user_buffer_verify_align -- verify if the provided buffer can be
* used as a transaction log, and if so - perform necessary alignments
*/
int
operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf)
{
/*
* Address of the buffer has to be aligned up, and the size
* has to be aligned down, taking into account the number of bytes
* the address was incremented by. The remaining size has to be large
* enough to contain the header and at least one ulog entry.
*/
uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base,
userbuf->addr);
ptrdiff_t size_diff = (intptr_t)ulog_by_offset(buffer_offset,
ctx->p_ops) - (intptr_t)userbuf->addr;
ssize_t capacity_unaligned = (ssize_t)userbuf->size - size_diff
- (ssize_t)sizeof(struct ulog);
if (capacity_unaligned < (ssize_t)CACHELINE_SIZE) {
ERR("Capacity insufficient");
return -1;
}
size_t capacity_aligned = ALIGN_DOWN((size_t)capacity_unaligned,
CACHELINE_SIZE);
userbuf->addr = ulog_by_offset(buffer_offset, ctx->p_ops);
userbuf->size = capacity_aligned + sizeof(struct ulog);
if (operation_user_buffer_try_insert(ctx->p_ops->base, userbuf)) {
ERR("Buffer currently used");
return -1;
}
return 0;
}
/*
* operation_add_user_buffer -- add user buffer to the ulog
*/
void
operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf)
{
uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base,
userbuf->addr);
size_t capacity = userbuf->size - sizeof(struct ulog);
ulog_construct(buffer_offset, capacity, ctx->ulog->gen_num,
1, ULOG_USER_OWNED, ctx->p_ops);
struct ulog *last_log;
/* if there is only one log */
if (!VEC_SIZE(&ctx->next))
last_log = ctx->ulog;
else /* get last element from vector */
last_log = ulog_by_offset(VEC_BACK(&ctx->next), ctx->p_ops);
ASSERTne(last_log, NULL);
size_t next_size = sizeof(last_log->next);
VALGRIND_ADD_TO_TX(&last_log->next, next_size);
last_log->next = buffer_offset;
pmemops_persist(ctx->p_ops, &last_log->next, next_size);
VEC_PUSH_BACK(&ctx->next, buffer_offset);
ctx->ulog_capacity += capacity;
operation_set_any_user_buffer(ctx, 1);
}
/*
* operation_set_auto_reserve -- set auto reserve value for context
*/
void
operation_set_auto_reserve(struct operation_context *ctx, int auto_reserve)
{
ctx->ulog_auto_reserve = auto_reserve;
}
/*
* operation_set_any_user_buffer -- set ulog_any_user_buffer value for context
*/
void
operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer)
{
ctx->ulog_any_user_buffer = any_user_buffer;
}
/*
* operation_get_any_user_buffer -- get ulog_any_user_buffer value from context
*/
int
operation_get_any_user_buffer(struct operation_context *ctx)
{
return ctx->ulog_any_user_buffer;
}
/*
* operation_process_persistent_redo -- (internal) process using ulog
*/
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_store(ctx->ulog, ctx->pshadow_ops.ulog,
ctx->pshadow_ops.offset, ctx->ulog_base_nbytes,
ctx->ulog_capacity,
&ctx->next, ctx->p_ops);
#ifdef USE_NDP_REDO
if(!use_ndp_redo){
#endif
ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
//ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX,
// ctx->p_ops);
#ifdef USE_NDP_REDO
}
else {
//ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
// ctx->p_ops);
//while(1){}
ulog_process_ndp(ctx->ulog, ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
//while(1){}
}
#endif
// while(((*((uint32_t*)(ctx->p_ops->device)+254)) & 2) != 2){
//asm volatile ("clflush (%0)" :: "r"((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting %x %x\n",*((uint32_t*)(tx->pop->p_ops.device)+11),*((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting!!\n");
// }
ulog_clobber(ctx->ulog, &ctx->next, ctx->p_ops);
}
/*
* operation_process_persistent_undo -- (internal) process using ulog
*/
static void
operation_process_persistent_undo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops);
}
/*
* operation_reserve -- (internal) reserves new capacity in persistent ulog log
*/
int
operation_reserve(struct operation_context *ctx, size_t new_capacity)
{
if (new_capacity > ctx->ulog_capacity) {
if (ctx->extend == NULL) {
ERR("no extend function present");
return -1;
}
if (ulog_reserve(ctx->ulog,
ctx->ulog_base_nbytes,
ctx->ulog_curr_gen_num,
ctx->ulog_auto_reserve,
&new_capacity, ctx->extend,
&ctx->next, ctx->p_ops) != 0)
return -1;
ctx->ulog_capacity = new_capacity;
}
return 0;
}
/*
* operation_init -- initializes runtime state of an operation
*/
void
operation_init(struct operation_context *ctx)
{
struct operation_log *plog = &ctx->pshadow_ops;
struct operation_log *tlog = &ctx->transient_ops;
VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx));
VALGRIND_ANNOTATE_NEW_MEMORY(tlog->ulog, sizeof(struct ulog) +
tlog->capacity);
VALGRIND_ANNOTATE_NEW_MEMORY(plog->ulog, sizeof(struct ulog) +
plog->capacity);
tlog->offset = 0;
plog->offset = 0;
VECQ_REINIT(&ctx->merge_entries);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr_gen_num = 0;
ctx->ulog_curr = NULL;
ctx->total_logged = 0;
ctx->ulog_auto_reserve = 1;
ctx->ulog_any_user_buffer = 0;
}
/*
* operation_start -- initializes and starts a new operation
*/
void
operation_start(struct operation_context *ctx)
{
operation_init(ctx);
ASSERTeq(ctx->state, OPERATION_IDLE);
ctx->state = OPERATION_IN_PROGRESS;
}
void
operation_resume(struct operation_context *ctx)
{
operation_start(ctx);
ctx->total_logged = ulog_base_nbytes(ctx->ulog);
}
/*
* operation_cancel -- cancels a running operation
*/
void
operation_cancel(struct operation_context *ctx)
{
ASSERTeq(ctx->state, OPERATION_IN_PROGRESS);
ctx->state = OPERATION_IDLE;
}
/*
* operation_process -- processes registered operations
*
* The order of processing is important: persistent, transient.
* This is because the transient entries that reside on persistent memory might
* require write to a location that is currently occupied by a valid persistent
* state but becomes a transient state after operation is processed.
*/
void
operation_process(struct operation_context *ctx)
{
/*
	 * If there's exactly one persistent entry, there's no need to involve
	 * the redo log. We can simply assign the value; the operation will be
	 * atomic.
*/
int redo_process = ctx->type == LOG_TYPE_REDO &&
ctx->pshadow_ops.offset != 0;
if (redo_process &&
ctx->pshadow_ops.offset == sizeof(struct ulog_entry_val)) {
struct ulog_entry_base *e = (struct ulog_entry_base *)
ctx->pshadow_ops.ulog->data;
ulog_operation_type t = ulog_entry_type(e);
if (t == ULOG_OPERATION_SET || t == ULOG_OPERATION_AND ||
t == ULOG_OPERATION_OR) {
			ulog_entry_apply(e, 1, ctx->p_ops); /* may not be effective in NDP */
redo_process = 0;
}
}
if (redo_process) {
operation_process_persistent_redo(ctx); //ndp
ctx->state = OPERATION_CLEANUP;
} else if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) {
operation_process_persistent_undo(ctx);
ctx->state = OPERATION_CLEANUP;
}
/* process transient entries with transient memory ops */
if (ctx->transient_ops.offset != 0)
ulog_process(ctx->transient_ops.ulog, NULL, &ctx->t_ops); //where is this used?
}
/*
* operation_finish -- finalizes the operation
*/
void
operation_finish(struct operation_context *ctx, unsigned flags)
{
ASSERTne(ctx->state, OPERATION_IDLE);
if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0)
ctx->state = OPERATION_CLEANUP;
if (ctx->ulog_any_user_buffer) {
flags |= ULOG_ANY_USER_BUFFER;
ctx->state = OPERATION_CLEANUP;
}
if (ctx->state != OPERATION_CLEANUP)
goto out;
if (ctx->type == LOG_TYPE_UNDO) {
int ret = ulog_clobber_data(ctx->ulog,
ctx->total_logged, ctx->ulog_base_nbytes,
&ctx->next, ctx->ulog_free,
operation_user_buffer_remove,
ctx->p_ops, flags);
if (ret == 0)
goto out;
} else if (ctx->type == LOG_TYPE_REDO) {
int ret = ulog_free_next(ctx->ulog, ctx->p_ops,
ctx->ulog_free, operation_user_buffer_remove,
flags);
if (ret == 0)
goto out;
}
/* clobbering shrunk the ulog */
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
out:
ctx->state = OPERATION_IDLE;
}
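/*
 * Illustrative sketch (not compiled): the intended life cycle of an operation
 * context as implemented in this file.  The caller-provided ulog location,
 * its base size and the modified field are assumptions for this example.
 */
#if 0
static void
example_operation_lifecycle(struct ulog *ulog, size_t ulog_base_nbytes,
	const struct pmem_ops *p_ops, uint64_t *field)
{
	struct operation_context *ctx = operation_new(ulog, ulog_base_nbytes,
		NULL, NULL, p_ops, LOG_TYPE_REDO);
	if (ctx == NULL)
		return;

	operation_start(ctx);
	/* queue a single 8-byte store; nothing is visible yet */
	operation_add_entry(ctx, field, 42, ULOG_OPERATION_SET);
	/* make all queued modifications visible atomically */
	operation_process(ctx);
	operation_finish(ctx, 0);

	operation_delete(ctx);
}
#endif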
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/stats.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.c -- implementation of statistics
*/
#include "obj.h"
#include "stats.h"
STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated);
STATS_CTL_HANDLER(transient, run_allocated, heap_run_allocated);
STATS_CTL_HANDLER(transient, run_active, heap_run_active);
static const struct ctl_node CTL_NODE(heap)[] = {
STATS_CTL_LEAF(persistent, curr_allocated),
STATS_CTL_LEAF(transient, run_allocated),
STATS_CTL_LEAF(transient, run_active),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(enabled) -- returns whether or not statistics are enabled
*/
static int
CTL_READ_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
enum pobj_stats_enabled *arg_out = arg;
*arg_out = pop->stats->enabled;
return 0;
}
/*
* stats_enabled_parser -- parses the stats enabled type
*/
static int
stats_enabled_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_stats_enabled *enabled = dest;
ASSERTeq(dest_size, sizeof(enum pobj_stats_enabled));
int bool_out;
if (ctl_arg_boolean(arg, &bool_out, sizeof(bool_out)) == 0) {
*enabled = bool_out ?
POBJ_STATS_ENABLED_BOTH : POBJ_STATS_DISABLED;
return 0;
}
if (strcmp(vstr, "disabled") == 0) {
*enabled = POBJ_STATS_DISABLED;
} else if (strcmp(vstr, "both") == 0) {
*enabled = POBJ_STATS_ENABLED_BOTH;
} else if (strcmp(vstr, "persistent") == 0) {
*enabled = POBJ_STATS_ENABLED_PERSISTENT;
} else if (strcmp(vstr, "transient") == 0) {
*enabled = POBJ_STATS_ENABLED_TRANSIENT;
} else {
ERR("invalid enable type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* CTL_WRITE_HANDLER(enabled) -- enables or disables statistics counting
*/
static int
CTL_WRITE_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
pop->stats->enabled = *(enum pobj_stats_enabled *)arg;
return 0;
}
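/*
 * Illustrative sketch (not compiled): the string values accepted by the
 * parser above are the ones used in ctl config strings, e.g. through the
 * PMEMOBJ_CONF environment variable:
 *
 *	PMEMOBJ_CONF="stats.enabled=both" ./app
 *
 * A programmatic write goes through CTL_WRITE_HANDLER(enabled) instead and
 * passes the enum value directly.  The helper name below is an assumption
 * for this example.
 */
#if 0
static void
example_enable_both(PMEMobjpool *pop)
{
	enum pobj_stats_enabled mode = POBJ_STATS_ENABLED_BOTH;

	(void) pmemobj_ctl_set(pop, "stats.enabled", &mode);
}
#endif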
static const struct ctl_argument CTL_ARG(enabled) = {
.dest_size = sizeof(enum pobj_stats_enabled),
.parsers = {
CTL_ARG_PARSER(sizeof(enum pobj_stats_enabled),
stats_enabled_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(stats)[] = {
CTL_CHILD(heap),
CTL_LEAF_RW(enabled),
CTL_NODE_END
};
/*
* stats_new -- allocates and initializes statistics instance
*/
struct stats *
stats_new(PMEMobjpool *pop)
{
struct stats *s = Malloc(sizeof(*s));
if (s == NULL) {
ERR("!Malloc");
return NULL;
}
s->enabled = POBJ_STATS_ENABLED_TRANSIENT;
s->persistent = &pop->stats_persistent;
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(s->persistent, sizeof(*s->persistent));
s->transient = Zalloc(sizeof(struct stats_transient));
if (s->transient == NULL)
goto error_transient_alloc;
return s;
error_transient_alloc:
Free(s);
return NULL;
}
/*
* stats_delete -- deletes statistics instance
*/
void
stats_delete(PMEMobjpool *pop, struct stats *s)
{
pmemops_persist(&pop->p_ops, s->persistent,
sizeof(struct stats_persistent));
Free(s->transient);
Free(s);
}
/*
* stats_ctl_register -- registers ctl nodes for statistics
*/
void
stats_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, stats);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/ctl_debug.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* ctl_debug.h -- definitions for the debug CTL namespace
*/
#ifndef LIBPMEMOBJ_CTL_DEBUG_H
#define LIBPMEMOBJ_CTL_DEBUG_H 1
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
void debug_ctl_register(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CTL_DEBUG_H */
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/heap.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* heap.h -- internal definitions for heap
*/
#ifndef LIBPMEMOBJ_HEAP_H
#define LIBPMEMOBJ_HEAP_H 1
#include <stddef.h>
#include <stdint.h>
#include "bucket.h"
#include "memblock.h"
#include "memops.h"
#include "palloc.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
((uintptr_t)(ptr) - (uintptr_t)((heap)->base))
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))
#define HEAP_ARENA_PER_THREAD (0)
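/*
 * Illustrative sketch (not compiled): the two translation macros above simply
 * add/subtract the runtime base address of the heap, so a round trip yields
 * the original offset.  The offset value is an assumption for this example.
 */
#if 0
static void
example_heap_off_round_trip(struct palloc_heap *heap)
{
	uint64_t off = 0x1000;
	void *ptr = HEAP_OFF_TO_PTR(heap, off);
	uint64_t back = HEAP_PTR_TO_OFF(heap, ptr); /* back == off */

	(void) back;
}
#endif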
int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void heap_cleanup(struct palloc_heap *heap);
int heap_check(void *heap_start, uint64_t heap_size);
int heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
int heap_buckets_init(struct palloc_heap *heap);
int heap_create_alloc_class_buckets(struct palloc_heap *heap,
struct alloc_class *c);
int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size);
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size);
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, uint8_t class_id,
uint16_t arena_id);
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b);
int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m);
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m);
os_mutex_t *heap_get_run_lock(struct palloc_heap *heap,
uint32_t chunk_id);
void
heap_force_recycle(struct palloc_heap *heap);
void
heap_discard_run(struct palloc_heap *heap, struct memory_block *m);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
void heap_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block start);
struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap);
void *heap_end(struct palloc_heap *heap);
unsigned heap_get_narenas_total(struct palloc_heap *heap);
unsigned heap_get_narenas_max(struct palloc_heap *heap);
int heap_set_narenas_max(struct palloc_heap *heap, unsigned size);
unsigned heap_get_narenas_auto(struct palloc_heap *heap);
unsigned heap_get_thread_arena_id(struct palloc_heap *heap);
int heap_arena_create(struct palloc_heap *heap);
struct bucket **
heap_get_arena_buckets(struct palloc_heap *heap, unsigned arena_id);
int heap_get_arena_auto(struct palloc_heap *heap, unsigned arena_id);
int heap_set_arena_auto(struct palloc_heap *heap, unsigned arena_id,
int automatic);
void heap_set_arena_thread(struct palloc_heap *heap, unsigned arena_id);
void heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects);
static inline struct chunk_header *
heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk *
heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk_run *
heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/list.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* list.c -- implementation of persistent atomic lists module
*/
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "os_thread.h"
#include "out.h"
#include "sync.h"
#include "valgrind_internal.h"
#include "memops.h"
#define PREV_OFF (offsetof(struct list_entry, pe_prev) + offsetof(PMEMoid, off))
#define NEXT_OFF (offsetof(struct list_entry, pe_next) + offsetof(PMEMoid, off))
/*
* list_args_common -- common arguments for operations on list
*
* pe_offset - offset to list entry relative to user data
* obj_doffset - offset to element's data relative to pmemobj pool
* entry_ptr - list entry structure of element
*/
struct list_args_common {
ssize_t pe_offset;
uint64_t obj_doffset;
struct list_entry *entry_ptr;
};
/*
* list_args_insert -- arguments for inserting element to list
*
* head - list head
* dest - destination element OID
* dest_entry_ptr - list entry of destination element
* before - insert before or after destination element
*/
struct list_args_insert {
struct list_head *head;
PMEMoid dest;
struct list_entry *dest_entry_ptr;
int before;
};
/*
* list_args_reinsert -- arguments for reinserting element on list
*
* head - list head
* entry_ptr - list entry of old element
* obj_doffset - offset to element's data relative to pmemobj pool
*/
struct list_args_reinsert {
struct list_head *head;
struct list_entry *entry_ptr;
uint64_t obj_doffset;
};
/*
* list_args_remove -- arguments for removing element from list
*
* pe_offset - offset to list entry relative to user data
* obj_doffset - offset to element's data relative to pmemobj pool
* head - list head
* entry_ptr - list entry structure of element
*/
struct list_args_remove {
ssize_t pe_offset;
uint64_t obj_doffset;
struct list_head *head;
struct list_entry *entry_ptr;
};
/*
* list_mutexes_lock -- (internal) grab one or two locks in ascending
* address order
*/
static inline int
list_mutexes_lock(PMEMobjpool *pop,
struct list_head *head1, struct list_head *head2)
{
ASSERTne(head1, NULL);
if (!head2 || head1 == head2)
return pmemobj_mutex_lock(pop, &head1->lock);
PMEMmutex *lock1;
PMEMmutex *lock2;
if ((uintptr_t)&head1->lock < (uintptr_t)&head2->lock) {
lock1 = &head1->lock;
lock2 = &head2->lock;
} else {
lock1 = &head2->lock;
lock2 = &head1->lock;
}
int ret;
if ((ret = pmemobj_mutex_lock(pop, lock1)))
goto err;
if ((ret = pmemobj_mutex_lock(pop, lock2)))
goto err_unlock;
return 0;
err_unlock:
pmemobj_mutex_unlock(pop, lock1);
err:
return ret;
}
/*
* list_mutexes_unlock -- (internal) release one or two locks
*/
static inline void
list_mutexes_unlock(PMEMobjpool *pop,
struct list_head *head1, struct list_head *head2)
{
ASSERTne(head1, NULL);
if (!head2 || head1 == head2) {
pmemobj_mutex_unlock_nofail(pop, &head1->lock);
return;
}
pmemobj_mutex_unlock_nofail(pop, &head1->lock);
pmemobj_mutex_unlock_nofail(pop, &head2->lock);
}
/*
* list_get_dest -- (internal) return destination object ID
*
* If the input dest is not OID_NULL returns dest.
 * If the input dest is OID_NULL and before is set, returns the first element.
 * If the input dest is OID_NULL and before is not set, returns the last element.
*/
static inline PMEMoid
list_get_dest(PMEMobjpool *pop, struct list_head *head, PMEMoid dest,
ssize_t pe_offset, int before)
{
if (dest.off)
return dest;
if (head->pe_first.off == 0 || !!before == POBJ_LIST_DEST_HEAD)
return head->pe_first;
struct list_entry *first_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)head->pe_first.off + pe_offset));
return first_ptr->pe_prev;
}
/*
* list_set_oid_redo_log -- (internal) set PMEMoid value using redo log
*/
static size_t
list_set_oid_redo_log(PMEMobjpool *pop,
struct operation_context *ctx,
PMEMoid *oidp, uint64_t obj_doffset, int oidp_inited)
{
ASSERT(OBJ_PTR_IS_VALID(pop, oidp));
if (!oidp_inited || oidp->pool_uuid_lo != pop->uuid_lo) {
if (oidp_inited)
ASSERTeq(oidp->pool_uuid_lo, 0);
operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo,
ULOG_OPERATION_SET);
}
operation_add_entry(ctx, &oidp->off, obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_update_head -- (internal) update pe_first entry in list head
*/
static size_t
list_update_head(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_head *head, uint64_t first_offset)
{
LOG(15, NULL);
operation_add_entry(ctx, &head->pe_first.off, first_offset,
ULOG_OPERATION_SET);
if (head->pe_first.pool_uuid_lo == 0) {
operation_add_entry(ctx, &head->pe_first.pool_uuid_lo,
pop->uuid_lo, ULOG_OPERATION_SET);
}
return 0;
}
/*
* u64_add_offset -- (internal) add signed offset to unsigned integer and check
* for overflows
*/
static void
u64_add_offset(uint64_t *value, ssize_t off)
{
uint64_t prev = *value;
if (off >= 0) {
*value += (size_t)off;
ASSERT(*value >= prev); /* detect overflow */
} else {
*value -= (size_t)-off;
ASSERT(*value < prev);
}
}
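/*
 * Illustrative sketch (not compiled): how the helper above folds a signed
 * list-entry offset into an unsigned pool offset.  The values are assumptions
 * for this example.
 */
#if 0
static void
example_u64_add_offset(void)
{
	uint64_t off = 128;

	u64_add_offset(&off, -64);	/* off == 64 */
	u64_add_offset(&off, 192);	/* off == 256 */
}
#endif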
/*
* list_fill_entry_persist -- (internal) fill new entry using persist function
*
* Used for newly allocated objects.
*/
static void
list_fill_entry_persist(PMEMobjpool *pop, struct list_entry *entry_ptr,
uint64_t next_offset, uint64_t prev_offset)
{
LOG(15, NULL);
VALGRIND_ADD_TO_TX(entry_ptr, sizeof(*entry_ptr));
entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
entry_ptr->pe_next.off = next_offset;
entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
entry_ptr->pe_prev.off = prev_offset;
VALGRIND_REMOVE_FROM_TX(entry_ptr, sizeof(*entry_ptr));
pmemops_persist(&pop->p_ops, entry_ptr, sizeof(*entry_ptr));
}
/*
* list_fill_entry_redo_log -- (internal) fill new entry using redo log
*
* Used to update entry in existing object.
*/
static size_t
list_fill_entry_redo_log(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_common *args,
uint64_t next_offset, uint64_t prev_offset, int set_uuid)
{
LOG(15, NULL);
struct pmem_ops *ops = &pop->p_ops;
ASSERTne(args->entry_ptr, NULL);
ASSERTne(args->obj_doffset, 0);
if (set_uuid) {
VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_next.pool_uuid_lo),
sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_prev.pool_uuid_lo),
sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
/* don't need to fill pool uuid using redo log */
args->entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
args->entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
VALGRIND_REMOVE_FROM_TX(
&(args->entry_ptr->pe_next.pool_uuid_lo),
sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
VALGRIND_REMOVE_FROM_TX(
&(args->entry_ptr->pe_prev.pool_uuid_lo),
sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
pmemops_persist(ops, args->entry_ptr, sizeof(*args->entry_ptr));
} else {
ASSERTeq(args->entry_ptr->pe_next.pool_uuid_lo, pop->uuid_lo);
ASSERTeq(args->entry_ptr->pe_prev.pool_uuid_lo, pop->uuid_lo);
}
/* set current->next and current->prev using redo log */
uint64_t next_off_off = args->obj_doffset + NEXT_OFF;
uint64_t prev_off_off = args->obj_doffset + PREV_OFF;
u64_add_offset(&next_off_off, args->pe_offset);
u64_add_offset(&prev_off_off, args->pe_offset);
void *next_ptr = (char *)pop + next_off_off;
void *prev_ptr = (char *)pop + prev_off_off;
operation_add_entry(ctx, next_ptr, next_offset, ULOG_OPERATION_SET);
operation_add_entry(ctx, prev_ptr, prev_offset, ULOG_OPERATION_SET);
return 0;
}
/*
* list_remove_single -- (internal) remove element from single list
*/
static size_t
list_remove_single(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_remove *args)
{
LOG(15, NULL);
if (args->entry_ptr->pe_next.off == args->obj_doffset) {
/* only one element on list */
ASSERTeq(args->head->pe_first.off, args->obj_doffset);
ASSERTeq(args->entry_ptr->pe_prev.off, args->obj_doffset);
return list_update_head(pop, ctx, args->head, 0);
} else {
/* set next->prev = prev and prev->next = next */
uint64_t next_off = args->entry_ptr->pe_next.off;
uint64_t next_prev_off = next_off + PREV_OFF;
u64_add_offset(&next_prev_off, args->pe_offset);
uint64_t prev_off = args->entry_ptr->pe_prev.off;
uint64_t prev_next_off = prev_off + NEXT_OFF;
u64_add_offset(&prev_next_off, args->pe_offset);
void *prev_ptr = (char *)pop + next_prev_off;
void *next_ptr = (char *)pop + prev_next_off;
operation_add_entry(ctx, prev_ptr, prev_off,
ULOG_OPERATION_SET);
operation_add_entry(ctx, next_ptr, next_off,
ULOG_OPERATION_SET);
if (args->head->pe_first.off == args->obj_doffset) {
/* removing element is the first one */
return list_update_head(pop, ctx,
args->head, next_off);
} else {
return 0;
}
}
}
/*
* list_insert_before -- (internal) insert element at offset before an element
*/
static size_t
list_insert_before(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
/* current->next = dest and current->prev = dest->prev */
*next_offset = args->dest.off;
*prev_offset = args->dest_entry_ptr->pe_prev.off;
/* dest->prev = current and dest->prev->next = current */
uint64_t dest_prev_off = args->dest.off + PREV_OFF;
u64_add_offset(&dest_prev_off, args_common->pe_offset);
uint64_t dest_prev_next_off = args->dest_entry_ptr->pe_prev.off +
NEXT_OFF;
u64_add_offset(&dest_prev_next_off, args_common->pe_offset);
void *dest_prev_ptr = (char *)pop + dest_prev_off;
void *dest_prev_next_ptr = (char *)pop + dest_prev_next_off;
operation_add_entry(ctx, dest_prev_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
operation_add_entry(ctx, dest_prev_next_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_insert_after -- (internal) insert element at offset after an element
*/
static size_t
list_insert_after(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
/* current->next = dest->next and current->prev = dest */
*next_offset = args->dest_entry_ptr->pe_next.off;
*prev_offset = args->dest.off;
/* dest->next = current and dest->next->prev = current */
uint64_t dest_next_off = args->dest.off + NEXT_OFF;
u64_add_offset(&dest_next_off, args_common->pe_offset);
uint64_t dest_next_prev_off = args->dest_entry_ptr->pe_next.off +
PREV_OFF;
u64_add_offset(&dest_next_prev_off, args_common->pe_offset);
void *dest_next_ptr = (char *)pop + dest_next_off;
void *dest_next_prev_ptr = (char *)pop + dest_next_prev_off;
operation_add_entry(ctx, dest_next_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
operation_add_entry(ctx, dest_next_prev_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_insert_user -- (internal) insert element at offset to a user list
*/
static size_t
list_insert_user(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
if (args->dest.off == 0) {
/* inserting the first element on list */
ASSERTeq(args->head->pe_first.off, 0);
/* set loop on current element */
*next_offset = args_common->obj_doffset;
*prev_offset = args_common->obj_doffset;
/* update head */
list_update_head(pop, ctx, args->head,
args_common->obj_doffset);
} else {
if (args->before) {
/* inserting before dest */
list_insert_before(pop, ctx, args, args_common,
next_offset, prev_offset);
if (args->dest.off == args->head->pe_first.off) {
/* current element at first position */
list_update_head(pop, ctx, args->head,
args_common->obj_doffset);
}
} else {
/* inserting after dest */
list_insert_after(pop, ctx, args, args_common,
next_offset, prev_offset);
}
}
return 0;
}
/*
 * list_insert_new -- (internal) allocate and insert element to a user list
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, must be locked
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation
 * type_num - type number of the new object
 * constructor - object's constructor
 * arg - argument for object's constructor
 * oidp - pointer to target object ID
*/
static int
list_insert_new(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
LOG(3, NULL);
ASSERT(user_head != NULL);
int ret;
#ifdef DEBUG
int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
ASSERTeq(r, 0);
#endif
struct lane *lane;
lane_hold(pop, &lane);
struct pobj_action reserved;
if (palloc_reserve(&pop->heap, size, constructor, arg,
type_num, 0, 0, 0, &reserved) != 0) {
ERR("!palloc_reserve");
ret = -1;
goto err_pmalloc;
}
uint64_t obj_doffset = reserved.heap.offset;
struct operation_context *ctx = lane->external;
operation_start(ctx);
ASSERT((ssize_t)pe_offset >= 0);
dest = list_get_dest(pop, user_head, dest,
(ssize_t)pe_offset, before);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
obj_doffset + pe_offset);
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
dest.off + pe_offset);
struct list_args_insert args = {
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.head = user_head,
.before = before,
};
struct list_args_common args_common = {
.obj_doffset = obj_doffset,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
uint64_t next_offset;
uint64_t prev_offset;
/* insert element to user list */
list_insert_user(pop,
ctx, &args, &args_common,
&next_offset, &prev_offset);
/* don't need to use redo log for filling new element */
list_fill_entry_persist(pop, entry_ptr,
next_offset, prev_offset);
if (oidp != NULL) {
if (OBJ_PTR_IS_VALID(pop, oidp)) {
list_set_oid_redo_log(pop, ctx,
oidp, obj_doffset, 0);
} else {
oidp->off = obj_doffset;
oidp->pool_uuid_lo = pop->uuid_lo;
}
}
palloc_publish(&pop->heap, &reserved, 1, ctx);
ret = 0;
err_pmalloc:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
 * list_insert_new_user -- allocate and insert element to a user list
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation
 * type_num - type number of the new object
 * constructor - object's constructor
 * arg - argument for object's constructor
 * oidp - pointer to target object ID
*/
int
list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
int ret;
if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
return -1;
}
ret = list_insert_new(pop, pe_offset, user_head,
dest, before, size, type_num, constructor, arg, oidp);
pmemobj_mutex_unlock_nofail(pop, &user_head->lock);
ASSERT(ret == 0 || ret == -1);
return ret;
}
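/*
 * Usage sketch (illustrative only, compiled out): atomically allocating a
 * node and linking it at the front of a user list. The node layout, the
 * constructor and the wrapper below are hypothetical; they only demonstrate
 * how pe_offset relates to the list entry embedded in the user data.
 */
#if 0
struct my_node {
	uint64_t payload;
	struct list_entry entry; /* embedded user-list entry */
};
static int
my_node_construct(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	struct my_node *n = ptr;
	(void)usable_size;
	n->payload = *(uint64_t *)arg;
	pmemobj_persist(pop, &n->payload, sizeof(n->payload));
	return 0;
}
static int
my_list_push_front(PMEMobjpool *pop, struct list_head *head,
	uint64_t value, PMEMoid *oidp)
{
	/* pe_offset is the offset of the list entry within the user data */
	return list_insert_new_user(pop, offsetof(struct my_node, entry),
		head, OID_NULL, 1 /* before the first element */,
		sizeof(struct my_node), 0 /* type_num */,
		my_node_construct, &value, oidp);
}
#endif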
/*
* list_insert -- insert object to a single list
*
* pop - pmemobj handle
* pe_offset - offset to list entry on user list relative to user data
* head - list head
* dest - destination object ID
* before - before/after destination
* oid - target object ID
*/
int
list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid dest, int before,
PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head, NULL);
struct lane *lane;
lane_hold(pop, &lane);
int ret;
if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
dest = list_get_dest(pop, head, dest, pe_offset, before);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)oid.off + pe_offset));
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)dest.off + pe_offset));
struct list_args_insert args = {
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.head = head,
.before = before,
};
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
uint64_t next_offset;
uint64_t prev_offset;
/* insert element to user list */
list_insert_user(pop, ctx,
&args, &args_common, &next_offset, &prev_offset);
/* fill entry of existing element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, next_offset, prev_offset, 1);
operation_process(ctx);
operation_finish(ctx, 0);
pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
 * list_remove_free -- (internal) remove an object from a user list and free it
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, *must* be locked
* oidp - pointer to target object ID
*/
static void
list_remove_free(PMEMobjpool *pop, size_t pe_offset,
struct list_head *user_head, PMEMoid *oidp)
{
LOG(3, NULL);
ASSERT(user_head != NULL);
#ifdef DEBUG
int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
ASSERTeq(r, 0);
#endif
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = lane->external;
operation_start(ctx);
struct pobj_action deferred;
palloc_defer_free(&pop->heap, oidp->off, &deferred);
uint64_t obj_doffset = oidp->off;
ASSERT((ssize_t)pe_offset >= 0);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
obj_doffset + pe_offset);
struct list_args_remove args = {
.pe_offset = (ssize_t)pe_offset,
.head = user_head,
.entry_ptr = entry_ptr,
.obj_doffset = obj_doffset
};
/* remove from user list */
list_remove_single(pop, ctx, &args);
/* clear the oid */
if (OBJ_PTR_IS_VALID(pop, oidp))
list_set_oid_redo_log(pop, ctx, oidp, 0, 1);
else
oidp->off = 0;
palloc_publish(&pop->heap, &deferred, 1, ctx);
lane_release(pop);
}
/*
 * list_remove_free_user -- remove an object from a user list and free it
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head
* oidp - pointer to target object ID
*/
int
list_remove_free_user(PMEMobjpool *pop, size_t pe_offset,
struct list_head *user_head, PMEMoid *oidp)
{
LOG(3, NULL);
int ret;
if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
return -1;
}
list_remove_free(pop, pe_offset, user_head, oidp);
pmemobj_mutex_unlock_nofail(pop, &user_head->lock);
return 0;
}
/*
* list_remove -- remove object from list
*
* pop - pmemobj handle
* pe_offset - offset to list entry on user list relative to user data
* head - list head
* oid - target object ID
*/
int
list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head, NULL);
int ret;
struct lane *lane;
lane_hold(pop, &lane);
if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + (size_t)pe_offset);
struct list_args_remove args = {
.pe_offset = (ssize_t)pe_offset,
.head = head,
.entry_ptr = entry_ptr,
.obj_doffset = oid.off,
};
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
/* remove element from user list */
list_remove_single(pop, ctx, &args);
/* clear next and prev offsets in removing element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, 0, 0, 0);
operation_process(ctx);
operation_finish(ctx, 0);
pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
* list_move -- move object between two lists
*
* pop - pmemobj handle
* pe_offset_old - offset to old list entry relative to user data
* head_old - old list head
* pe_offset_new - offset to new list entry relative to user data
* head_new - new list head
* dest - destination object ID
* before - before/after destination
* oid - target object ID
*/
int
list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head_old, NULL);
ASSERTne(head_new, NULL);
int ret;
struct lane *lane;
lane_hold(pop, &lane);
/*
* Grab locks in specified order to avoid dead-locks.
*
* XXX performance improvement: initialize oob locks at pool opening
*/
if ((ret = list_mutexes_lock(pop, head_new, head_old))) {
errno = ret;
LOG(2, "list_mutexes_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
dest = list_get_dest(pop, head_new, dest,
(ssize_t)pe_offset_new, before);
struct list_entry *entry_ptr_old =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + pe_offset_old);
struct list_entry *entry_ptr_new =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + pe_offset_new);
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
dest.off + pe_offset_new);
if (head_old == head_new) {
/* moving within the same list */
if (dest.off == oid.off)
goto unlock;
if (before && dest_entry_ptr->pe_prev.off == oid.off) {
if (head_old->pe_first.off != dest.off)
goto unlock;
list_update_head(pop, ctx,
head_old, oid.off);
goto redo_last;
}
if (!before && dest_entry_ptr->pe_next.off == oid.off) {
if (head_old->pe_first.off != oid.off)
goto unlock;
list_update_head(pop, ctx,
head_old, entry_ptr_old->pe_next.off);
goto redo_last;
}
}
ASSERT((ssize_t)pe_offset_old >= 0);
struct list_args_remove args_remove = {
.pe_offset = (ssize_t)pe_offset_old,
.head = head_old,
.entry_ptr = entry_ptr_old,
.obj_doffset = oid.off,
};
struct list_args_insert args_insert = {
.head = head_new,
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.before = before,
};
ASSERT((ssize_t)pe_offset_new >= 0);
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr_new,
.pe_offset = (ssize_t)pe_offset_new,
};
uint64_t next_offset;
uint64_t prev_offset;
/* remove element from user list */
list_remove_single(pop, ctx, &args_remove);
/* insert element to user list */
list_insert_user(pop, ctx, &args_insert,
&args_common, &next_offset, &prev_offset);
/* offsets differ, move is between different list entries - set uuid */
int set_uuid = pe_offset_new != pe_offset_old ? 1 : 0;
/* fill next and prev offsets of moving element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, next_offset, prev_offset, set_uuid);
redo_last:
unlock:
operation_process(ctx);
operation_finish(ctx, 0);
list_mutexes_unlock(pop, head_new, head_old);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
| 24,297 | 24.848936 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/palloc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* palloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
* transient and persistent representation of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*
* Lock ordering in the entirety of the allocator is simple, but might be hard
* to follow at times because locks are, by necessity, externalized.
* There are two sets of locks that need to be taken into account:
* - runtime state locks, represented by buckets.
* - persistent state locks, represented by memory block mutexes.
*
* To properly use them, follow these rules:
* - When nesting, always lock runtime state first.
* Doing the reverse might cause deadlocks in other parts of the code.
*
* - When introducing functions that would require runtime state locks,
* always try to move the lock acquiring to the upper most layer. This
* usually means that the functions will simply take "struct bucket" as
* their argument. By doing so most of the locking can happen in
* the frontend part of the allocator and it's easier to follow the first
* rule because all functions in the backend can safely use the persistent
* state locks - the runtime lock, if it is needed, will be already taken
* by the upper layer.
*
* General lock ordering:
* 1. arenas.lock
* 2. buckets (sorted by ID)
* 3. memory blocks (sorted by lock address)
*/
#include "valgrind_internal.h"
#include "heap_layout.h"
#include "heap.h"
#include "alloc_class.h"
#include "out.h"
#include "sys_util.h"
#include "palloc.h"
#include "ravl.h"
#include "vec.h"
struct pobj_action_internal {
/* type of operation (alloc/free vs set) */
enum pobj_action_type type;
/* not used */
uint32_t padding;
/*
* Action-specific lock that needs to be taken for the duration of
* an action.
*/
os_mutex_t *lock;
/* action-specific data */
union {
/* valid only when type == POBJ_ACTION_TYPE_HEAP */
struct {
uint64_t offset;
uint64_t usable_size;
enum memblock_state new_state;
struct memory_block m;
struct memory_block_reserved *mresv;
};
/* valid only when type == POBJ_ACTION_TYPE_MEM */
struct {
uint64_t *ptr;
uint64_t value;
};
/* padding, not used */
uint64_t data2[14];
};
};
/*
* palloc_set_value -- creates a new set memory action
*/
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
uint64_t *ptr, uint64_t value)
{
act->type = POBJ_ACTION_TYPE_MEM;
struct pobj_action_internal *actp = (struct pobj_action_internal *)act;
actp->ptr = ptr;
actp->value = value;
actp->lock = NULL;
}
/*
* alloc_prep_block -- (internal) prepares a memory block for allocation
*
* Once the block is fully reserved and it's guaranteed that no one else will
* be able to write to this memory region it is safe to write the allocation
* header and call the object construction function.
*
* Because the memory block at this stage is only reserved in transient state
* there's no need to worry about fail-safety of this method because in case
* of a crash the memory will be back in the free blocks collection.
*/
static int
alloc_prep_block(struct palloc_heap *heap, const struct memory_block *m,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
struct pobj_action_internal *out)
{
void *uptr = m->m_ops->get_user_data(m);
size_t usize = m->m_ops->get_user_size(m);
VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, uptr, usize);
VALGRIND_DO_MAKE_MEM_UNDEFINED(uptr, usize);
VALGRIND_ANNOTATE_NEW_MEMORY(uptr, usize);
m->m_ops->write_header(m, extra_field, object_flags);
/*
	 * Fill the allocated memory with a pattern if the
	 * debug.heap.alloc_pattern CTL parameter has been set.
*/
if (unlikely(heap->alloc_pattern > PALLOC_CTL_DEBUG_NO_PATTERN)) {
pmemops_memset(&heap->p_ops, uptr, heap->alloc_pattern,
usize, 0);
VALGRIND_DO_MAKE_MEM_UNDEFINED(uptr, usize);
}
int ret;
if (constructor != NULL &&
(ret = constructor(heap->base, uptr, usize, arg)) != 0) {
/*
* If canceled, revert the block back to the free state in vg
* machinery.
*/
VALGRIND_DO_MEMPOOL_FREE(heap->layout, uptr);
return ret;
}
/*
* To avoid determining the user data pointer twice this method is also
* responsible for calculating the offset of the object in the pool that
* will be used to set the offset destination pointer provided by the
* caller.
*/
out->offset = HEAP_PTR_TO_OFF(heap, uptr);
out->usable_size = usize;
return 0;
}
/*
* palloc_reservation_create -- creates a volatile reservation of a
* memory block.
*
* The first step in the allocation of a new block is reserving it in
* the transient heap - which is represented by the bucket abstraction.
*
* To provide optimal scaling for multi-threaded applications and reduce
* fragmentation the appropriate bucket is chosen depending on the
* current thread context and to which allocation class the requested
* size falls into.
*
* Once the bucket is selected, just enough memory is reserved for the
* requested size. The underlying block allocation algorithm
* (best-fit, next-fit, ...) varies depending on the bucket container.
*/
static int
palloc_reservation_create(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct pobj_action_internal *out)
{
int err = 0;
struct memory_block *new_block = &out->m;
out->type = POBJ_ACTION_TYPE_HEAP;
ASSERT(class_id < UINT8_MAX);
struct alloc_class *c = class_id == 0 ?
heap_get_best_class(heap, size) :
alloc_class_by_id(heap_alloc_classes(heap),
(uint8_t)class_id);
if (c == NULL) {
ERR("no allocation class for size %lu bytes", size);
errno = EINVAL;
return -1;
}
/*
* The caller provided size in bytes, but buckets operate in
* 'size indexes' which are multiples of the block size in the
* bucket.
*
* For example, to allocate 500 bytes from a bucket that
* provides 256 byte blocks two memory 'units' are required.
*/
ssize_t size_idx = alloc_class_calc_size_idx(c, size);
if (size_idx < 0) {
ERR("allocation class not suitable for size %lu bytes",
size);
errno = EINVAL;
return -1;
}
ASSERT(size_idx <= UINT32_MAX);
*new_block = MEMORY_BLOCK_NONE;
new_block->size_idx = (uint32_t)size_idx;
struct bucket *b = heap_bucket_acquire(heap, c->id, arena_id);
err = heap_get_bestfit_block(heap, b, new_block);
if (err != 0)
goto out;
if (alloc_prep_block(heap, new_block, constructor, arg,
extra_field, object_flags, out) != 0) {
/*
* Constructor returned non-zero value which means
* the memory block reservation has to be rolled back.
*/
if (new_block->type == MEMORY_BLOCK_HUGE) {
bucket_insert_block(b, new_block);
}
err = ECANCELED;
goto out;
}
/*
* Each as of yet unfulfilled reservation needs to be tracked in the
* runtime state.
* The memory block cannot be put back into the global state unless
* there are no active reservations.
*/
if ((out->mresv = b->active_memory_block) != NULL)
util_fetch_and_add64(&out->mresv->nresv, 1);
out->lock = new_block->m_ops->get_lock(new_block);
out->new_state = MEMBLOCK_ALLOCATED;
out:
heap_bucket_release(heap, b);
if (err == 0)
return 0;
errno = err;
return -1;
}
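/*
 * Worked example (illustrative only, compiled out) of the 'size index'
 * arithmetic described above, reduced to plain integer math: a request for
 * 500 bytes from a class with 256-byte units needs (500 + 255) / 256 = 2
 * units. The real calculation is done by alloc_class_calc_size_idx() and
 * additionally accounts for block headers and run layout.
 */
#if 0
static uint32_t
example_size_idx(size_t unit_size, size_t size)
{
	return (uint32_t)((size + unit_size - 1) / unit_size);
}
#endif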
/*
* palloc_heap_action_exec -- executes a single heap action (alloc, free)
*/
static void
palloc_heap_action_exec(struct palloc_heap *heap,
const struct pobj_action_internal *act,
struct operation_context *ctx)
{
#ifdef DEBUG
//printf("memblk unknown %d\n",MEMBLOCK_STATE_UNKNOWN);
//printf("memblk alloced %d\n",MEMBLOCK_ALLOCATED);
//printf("memblk free %d\n",MEMBLOCK_FREE);
//printf("memblk max %d\n",MAX_MEMBLOCK_STATE);
//printf("action pointer %p left %d right %d\n",&act->m, act->m.m_ops->get_state(&act->m),act->new_state);
if (act->m.m_ops->get_state(&act->m) == act->new_state) {
ERR("invalid operation or heap corruption");
ASSERT(0);
}
#endif /* DEBUG */
/*
* The actual required metadata modifications are chunk-type
* dependent, but it always is a modification of a single 8 byte
* value - either modification of few bits in a bitmap or
* changing a chunk type from free to used or vice versa.
*/
act->m.m_ops->prep_hdr(&act->m, act->new_state, ctx);
}
/*
* palloc_restore_free_chunk_state -- updates the runtime state of a free chunk.
*
* This function also takes care of coalescing of huge chunks.
*/
static void
palloc_restore_free_chunk_state(struct palloc_heap *heap,
struct memory_block *m)
{
if (m->type == MEMORY_BLOCK_HUGE) {
struct bucket *b = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
if (heap_free_chunk_reuse(heap, b, m) != 0) {
if (errno == EEXIST) {
FATAL(
"duplicate runtime chunk state, possible double free");
} else {
LOG(2, "unable to track runtime chunk state");
}
}
heap_bucket_release(heap, b);
}
}
/*
* palloc_mem_action_noop -- empty handler for unused memory action funcs
*/
static void
palloc_mem_action_noop(struct palloc_heap *heap,
struct pobj_action_internal *act)
{
}
/*
* palloc_reservation_clear -- clears the reservation state of the block,
* discards the associated memory block if possible
*/
static void
palloc_reservation_clear(struct palloc_heap *heap,
struct pobj_action_internal *act, int publish)
{
if (act->mresv == NULL)
return;
struct memory_block_reserved *mresv = act->mresv;
struct bucket *b = mresv->bucket;
if (!publish) {
util_mutex_lock(&b->lock);
struct memory_block *am = &b->active_memory_block->m;
/*
* If a memory block used for the action is the currently active
* memory block of the bucket it can be inserted back to the
* bucket. This way it will be available for future allocation
* requests, improving performance.
*/
if (b->is_active &&
am->chunk_id == act->m.chunk_id &&
am->zone_id == act->m.zone_id) {
ASSERTeq(b->active_memory_block, mresv);
bucket_insert_block(b, &act->m);
}
util_mutex_unlock(&b->lock);
}
if (util_fetch_and_sub64(&mresv->nresv, 1) == 1) {
VALGRIND_ANNOTATE_HAPPENS_AFTER(&mresv->nresv);
/*
* If the memory block used for the action is not currently used
* in any bucket nor action it can be discarded (given back to
* the heap).
*/
heap_discard_run(heap, &mresv->m);
Free(mresv);
} else {
VALGRIND_ANNOTATE_HAPPENS_BEFORE(&mresv->nresv);
}
}
/*
* palloc_heap_action_on_cancel -- restores the state of the heap
*/
static void
palloc_heap_action_on_cancel(struct palloc_heap *heap,
struct pobj_action_internal *act)
{
if (act->new_state == MEMBLOCK_FREE)
return;
VALGRIND_DO_MEMPOOL_FREE(heap->layout,
act->m.m_ops->get_user_data(&act->m));
act->m.m_ops->invalidate(&act->m);
palloc_restore_free_chunk_state(heap, &act->m);
palloc_reservation_clear(heap, act, 0 /* publish */);
}
/*
* palloc_heap_action_on_process -- performs finalization steps under a lock
* on the persistent state
*/
static void
palloc_heap_action_on_process(struct palloc_heap *heap,
struct pobj_action_internal *act)
{
if (act->new_state == MEMBLOCK_ALLOCATED) {
STATS_INC(heap->stats, persistent, heap_curr_allocated,
act->m.m_ops->get_real_size(&act->m));
if (act->m.type == MEMORY_BLOCK_RUN) {
STATS_INC(heap->stats, transient, heap_run_allocated,
act->m.m_ops->get_real_size(&act->m));
}
} else if (act->new_state == MEMBLOCK_FREE) {
if (On_memcheck) {
void *ptr = act->m.m_ops->get_user_data(&act->m);
VALGRIND_DO_MEMPOOL_FREE(heap->layout, ptr);
} else if (On_pmemcheck) {
/*
* The sync module, responsible for implementations of
* persistent memory resident volatile variables,
* de-registers the pmemcheck pmem mapping at the time
* of initialization. This is done so that usage of
* pmem locks is not reported as an error due to
			 * missing flushes/stores outside of a transaction.
			 * But after we free an object, we need to reestablish
			 * the pmem mapping, otherwise pmemcheck might miss
			 * bugs that occur in newly allocated memory locations
			 * that once were occupied by a lock/volatile variable.
*/
void *ptr = act->m.m_ops->get_user_data(&act->m);
size_t size = act->m.m_ops->get_real_size(&act->m);
VALGRIND_REGISTER_PMEM_MAPPING(ptr, size);
}
STATS_SUB(heap->stats, persistent, heap_curr_allocated,
act->m.m_ops->get_real_size(&act->m));
if (act->m.type == MEMORY_BLOCK_RUN) {
STATS_SUB(heap->stats, transient, heap_run_allocated,
act->m.m_ops->get_real_size(&act->m));
}
heap_memblock_on_free(heap, &act->m);
}
}
/*
* palloc_heap_action_on_unlock -- performs finalization steps that need to be
* performed without a lock on persistent state
*/
static void
palloc_heap_action_on_unlock(struct palloc_heap *heap,
struct pobj_action_internal *act)
{
if (act->new_state == MEMBLOCK_ALLOCATED) {
palloc_reservation_clear(heap, act, 1 /* publish */);
} else if (act->new_state == MEMBLOCK_FREE) {
palloc_restore_free_chunk_state(heap, &act->m);
}
}
/*
* palloc_mem_action_exec -- executes a single memory action (set, and, or)
*/
static void
palloc_mem_action_exec(struct palloc_heap *heap,
const struct pobj_action_internal *act,
struct operation_context *ctx)
{
operation_add_entry(ctx, act->ptr, act->value, ULOG_OPERATION_SET);
}
static const struct {
/*
* Translate action into some number of operation_entry'ies.
*/
void (*exec)(struct palloc_heap *heap,
const struct pobj_action_internal *act,
struct operation_context *ctx);
/*
* Cancel any runtime state changes. Can be called only when action has
* not been translated to persistent operation yet.
*/
void (*on_cancel)(struct palloc_heap *heap,
struct pobj_action_internal *act);
/*
* Final steps after persistent state has been modified. Performed
* under action-specific lock.
*/
void (*on_process)(struct palloc_heap *heap,
struct pobj_action_internal *act);
/*
* Final steps after persistent state has been modified. Performed
* after action-specific lock has been dropped.
*/
void (*on_unlock)(struct palloc_heap *heap,
struct pobj_action_internal *act);
} action_funcs[POBJ_MAX_ACTION_TYPE] = {
[POBJ_ACTION_TYPE_HEAP] = {
.exec = palloc_heap_action_exec,
.on_cancel = palloc_heap_action_on_cancel,
.on_process = palloc_heap_action_on_process,
.on_unlock = palloc_heap_action_on_unlock,
},
[POBJ_ACTION_TYPE_MEM] = {
.exec = palloc_mem_action_exec,
.on_cancel = palloc_mem_action_noop,
.on_process = palloc_mem_action_noop,
.on_unlock = palloc_mem_action_noop,
}
};
/*
* palloc_action_compare -- compares two actions based on lock address
*/
static int
palloc_action_compare(const void *lhs, const void *rhs)
{
const struct pobj_action_internal *mlhs = lhs;
const struct pobj_action_internal *mrhs = rhs;
uintptr_t vlhs = (uintptr_t)(mlhs->lock);
uintptr_t vrhs = (uintptr_t)(mrhs->lock);
if (vlhs < vrhs)
return -1;
if (vlhs > vrhs)
return 1;
return 0;
}
/*
* palloc_exec_actions -- perform the provided free/alloc operations
*/
static void
palloc_exec_actions(struct palloc_heap *heap,
struct operation_context *ctx,
struct pobj_action_internal *actv,
size_t actvcnt)
{
/*
* The operations array is sorted so that proper lock ordering is
* ensured.
*/
if (actv) {
qsort(actv, actvcnt, sizeof(struct pobj_action_internal),
palloc_action_compare);
} else {
ASSERTeq(actvcnt, 0);
}
//asm volatile("invd" ::: "memory");
struct pobj_action_internal *act;
for (size_t i = 0; i < actvcnt; ++i) {
act = &actv[i];
/*
* This lock must be held for the duration between the creation
* of the allocation metadata updates in the operation context
* and the operation processing. This is because a different
* thread might operate on the same 8-byte value of the run
* bitmap and override allocation performed by this thread.
*/
if (i == 0 || act->lock != actv[i - 1].lock) {
if (act->lock)
util_mutex_lock(act->lock);
}
/* translate action to some number of operation_entry'ies */
action_funcs[act->type].exec(heap, act, ctx);
}
/* wait for all allocated object headers to be persistent */
pmemops_drain(&heap->p_ops);
/* perform all persistent memory operations */
operation_process(ctx);
for (size_t i = 0; i < actvcnt; ++i) {
act = &actv[i];
action_funcs[act->type].on_process(heap, act);
if (i == actvcnt - 1 || act->lock != actv[i + 1].lock) {
if (act->lock)
util_mutex_unlock(act->lock);
}
}
for (size_t i = 0; i < actvcnt; ++i) {
act = &actv[i];
action_funcs[act->type].on_unlock(heap, act);
}
operation_finish(ctx, 0);
}
/*
* palloc_reserve -- creates a single reservation
*/
int
palloc_reserve(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct pobj_action *act)
{
COMPILE_ERROR_ON(sizeof(struct pobj_action) !=
sizeof(struct pobj_action_internal));
return palloc_reservation_create(heap, size, constructor, arg,
extra_field, object_flags, class_id, arena_id,
(struct pobj_action_internal *)act);
}
/*
 * palloc_defer_free_create -- (internal) creates a deferred free action
*/
static void
palloc_defer_free_create(struct palloc_heap *heap, uint64_t off,
struct pobj_action_internal *out)
{
COMPILE_ERROR_ON(sizeof(struct pobj_action) !=
sizeof(struct pobj_action_internal));
out->type = POBJ_ACTION_TYPE_HEAP;
out->offset = off;
out->m = memblock_from_offset(heap, off);
/*
* For the duration of free we may need to protect surrounding
* metadata from being modified.
*/
out->lock = out->m.m_ops->get_lock(&out->m);
out->mresv = NULL;
out->new_state = MEMBLOCK_FREE;
}
/*
* palloc_defer_free -- creates a deferred free action
*/
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
struct pobj_action *act)
{
COMPILE_ERROR_ON(sizeof(struct pobj_action) !=
sizeof(struct pobj_action_internal));
palloc_defer_free_create(heap, off, (struct pobj_action_internal *)act);
}
/*
* palloc_cancel -- cancels all reservations in the array
*/
void
palloc_cancel(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt)
{
struct pobj_action_internal *act;
for (size_t i = 0; i < actvcnt; ++i) {
act = (struct pobj_action_internal *)&actv[i];
action_funcs[act->type].on_cancel(heap, act);
}
}
/*
* palloc_publish -- publishes all reservations in the array
*/
void
palloc_publish(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx)
{
palloc_exec_actions(heap, ctx,
(struct pobj_action_internal *)actv, actvcnt);
}
/*
* palloc_operation -- persistent memory operation. Takes a NULL pointer
* or an existing memory block and modifies it to occupy, at least, 'size'
* number of bytes.
*
* The malloc, free and realloc routines are implemented in the context of this
* common operation which encompasses all of the functionality usually done
* separately in those methods.
*
* The first thing that needs to be done is determining which memory blocks
 * will be affected by the operation - this varies depending on whether the
* operation will need to modify or free an existing block and/or allocate
* a new one.
*
* Simplified allocation process flow is as follows:
* - reserve a new block in the transient heap
* - prepare the new block
* - create redo log of required modifications
* - chunk metadata
* - offset of the new object
* - commit and process the redo log
*
* And similarly, the deallocation process:
* - create redo log of required modifications
* - reverse the chunk metadata back to the 'free' state
* - set the destination of the object offset to zero
* - commit and process the redo log
* There's an important distinction in the deallocation process - it does not
* return the memory block to the transient container. That is done once no more
* memory is available.
*
* Reallocation is a combination of the above, with one additional step
* of copying the old content.
*/
int
palloc_operation(struct palloc_heap *heap,
uint64_t off, uint64_t *dest_off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct operation_context *ctx)
{
size_t user_size = 0;
size_t nops = 0;
struct pobj_action_internal ops[2];
struct pobj_action_internal *alloc = NULL;
struct pobj_action_internal *dealloc = NULL;
/*
* The offset of an existing block can be nonzero which means this
* operation is either free or a realloc - either way the offset of the
 * object needs to be translated into a memory block, which is a structure
* that all of the heap methods expect.
*/
if (off != 0) {
dealloc = &ops[nops++];
palloc_defer_free_create(heap, off, dealloc);
user_size = dealloc->m.m_ops->get_user_size(&dealloc->m);
if (user_size == size) {
operation_cancel(ctx);
return 0;
}
}
/* alloc or realloc */
if (size != 0) {
alloc = &ops[nops++];
if (palloc_reservation_create(heap, size, constructor, arg,
extra_field, object_flags,
class_id, arena_id, alloc) != 0) {
operation_cancel(ctx);
return -1;
}
}
/* realloc */
if (alloc != NULL && dealloc != NULL) {
/* copy data to newly allocated memory */
size_t old_size = user_size;
size_t to_cpy = old_size > size ? size : old_size;
VALGRIND_ADD_TO_TX(
HEAP_OFF_TO_PTR(heap, alloc->offset),
to_cpy);
pmemops_memcpy(&heap->p_ops,
HEAP_OFF_TO_PTR(heap, alloc->offset),
HEAP_OFF_TO_PTR(heap, off),
to_cpy,
0);
VALGRIND_REMOVE_FROM_TX(
HEAP_OFF_TO_PTR(heap, alloc->offset),
to_cpy);
}
/*
* If the caller provided a destination value to update, it needs to be
* modified atomically alongside the heap metadata, and so the operation
* context must be used.
*/
if (dest_off) {
operation_add_entry(ctx, dest_off,
alloc ? alloc->offset : 0, ULOG_OPERATION_SET);
}
/* and now actually perform the requested operation! */
palloc_exec_actions(heap, ctx, ops, nops);
return 0;
}
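/*
 * Usage sketch (illustrative only, compiled out): how malloc/free-style
 * wrappers map onto palloc_operation(). The real wrappers (pmalloc, pfree)
 * live in pmalloc.c and additionally hold a lane and start the operation
 * context; only the shape of the calls is shown here.
 */
#if 0
static int
example_pmalloc(struct palloc_heap *heap, struct operation_context *ctx,
	uint64_t *off, size_t size)
{
	/* off == 0, size != 0 -> pure allocation */
	return palloc_operation(heap, 0, off, size, NULL, NULL,
		0, 0, 0, 0, ctx);
}
static int
example_pfree(struct palloc_heap *heap, struct operation_context *ctx,
	uint64_t *off)
{
	/* *off != 0, size == 0 -> pure deallocation */
	return palloc_operation(heap, *off, off, 0, NULL, NULL,
		0, 0, 0, 0, ctx);
}
#endif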
/*
* palloc_offset_compare -- (internal) comparator for sorting by the offset of
* an object.
*/
static int
palloc_offset_compare(const void *lhs, const void *rhs)
{
const uint64_t * const * mlhs = lhs;
const uint64_t * const * mrhs = rhs;
uintptr_t vlhs = **mlhs;
uintptr_t vrhs = **mrhs;
if (vlhs < vrhs)
return 1;
if (vlhs > vrhs)
return -1;
return 0;
}
struct palloc_defrag_entry {
uint64_t **offsetp;
};
/*
* palloc_pointer_compare -- (internal) comparator for sorting by the
* pointer of an offset in the tree.
*/
static int
palloc_pointer_compare(const void *lhs, const void *rhs)
{
const struct palloc_defrag_entry *mlhs = lhs;
const struct palloc_defrag_entry *mrhs = rhs;
uintptr_t vlhs = (uintptr_t)*mlhs->offsetp;
uintptr_t vrhs = (uintptr_t)*mrhs->offsetp;
if (vlhs > vrhs)
return 1;
if (vlhs < vrhs)
return -1;
return 0;
}
VEC(pobj_actions, struct pobj_action);
/*
* pobj_actions_add -- add a new action to the end of the vector and return
* its slot. Vector must be able to hold the new value. Reallocation is
* forbidden.
*/
static struct pobj_action *
pobj_actions_add(struct pobj_actions *actv)
{
/*
* This shouldn't happen unless there's a bug in the calculation
* of the maximum number of actions.
*/
if (VEC_SIZE(actv) == VEC_CAPACITY(actv))
abort();
actv->size++;
return &VEC_BACK(actv);
}
/*
* palloc_defrag -- forces recycling of all available memory, and reallocates
* provided objects so that they have the lowest possible address.
*/
int
palloc_defrag(struct palloc_heap *heap, uint64_t **objv, size_t objcnt,
struct operation_context *ctx, struct pobj_defrag_result *result)
{
int ret = -1;
/*
 * Offset pointers need to be sorted by the offset of the object in
* descending order. This gives us two things, a) the defragmentation
* process is more likely to move objects to a lower offset, improving
* locality and tentatively enabling the heap to shrink, and b) pointers
* to the same object are next to each other in the array, so it's easy
* to reallocate the object once and simply update all remaining
* pointers.
*/
qsort(objv, objcnt, sizeof(uint64_t *), palloc_offset_compare);
/*
* We also need to store pointers to objects in a tree, so that it's
* possible to update pointers to other objects on the provided list
* that reside in the objects that were already reallocated or
* will be reallocated later on in the process.
*/
struct ravl *objvp = ravl_new_sized(palloc_pointer_compare,
sizeof(struct palloc_defrag_entry));
if (objvp == NULL)
goto err_ravl;
/*
* We need to calculate how many pointers to the same object we will
* need to update during defrag. This will be used to calculate capacity
* for the action vector and the redo log.
*/
size_t longest_object_sequence = 1;
size_t current_object_sequence = 1;
for (size_t i = 0; i < objcnt; ++i) {
if (i != 0 && *objv[i - 1] == *objv[i]) {
current_object_sequence += 1;
} else {
if (current_object_sequence > longest_object_sequence)
longest_object_sequence =
current_object_sequence;
current_object_sequence = 1;
}
struct palloc_defrag_entry e = {&objv[i]};
if (ravl_emplace_copy(objvp, &e) != 0)
goto err_objvp;
}
if (current_object_sequence > longest_object_sequence)
longest_object_sequence = current_object_sequence;
heap_force_recycle(heap);
/*
* The number of actions at which the action vector will be processed.
*/
const size_t actions_per_realloc = 3; /* alloc + free + set */
const size_t max_actions =
LANE_REDO_EXTERNAL_SIZE / sizeof(struct ulog_entry_val)
- actions_per_realloc;
struct pobj_actions actv;
VEC_INIT(&actv);
/*
* Vector needs enough capacity to handle the largest
* possible sequence of actions. Given that the actions are published
* once the max_actions threshold is crossed AND the sequence for the
* current object is finished, worst-case capacity is a sum of
* max_actions and the largest object sequence - because that sequence
* might happen to begin when current object number i == max_action.
*/
size_t actv_required_capacity =
max_actions + longest_object_sequence + actions_per_realloc;
if (VEC_RESERVE(&actv, actv_required_capacity) != 0)
goto err;
/*
* Do NOT reallocate action vector after this line, because
* prev_reserve can point to the slot in the original vector.
*/
struct pobj_action *prev_reserve = NULL;
uint64_t prev_offset = 0;
for (size_t i = 0; i < objcnt; ++i) {
uint64_t *offsetp = objv[i];
uint64_t offset = *offsetp;
/*
* We want to keep our redo logs relatively small, and so
* actions vector is processed on a regular basis.
*/
if (prev_offset != offset && VEC_SIZE(&actv) >= max_actions) {
/*
* If there are any pointers on the tree to the
* memory actions that are being applied, they need to
* be removed. Future reallocations will already have
* these modifications applied.
*/
struct pobj_action *iter;
VEC_FOREACH_BY_PTR(iter, &actv) {
if (iter->type != POBJ_ACTION_TYPE_MEM)
continue;
struct pobj_action_internal *iteri =
(struct pobj_action_internal *)iter;
struct palloc_defrag_entry e = {&iteri->ptr};
struct ravl_node *n = ravl_find(objvp, &e,
RAVL_PREDICATE_EQUAL);
if (n != NULL)
ravl_remove(objvp, n);
}
size_t entries_size =
VEC_SIZE(&actv) * sizeof(struct ulog_entry_val);
if (operation_reserve(ctx, entries_size) != 0)
goto err;
palloc_publish(heap, VEC_ARR(&actv), VEC_SIZE(&actv),
ctx);
operation_start(ctx);
VEC_CLEAR(&actv);
}
/*
* If the previous pointer of this offset was skipped,
* skip all pointers for that object.
*/
if (prev_reserve == NULL && prev_offset == offset)
continue;
/*
* If this is an offset to an object that was already
* reallocated in the previous iteration, we need to only update
* the pointer to the new offset.
*/
if (prev_reserve && prev_offset == offset) {
struct pobj_action *set = pobj_actions_add(&actv);
palloc_set_value(heap, set,
offsetp, prev_reserve->heap.offset);
struct pobj_action_internal *seti =
(struct pobj_action_internal *)set;
/*
* Since this pointer can reside in an object that will
* be reallocated later on we need to be able to
* find and update it when that happens.
*/
struct palloc_defrag_entry e = {&seti->ptr};
struct ravl_node *n = ravl_find(objvp, &e,
RAVL_PREDICATE_EQUAL);
if (n != NULL)
ravl_remove(objvp, n);
/*
* Notice that the tree is ordered by the content of the
* pointer, not the pointer itself. This might look odd,
* but we are inserting a *different* pointer to the
* same pointer to an offset.
*/
if (ravl_emplace_copy(objvp, &e) != 0)
goto err;
continue;
}
if (result)
result->total++;
prev_reserve = NULL;
prev_offset = offset;
struct memory_block m = memblock_from_offset(heap, offset);
if (m.type == MEMORY_BLOCK_HUGE)
continue;
os_mutex_t *mlock = m.m_ops->get_lock(&m);
os_mutex_lock(mlock);
unsigned original_fillpct = m.m_ops->fill_pct(&m);
os_mutex_unlock(mlock);
/*
* Empirically, 50% fill rate is the sweetspot for moving
* objects between runs. Other values tend to produce worse
* results.
*/
if (original_fillpct > 50)
continue;
size_t user_size = m.m_ops->get_user_size(&m);
struct pobj_action *reserve = pobj_actions_add(&actv);
if (palloc_reservation_create(heap, user_size,
NULL, NULL,
m.m_ops->get_extra(&m), m.m_ops->get_flags(&m),
0, HEAP_ARENA_PER_THREAD,
(struct pobj_action_internal *)reserve) != 0) {
VEC_POP_BACK(&actv);
continue;
}
uint64_t new_offset = reserve->heap.offset;
VALGRIND_ADD_TO_TX(
HEAP_OFF_TO_PTR(heap, new_offset),
user_size);
pmemops_memcpy(&heap->p_ops,
HEAP_OFF_TO_PTR(heap, new_offset),
HEAP_OFF_TO_PTR(heap, *offsetp),
user_size,
0);
VALGRIND_REMOVE_FROM_TX(
HEAP_OFF_TO_PTR(heap, new_offset),
user_size);
/*
* If there is a pointer provided by the user inside of the
* object we are in the process of reallocating, we need to
* find that pointer and update it to reflect the new location
* of PMEMoid.
*/
ptrdiff_t diff = (ptrdiff_t)(new_offset - offset);
uint64_t *objptr = (uint64_t *)((uint64_t)heap->base + offset);
uint64_t objend = ((uint64_t)objptr + user_size);
struct ravl_node *nptr = NULL;
enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL;
struct palloc_defrag_entry search_entry = {&objptr};
while ((nptr = ravl_find(objvp, &search_entry, p)) != NULL) {
p = RAVL_PREDICATE_GREATER;
struct palloc_defrag_entry *e = ravl_data(nptr);
uint64_t poffset = (uint64_t)(*e->offsetp);
if (poffset >= objend)
break;
struct palloc_defrag_entry ne = *e;
ravl_remove(objvp, nptr);
objptr = (uint64_t *)poffset;
poffset = (uint64_t)((ptrdiff_t)poffset + diff);
*ne.offsetp = (uint64_t *)poffset;
}
offsetp = objv[i];
struct pobj_action *set = pobj_actions_add(&actv);
/*
* We need to change the pointer in the tree to the pointer
* of this new unpublished action, so that it can be updated
* later on if needed.
*/
palloc_set_value(heap, set, offsetp, new_offset);
struct pobj_action_internal *seti =
(struct pobj_action_internal *)set;
struct palloc_defrag_entry e = {&seti->ptr};
struct ravl_node *n = ravl_find(objvp, &e,
RAVL_PREDICATE_EQUAL);
if (n != NULL)
ravl_remove(objvp, n);
/* same as above, this is a different pointer to same content */
if (ravl_emplace_copy(objvp, &e) != 0)
goto err;
struct pobj_action *dfree = pobj_actions_add(&actv);
palloc_defer_free(heap, offset, dfree);
if (result)
result->relocated++;
prev_reserve = reserve;
prev_offset = offset;
}
if (VEC_SIZE(&actv) != 0) {
size_t entries_size =
VEC_SIZE(&actv) * sizeof(struct ulog_entry_val);
if (operation_reserve(ctx, entries_size) != 0)
goto err;
palloc_publish(heap, VEC_ARR(&actv), VEC_SIZE(&actv), ctx);
} else {
operation_cancel(ctx);
}
ret = 0;
err:
if (ret != 0)
palloc_cancel(heap, VEC_ARR(&actv), VEC_SIZE(&actv));
VEC_DELETE(&actv);
err_objvp:
ravl_delete(objvp);
err_ravl:
if (ret != 0)
operation_cancel(ctx);
return ret;
}
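/*
 * Usage sketch (illustrative only, compiled out): feeding palloc_defrag()
 * with pointers to the persistent offsets that should be relocated. In
 * libpmemobj proper this is driven by pmemobj_defrag(), which collects the
 * offset fields of the PMEMoids supplied by the user.
 */
#if 0
static int
example_defrag_two(struct palloc_heap *heap, struct operation_context *ctx,
	uint64_t *offa, uint64_t *offb)
{
	uint64_t *objv[] = {offa, offb};
	struct pobj_defrag_result res = {0, 0};
	return palloc_defrag(heap, objv, 2, ctx, &res);
}
#endif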
/*
* palloc_usable_size -- returns the number of bytes in the memory block
*/
size_t
palloc_usable_size(struct palloc_heap *heap, uint64_t off)
{
struct memory_block m = memblock_from_offset(heap, off);
return m.m_ops->get_user_size(&m);
}
/*
* palloc_extra -- returns allocation extra field
*/
uint64_t
palloc_extra(struct palloc_heap *heap, uint64_t off)
{
struct memory_block m = memblock_from_offset(heap, off);
return m.m_ops->get_extra(&m);
}
/*
* palloc_flags -- returns allocation flags
*/
uint16_t
palloc_flags(struct palloc_heap *heap, uint64_t off)
{
struct memory_block m = memblock_from_offset(heap, off);
return m.m_ops->get_flags(&m);
}
/*
* pmalloc_search_cb -- (internal) foreach callback.
*/
static int
pmalloc_search_cb(const struct memory_block *m, void *arg)
{
struct memory_block *out = arg;
if (MEMORY_BLOCK_EQUALS(*m, *out))
return 0; /* skip the same object */
*out = *m;
return 1;
}
/*
* palloc_first -- returns the first object from the heap.
*/
uint64_t
palloc_first(struct palloc_heap *heap)
{
struct memory_block search = MEMORY_BLOCK_NONE;
heap_foreach_object(heap, pmalloc_search_cb,
&search, MEMORY_BLOCK_NONE);
if (MEMORY_BLOCK_IS_NONE(search))
return 0;
void *uptr = search.m_ops->get_user_data(&search);
return HEAP_PTR_TO_OFF(heap, uptr);
}
/*
* palloc_next -- returns the next object relative to 'off'.
*/
uint64_t
palloc_next(struct palloc_heap *heap, uint64_t off)
{
struct memory_block m = memblock_from_offset(heap, off);
struct memory_block search = m;
heap_foreach_object(heap, pmalloc_search_cb, &search, m);
if (MEMORY_BLOCK_IS_NONE(search) ||
MEMORY_BLOCK_EQUALS(search, m))
return 0;
void *uptr = search.m_ops->get_user_data(&search);
return HEAP_PTR_TO_OFF(heap, uptr);
}
/*
* palloc_boot -- initializes allocator section
*/
int
palloc_boot(struct palloc_heap *heap, void *heap_start,
uint64_t heap_size, uint64_t *sizep,
void *base, struct pmem_ops *p_ops, struct stats *stats,
struct pool_set *set)
{
return heap_boot(heap, heap_start, heap_size, sizep,
base, p_ops, stats, set);
}
/*
* palloc_buckets_init -- initialize buckets
*/
int
palloc_buckets_init(struct palloc_heap *heap)
{
return heap_buckets_init(heap);
}
/*
* palloc_init -- initializes palloc heap
*/
int
palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops)
{
return heap_init(heap_start, heap_size, sizep, p_ops);
}
/*
* palloc_heap_end -- returns first address after heap
*/
void *
palloc_heap_end(struct palloc_heap *h)
{
return heap_end(h);
}
/*
* palloc_heap_check -- verifies heap state
*/
int
palloc_heap_check(void *heap_start, uint64_t heap_size)
{
return heap_check(heap_start, heap_size);
}
/*
* palloc_heap_check_remote -- verifies state of remote replica
*/
int
palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops)
{
return heap_check_remote(heap_start, heap_size, ops);
}
/*
* palloc_heap_cleanup -- cleanups the volatile heap state
*/
void
palloc_heap_cleanup(struct palloc_heap *heap)
{
heap_cleanup(heap);
}
#if VG_MEMCHECK_ENABLED
/*
* palloc_vg_register_alloc -- (internal) registers allocation header
* in Valgrind
*/
static int
palloc_vg_register_alloc(const struct memory_block *m, void *arg)
{
struct palloc_heap *heap = arg;
m->m_ops->reinit_header(m);
void *uptr = m->m_ops->get_user_data(m);
size_t usize = m->m_ops->get_user_size(m);
VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, uptr, usize);
VALGRIND_DO_MAKE_MEM_DEFINED(uptr, usize);
return 0;
}
/*
* palloc_heap_vg_open -- notifies Valgrind about heap layout
*/
void
palloc_heap_vg_open(struct palloc_heap *heap, int objects)
{
heap_vg_open(heap, palloc_vg_register_alloc, heap, objects);
}
#endif
| 36,470 | 26.156366 | 107 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memops.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* memops.h -- aggregated memory operations helper definitions
*/
#ifndef LIBPMEMOBJ_MEMOPS_H
#define LIBPMEMOBJ_MEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
#include "ulog.h"
#include "lane.h"
#ifdef __cplusplus
extern "C" {
#endif
enum operation_log_type {
LOG_PERSISTENT, /* log of persistent modifications */
LOG_TRANSIENT, /* log of transient memory modifications */
MAX_OPERATION_LOG_TYPE
};
enum log_type {
LOG_TYPE_UNDO,
LOG_TYPE_REDO,
MAX_LOG_TYPE,
};
struct user_buffer_def {
void *addr;
size_t size;
};
#ifdef GET_NDP_BREAKDOWN
extern uint64_t ulogCycles;
#endif
#ifdef USE_NDP_REDO
extern int use_ndp_redo;
#endif
struct operation_context;
struct operation_context *
operation_new(struct ulog *redo, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type);
void operation_init(struct operation_context *ctx);
void operation_start(struct operation_context *ctx);
void operation_resume(struct operation_context *ctx);
void operation_delete(struct operation_context *ctx);
void operation_free_logs(struct operation_context *ctx, uint64_t flags);
int operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type);
int operation_add_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type);
int operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type);
int operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_set_auto_reserve(struct operation_context *ctx,
int auto_reserve);
void operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer);
int operation_get_any_user_buffer(struct operation_context *ctx);
int operation_user_buffer_range_cmp(const void *lhs, const void *rhs);
int operation_reserve(struct operation_context *ctx, size_t new_capacity);
void operation_process(struct operation_context *ctx);
void operation_finish(struct operation_context *ctx, unsigned flags);
void operation_cancel(struct operation_context *ctx);
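/*
 * Typical lifecycle of a context (illustrative sketch; see list.c and
 * palloc.c for real call sites; '&obj->field' stands for any 8-byte
 * persistent word):
 *
 *	operation_start(ctx);
 *	operation_add_entry(ctx, &obj->field, value, ULOG_OPERATION_SET);
 *	...more entries or buffers...
 *	operation_process(ctx);		applies and persists the logged changes
 *	operation_finish(ctx, 0);
 *
 * A context that has not been processed yet can be abandoned with
 * operation_cancel() instead.
 */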
#ifdef __cplusplus
}
#endif
#endif
| 2,467 | 26.422222 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/libpmemobj_main.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* libpmemobj_main.c -- entry point for libpmemobj.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
void libpmemobj_init(void);
void libpmemobj_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemobj_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemobj_fini();
break;
}
return TRUE;
}
| 669 | 19.30303 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/pmalloc.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* pmalloc.h -- internal definitions for persistent malloc
*/
#ifndef LIBPMEMOBJ_PMALLOC_H
#define LIBPMEMOBJ_PMALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/* single operations done in the internal context of the lane */
int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id);
int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
void pfree(PMEMobjpool *pop, uint64_t *off);
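/*
 * Usage sketch (illustrative only): a fail-safe allocation into an offset
 * stored in the pool, followed by its release; 'root->foo_off' stands for
 * any hypothetical persistent 8-byte offset field.
 *
 *	pmalloc(pop, &root->foo_off, 64, 0, 0);
 *	...
 *	pfree(pop, &root->foo_off);
 */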
/* external operation to be used together with context-aware palloc funcs */
struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop);
struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop);
void pmalloc_operation_release(PMEMobjpool *pop);
void pmalloc_ctl_register(PMEMobjpool *pop);
int pmalloc_cleanup(PMEMobjpool *pop);
int pmalloc_boot(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 1,291 | 24.333333 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/recycler.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.h -- internal definitions of run recycler
*
* This is a container that stores runs that are currently not used by any of
* the buckets.
*/
#ifndef LIBPMEMOBJ_RECYCLER_H
#define LIBPMEMOBJ_RECYCLER_H 1
#include "memblock.h"
#include "vec.h"
#ifdef __cplusplus
extern "C" {
#endif
struct recycler;
VEC(empty_runs, struct memory_block);
struct recycler_element {
uint32_t max_free_block;
uint32_t free_space;
uint32_t chunk_id;
uint32_t zone_id;
};
struct recycler *recycler_new(struct palloc_heap *layout,
size_t nallocs, size_t *peak_arenas);
void recycler_delete(struct recycler *r);
struct recycler_element recycler_element_new(struct palloc_heap *heap,
const struct memory_block *m);
int recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element);
int recycler_get(struct recycler *r, struct memory_block *m);
struct empty_runs recycler_recalc(struct recycler *r, int force);
void recycler_inc_unaccounted(struct recycler *r,
const struct memory_block *m);
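/*
 * Usage sketch (illustrative only): a run no longer used by a bucket is
 * described and stored, and later handed back to a bucket that asks for a
 * block of the same class:
 *
 *	struct recycler_element e = recycler_element_new(heap, &m);
 *	recycler_put(r, &m, e);
 *	...
 *	if (recycler_get(r, &m) == 0)
 *		...reuse the run described by m...
 */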
#ifdef __cplusplus
}
#endif
#endif
| 1,158 | 20.867925 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/palloc.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* palloc.h -- internal definitions for persistent allocator
*/
#ifndef LIBPMEMOBJ_PALLOC_H
#define LIBPMEMOBJ_PALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "ulog.h"
#include "valgrind_internal.h"
#include "stats.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PALLOC_CTL_DEBUG_NO_PATTERN (-1)
struct palloc_heap {
struct pmem_ops p_ops;
struct heap_layout *layout;
struct heap_rt *rt;
uint64_t *sizep;
uint64_t growsize;
struct stats *stats;
struct pool_set *set;
void *base;
int alloc_pattern;
};
struct memory_block;
typedef int (*palloc_constr)(void *base, void *ptr,
size_t usable_size, void *arg);
int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct operation_context *ctx);
int
palloc_reserve(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct pobj_action *act);
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
struct pobj_action *act);
void
palloc_cancel(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt);
void
palloc_publish(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx);
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
uint64_t *ptr, uint64_t value);
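/*
 * Illustrative sketch (assumed flow, not part of the original header):
 * a reservation is volatile until it is published together with an
 * operation context. The heap and ctx variables are placeholders - the
 * context would normally come from the pmalloc layer
 * (pmalloc_operation_hold() in pmalloc.h). Calling
 * palloc_cancel(heap, &act, 1) instead would simply drop the reservation.
 *
 *	struct pobj_action act;
 *	if (palloc_reserve(heap, 64, NULL, NULL, 0, 0, 0, 0, &act) != 0)
 *		return;				// no space for the reservation
 *
 *	palloc_publish(heap, &act, 1, ctx);	// act becomes persistent
 */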
uint64_t palloc_first(struct palloc_heap *heap);
uint64_t palloc_next(struct palloc_heap *heap, uint64_t off);
size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off);
uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off);
uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off);
int palloc_boot(struct palloc_heap *heap, void *heap_start,
uint64_t heap_size, uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int palloc_buckets_init(struct palloc_heap *heap);
int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void *palloc_heap_end(struct palloc_heap *h);
int palloc_heap_check(void *heap_start, uint64_t heap_size);
int palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
void palloc_heap_cleanup(struct palloc_heap *heap);
size_t palloc_heap(void *heap_start);
int palloc_defrag(struct palloc_heap *heap, uint64_t **objv, size_t objcnt,
struct operation_context *ctx, struct pobj_defrag_result *result);
/* foreach callback, terminates iteration if return value is non-zero */
typedef int (*object_callback)(const struct memory_block *m, void *arg);
#if VG_MEMCHECK_ENABLED
void palloc_heap_vg_open(struct palloc_heap *heap, int objects);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,006 | 25.377193 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container.h -- internal definitions for block containers
*/
#ifndef LIBPMEMOBJ_CONTAINER_H
#define LIBPMEMOBJ_CONTAINER_H 1
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container {
const struct block_container_ops *c_ops;
struct palloc_heap *heap;
};
struct block_container_ops {
/* inserts a new memory block into the container */
int (*insert)(struct block_container *c, const struct memory_block *m);
/* removes exact match memory block */
int (*get_rm_exact)(struct block_container *c,
const struct memory_block *m);
/* removes and returns the best-fit memory block for size */
int (*get_rm_bestfit)(struct block_container *c,
struct memory_block *m);
/* checks whether the container is empty */
int (*is_empty)(struct block_container *c);
/* removes all elements from the container */
void (*rm_all)(struct block_container *c);
/* deletes the container */
void (*destroy)(struct block_container *c);
};
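/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * callers interact with every container implementation through c_ops,
 * so the backing data structure is interchangeable. The container and
 * memory_block variables are placeholders.
 *
 *	c->c_ops->insert(c, &m);		// make the block available
 *	...
 *	if (c->c_ops->get_rm_bestfit(c, &m) == 0) {
 *		// 'm' is the best fit for the requested m.size_idx
 *	}
 */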
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_H */
| 1,125 | 21.979592 | 72 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/stats.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.h -- definitions of statistics
*/
#ifndef LIBPMEMOBJ_STATS_H
#define LIBPMEMOBJ_STATS_H 1
#include "ctl.h"
#include "libpmemobj/ctl.h"
#ifdef __cplusplus
extern "C" {
#endif
struct stats_transient {
uint64_t heap_run_allocated;
uint64_t heap_run_active;
};
struct stats_persistent {
uint64_t heap_curr_allocated;
};
struct stats {
enum pobj_stats_enabled enabled;
struct stats_transient *transient;
struct stats_persistent *persistent;
};
#define STATS_INC(stats, type, name, value) do {\
STATS_INC_##type(stats, name, value);\
} while (0)
#define STATS_INC_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_INC_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SUB(stats, type, name, value) do {\
STATS_SUB_##type(stats, name, value);\
} while (0)
#define STATS_SUB_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_SUB_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SET(stats, type, name, value) do {\
STATS_SET_##type(stats, name, value);\
} while (0)
#define STATS_SET_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->transient->name),\
(value), memory_order_release);\
} while (0)
#define STATS_SET_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->persistent->name),\
(value), memory_order_release);\
} while (0)
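/*
 * Illustrative examples (not part of the original header): the macros
 * dispatch on the statistic type, so updating a persistent and a
 * transient counter looks the same at the call site (field names taken
 * from the structs above).
 *
 *	STATS_INC(pop->stats, persistent, heap_curr_allocated, size);
 *	STATS_INC(pop->stats, transient, heap_run_allocated, 1);
 *	STATS_SUB(pop->stats, persistent, heap_curr_allocated, size);
 */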
#define STATS_CTL_LEAF(type, name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(type##_##name), NULL, NULL},\
NULL, NULL}
#define STATS_CTL_HANDLER(type, name, varname)\
static int CTL_READ_HANDLER(type##_##name)(void *ctx,\
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\
{\
PMEMobjpool *pop = ctx;\
uint64_t *argv = arg;\
util_atomic_load_explicit64(&pop->stats->type->varname,\
argv, memory_order_acquire);\
return 0;\
}
void stats_ctl_register(PMEMobjpool *pop);
struct stats *stats_new(PMEMobjpool *pop);
void stats_delete(PMEMobjpool *pop, struct stats *stats);
#ifdef __cplusplus
}
#endif
#endif
| 2,990 | 26.440367 | 71 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/bucket.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* bucket.c -- bucket implementation
*
* Buckets manage volatile state of the heap. They are the abstraction layer
* between the heap-managed chunks/runs and memory allocations.
*
* Each bucket instance can have a different underlying container that is
* responsible for selecting blocks - which means that whether the allocator
* serves memory blocks in best/first/next -fit manner is decided during bucket
* creation.
*/
#include "alloc_class.h"
#include "bucket.h"
#include "heap.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* bucket_new -- creates a new bucket instance
*/
struct bucket *
bucket_new(struct block_container *c, struct alloc_class *aclass)
{
if (c == NULL)
return NULL;
struct bucket *b = Malloc(sizeof(*b));
if (b == NULL)
return NULL;
b->container = c;
b->c_ops = c->c_ops;
util_mutex_init(&b->lock);
b->is_active = 0;
b->active_memory_block = NULL;
if (aclass && aclass->type == CLASS_RUN) {
b->active_memory_block =
Zalloc(sizeof(struct memory_block_reserved));
if (b->active_memory_block == NULL)
goto error_active_alloc;
}
b->aclass = aclass;
return b;
error_active_alloc:
util_mutex_destroy(&b->lock);
Free(b);
return NULL;
}
/*
* bucket_insert_block -- inserts a block into the bucket
*/
int
bucket_insert_block(struct bucket *b, const struct memory_block *m)
{
#if VG_MEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_DRD_ENABLED
if (On_memcheck || On_drd_or_hg) {
size_t size = m->m_ops->get_real_size(m);
void *data = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_NOACCESS(data, size);
VALGRIND_ANNOTATE_NEW_MEMORY(data, size);
}
#endif
return b->c_ops->insert(b->container, m);
}
/*
* bucket_delete -- cleanups and deallocates bucket instance
*/
void
bucket_delete(struct bucket *b)
{
if (b->active_memory_block)
Free(b->active_memory_block);
util_mutex_destroy(&b->lock);
b->c_ops->destroy(b->container);
Free(b);
}
/*
* bucket_current_resvp -- returns the pointer to the current reservation count
*/
int *
bucket_current_resvp(struct bucket *b)
{
return b->active_memory_block ? &b->active_memory_block->nresv : NULL;
}
| 2,251 | 21.52 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container_seglists.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container_seglists.c -- implementation of segregated lists block container
*
* This container is constructed from N (up to 64) intrusive lists and a
* single 8 byte bitmap that stores the information whether a given list is
* empty or not.
*/
#include "container_seglists.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#include "vecq.h"
#define SEGLIST_BLOCK_LISTS 64U
struct block_container_seglists {
struct block_container super;
struct memory_block m;
VECQ(, uint32_t) blocks[SEGLIST_BLOCK_LISTS];
uint64_t nonempty_lists;
};
/*
* container_seglists_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_seglists_insert_block(struct block_container *bc,
const struct memory_block *m)
{
ASSERT(m->chunk_id < MAX_CHUNK);
ASSERT(m->zone_id < UINT16_MAX);
ASSERTne(m->size_idx, 0);
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
if (c->nonempty_lists == 0)
c->m = *m;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
ASSERT(m->chunk_id == c->m.chunk_id);
ASSERT(m->zone_id == c->m.zone_id);
if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0)
return -1;
/* marks the list as nonempty */
c->nonempty_lists |= 1ULL << (m->size_idx - 1);
return 0;
}
/*
* container_seglists_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_seglists_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
uint32_t i = 0;
/* applicable lists */
uint64_t size_mask = (1ULL << (m->size_idx - 1)) - 1;
uint64_t v = c->nonempty_lists & ~size_mask;
if (v == 0)
return ENOMEM;
/* finds the list that serves the smallest applicable size */
i = util_lssb_index64(v);
uint32_t block_offset = VECQ_DEQUEUE(&c->blocks[i]);
if (VECQ_SIZE(&c->blocks[i]) == 0) /* marks the list as empty */
c->nonempty_lists &= ~(1ULL << (i));
*m = c->m;
m->block_off = block_offset;
m->size_idx = i + 1;
return 0;
}
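/*
 * Worked example (illustrative, arbitrary values): for a request with
 * size_idx == 3 the mask is (1 << 2) - 1 == 0b011, so the lists serving
 * 1 and 2 units are ignored. With nonempty_lists == 0b10110, v becomes
 * 0b10100 and util_lssb_index64(v) == 2, i.e. the list of 3-unit blocks
 * is the smallest one that can still satisfy the request.
 */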
/*
* container_seglists_is_empty -- (internal) checks whether the container is
* empty
*/
static int
container_seglists_is_empty(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
return c->nonempty_lists == 0;
}
/*
* container_seglists_rm_all -- (internal) removes all elements from the tree
*/
static void
container_seglists_rm_all(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_CLEAR(&c->blocks[i]);
c->nonempty_lists = 0;
}
/*
 * container_seglists_destroy -- (internal) deletes the container
*/
static void
container_seglists_destroy(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_DELETE(&c->blocks[i]);
Free(c);
}
/*
 * This container does not support retrieval of exact memory blocks, but
 * otherwise provides best-fit in O(1) time for unit sizes that do not exceed 64.
*/
static const struct block_container_ops container_seglists_ops = {
.insert = container_seglists_insert_block,
.get_rm_exact = NULL,
.get_rm_bestfit = container_seglists_get_rm_block_bestfit,
.is_empty = container_seglists_is_empty,
.rm_all = container_seglists_rm_all,
.destroy = container_seglists_destroy,
};
/*
* container_new_seglists -- allocates and initializes a seglists container
*/
struct block_container *
container_new_seglists(struct palloc_heap *heap)
{
struct block_container_seglists *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_seglists_ops;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_INIT(&bc->blocks[i]);
bc->nonempty_lists = 0;
return (struct block_container *)&bc->super;
error_container_malloc:
return NULL;
}
| 4,215 | 23.511628 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container_ravl.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* container_ravl.h -- internal definitions for ravl-based block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_RAVL_H
#define LIBPMEMOBJ_CONTAINER_RAVL_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_ravl(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_RAVL_H */
| 445 | 17.583333 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/tx.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* tx.h -- internal definitions for transactions
*/
#ifndef LIBPMEMOBJ_INTERNAL_TX_H
#define LIBPMEMOBJ_INTERNAL_TX_H 1
#include <stdint.h>
#include "obj.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15)
#define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12)
#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))
#define TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT CACHELINE_SIZE
#define TX_SNAPSHOT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_SNAPSHOT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_buf)
#define TX_INTENT_LOG_BUFFER_ALIGNMENT CACHELINE_SIZE
#define TX_INTENT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_INTENT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_val)
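/*
 * Worked example (illustrative): TX_ALIGN_SIZE(13, TX_RANGE_MASK)
 * == (13 + 7) & ~7 == 16, while the same size rounded with
 * TX_RANGE_MASK_LEGACY == 31 yields 32.
 */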
struct tx_parameters {
size_t cache_size;
};
/*
* Returns the current transaction's pool handle, NULL if not within
* a transaction.
*/
PMEMobjpool *tx_get_pop(void);
void tx_ctl_register(PMEMobjpool *pop);
struct tx_parameters *tx_params_new(void);
void tx_params_delete(struct tx_parameters *tx_params);
#ifdef __cplusplus
}
#endif
#endif
| 1,258 | 22.314815 | 68 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/critnib.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* critnib.c -- implementation of critnib tree
*
* It offers identity lookup (like a hashmap) and <= lookup (like a search
* tree). Unlike some hashing algorithms (cuckoo hash, perfect hashing) the
* complexity isn't constant, but for data sizes we expect it's several
* times as fast as cuckoo, and has no "stop the world" cases that would
* cause latency (ie, better worst case behaviour).
*/
/*
* STRUCTURE DESCRIPTION
*
* Critnib is a hybrid between a radix tree and DJ Bernstein's critbit:
* it skips nodes for uninteresting radix nodes (ie, ones that would have
* exactly one child), this requires adding to every node a field that
* describes the slice (4-bit in our case) that this radix level is for.
*
* This implementation also stores each node's path (ie, bits that are
* common to every key in that subtree) -- this doesn't help with lookups
* at all (unused in == match, could be reconstructed at no cost in <=
* after first dive) but simplifies inserts and removes. If we ever want
* that piece of memory it's easy to trim it down.
*/
/*
* CONCURRENCY ISSUES
*
 * Reads are completely lock-free and sync-free, but only almost wait-free:
* if for some reason a read thread gets pathologically stalled, it will
* notice the data being stale and restart the work. In usual cases,
* the structure having been modified does _not_ cause a restart.
*
* Writes could be easily made lock-free as well (with only a cmpxchg
* sync), but this leads to problems with removes. A possible solution
* would be doing removes by overwriting by NULL w/o freeing -- yet this
* would lead to the structure growing without bounds. Complex per-node
* locks would increase concurrency but they slow down individual writes
* enough that in practice a simple global write lock works faster.
*
* Removes are the only operation that can break reads. The structure
* can do local RCU well -- the problem being knowing when it's safe to
* free. Any synchronization with reads would kill their speed, thus
* instead we have a remove count. The grace period is DELETED_LIFE,
* after which any read will notice staleness and restart its work.
*/
#include <errno.h>
#include <stdbool.h>
#include "alloc.h"
#include "critnib.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* A node that has been deleted is left untouched for this many delete
* cycles. Reads have guaranteed correctness if they took no longer than
* DELETED_LIFE concurrent deletes, otherwise they notice something is
* wrong and restart. The memory of deleted nodes is never freed to
* malloc nor their pointers lead anywhere wrong, thus a stale read will
* (temporarily) get a wrong answer but won't crash.
*
* There's no need to count writes as they never interfere with reads.
*
* Allowing stale reads (of arbitrarily old writes or of deletes less than
* DELETED_LIFE old) might sound counterintuitive, but it doesn't affect
* semantics in any way: the thread could have been stalled just after
* returning from our code. Thus, the guarantee is: the result of get() or
* find_le() is a value that was current at any point between the call
* start and end.
*/
#define DELETED_LIFE 16
#define SLICE 4
#define NIB ((1ULL << SLICE) - 1)
#define SLNODES (1 << SLICE)
typedef unsigned char sh_t;
struct critnib_node {
/*
* path is the part of a tree that's already traversed (be it through
* explicit nodes or collapsed links) -- ie, any subtree below has all
* those bits set to this value.
*
* nib is a 4-bit slice that's an index into the node's children.
*
* shift is the length (in bits) of the part of the key below this node.
*
* nib
* |XXXXXXXXXX|?|*****|
* path ^
* +-----+
* shift
*/
struct critnib_node *child[SLNODES];
uint64_t path;
sh_t shift;
};
struct critnib_leaf {
uint64_t key;
void *value;
};
struct critnib {
struct critnib_node *root;
/* pool of freed nodes: singly linked list, next at child[0] */
struct critnib_node *deleted_node;
struct critnib_leaf *deleted_leaf;
/* nodes removed but not yet eligible for reuse */
struct critnib_node *pending_del_nodes[DELETED_LIFE];
struct critnib_leaf *pending_del_leaves[DELETED_LIFE];
uint64_t remove_count;
os_mutex_t mutex; /* writes/removes */
};
/*
* atomic load
*/
static void
load(void *src, void *dst)
{
util_atomic_load_explicit64((uint64_t *)src, (uint64_t *)dst,
memory_order_acquire);
}
/*
* atomic store
*/
static void
store(void *dst, void *src)
{
util_atomic_store_explicit64((uint64_t *)dst, (uint64_t)src,
memory_order_release);
}
/*
* internal: is_leaf -- check tagged pointer for leafness
*/
static inline bool
is_leaf(struct critnib_node *n)
{
return (uint64_t)n & 1;
}
/*
* internal: to_leaf -- untag a leaf pointer
*/
static inline struct critnib_leaf *
to_leaf(struct critnib_node *n)
{
return (void *)((uint64_t)n & ~1ULL);
}
/*
* internal: path_mask -- return bit mask of a path above a subtree [shift]
* bits tall
*/
static inline uint64_t
path_mask(sh_t shift)
{
return ~NIB << shift;
}
/*
* internal: slice_index -- return index of child at the given nib
*/
static inline unsigned
slice_index(uint64_t key, sh_t shift)
{
return (unsigned)((key >> shift) & NIB);
}
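/*
 * Worked example (illustrative): with SLICE == 4 the key 0xA3F is
 * consumed one nibble at a time - slice_index(0xA3F, 8) == 0xA,
 * slice_index(0xA3F, 4) == 0x3 and slice_index(0xA3F, 0) == 0xF.
 * path_mask(4) masks away the shift-4 nibble and everything below it,
 * leaving only the bits common to the whole subtree rooted at that node.
 */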
/*
* critnib_new -- allocates a new critnib structure
*/
struct critnib *
critnib_new(void)
{
struct critnib *c = Zalloc(sizeof(struct critnib));
if (!c)
return NULL;
util_mutex_init(&c->mutex);
VALGRIND_HG_DRD_DISABLE_CHECKING(&c->root, sizeof(c->root));
VALGRIND_HG_DRD_DISABLE_CHECKING(&c->remove_count,
sizeof(c->remove_count));
return c;
}
/*
* internal: delete_node -- recursively free (to malloc) a subtree
*/
static void
delete_node(struct critnib_node *__restrict n)
{
if (!is_leaf(n)) {
for (int i = 0; i < SLNODES; i++) {
if (n->child[i])
delete_node(n->child[i]);
}
Free(n);
} else {
Free(to_leaf(n));
}
}
/*
* critnib_delete -- destroy and free a critnib struct
*/
void
critnib_delete(struct critnib *c)
{
if (c->root)
delete_node(c->root);
util_mutex_destroy(&c->mutex);
for (struct critnib_node *m = c->deleted_node; m; ) {
struct critnib_node *mm = m->child[0];
Free(m);
m = mm;
}
for (struct critnib_leaf *k = c->deleted_leaf; k; ) {
struct critnib_leaf *kk = k->value;
Free(k);
k = kk;
}
for (int i = 0; i < DELETED_LIFE; i++) {
Free(c->pending_del_nodes[i]);
Free(c->pending_del_leaves[i]);
}
Free(c);
}
/*
* internal: free_node -- free (to internal pool, not malloc) a node.
*
* We cannot free them to malloc as a stalled reader thread may still walk
* through such nodes; it will notice the result being bogus but only after
* completing the walk, thus we need to ensure any freed nodes still point
* to within the critnib structure.
*/
static void
free_node(struct critnib *__restrict c, struct critnib_node *__restrict n)
{
if (!n)
return;
ASSERT(!is_leaf(n));
n->child[0] = c->deleted_node;
c->deleted_node = n;
}
/*
* internal: alloc_node -- allocate a node from our pool or from malloc
*/
static struct critnib_node *
alloc_node(struct critnib *__restrict c)
{
if (!c->deleted_node) {
struct critnib_node *n = Malloc(sizeof(struct critnib_node));
if (n == NULL)
ERR("!Malloc");
return n;
}
struct critnib_node *n = c->deleted_node;
c->deleted_node = n->child[0];
VALGRIND_ANNOTATE_NEW_MEMORY(n, sizeof(*n));
return n;
}
/*
* internal: free_leaf -- free (to internal pool, not malloc) a leaf.
*
* See free_node().
*/
static void
free_leaf(struct critnib *__restrict c, struct critnib_leaf *__restrict k)
{
if (!k)
return;
k->value = c->deleted_leaf;
c->deleted_leaf = k;
}
/*
* internal: alloc_leaf -- allocate a leaf from our pool or from malloc
*/
static struct critnib_leaf *
alloc_leaf(struct critnib *__restrict c)
{
if (!c->deleted_leaf) {
struct critnib_leaf *k = Malloc(sizeof(struct critnib_leaf));
if (k == NULL)
ERR("!Malloc");
return k;
}
struct critnib_leaf *k = c->deleted_leaf;
c->deleted_leaf = k->value;
VALGRIND_ANNOTATE_NEW_MEMORY(k, sizeof(*k));
return k;
}
/*
 * critnib_insert -- write a key:value pair to the critnib structure
*
* Returns:
* • 0 on success
* • EEXIST if such a key already exists
* • ENOMEM if we're out of memory
*
* Takes a global write lock but doesn't stall any readers.
*/
int
critnib_insert(struct critnib *c, uint64_t key, void *value)
{
util_mutex_lock(&c->mutex);
struct critnib_leaf *k = alloc_leaf(c);
if (!k) {
util_mutex_unlock(&c->mutex);
return ENOMEM;
}
VALGRIND_HG_DRD_DISABLE_CHECKING(k, sizeof(struct critnib_leaf));
k->key = key;
k->value = value;
struct critnib_node *kn = (void *)((uint64_t)k | 1);
struct critnib_node *n = c->root;
if (!n) {
c->root = kn;
util_mutex_unlock(&c->mutex);
return 0;
}
struct critnib_node **parent = &c->root;
struct critnib_node *prev = c->root;
while (n && !is_leaf(n) && (key & path_mask(n->shift)) == n->path) {
prev = n;
parent = &n->child[slice_index(key, n->shift)];
n = *parent;
}
if (!n) {
n = prev;
store(&n->child[slice_index(key, n->shift)], kn);
util_mutex_unlock(&c->mutex);
return 0;
}
uint64_t path = is_leaf(n) ? to_leaf(n)->key : n->path;
/* Find where the path differs from our key. */
uint64_t at = path ^ key;
if (!at) {
ASSERT(is_leaf(n));
free_leaf(c, to_leaf(kn));
/* fail instead of replacing */
util_mutex_unlock(&c->mutex);
return EEXIST;
}
/* and convert that to an index. */
sh_t sh = util_mssb_index64(at) & (sh_t)~(SLICE - 1);
struct critnib_node *m = alloc_node(c);
if (!m) {
free_leaf(c, to_leaf(kn));
util_mutex_unlock(&c->mutex);
return ENOMEM;
}
VALGRIND_HG_DRD_DISABLE_CHECKING(m, sizeof(struct critnib_node));
for (int i = 0; i < SLNODES; i++)
m->child[i] = NULL;
m->child[slice_index(key, sh)] = kn;
m->child[slice_index(path, sh)] = n;
m->shift = sh;
m->path = key & path_mask(sh);
store(parent, m);
util_mutex_unlock(&c->mutex);
return 0;
}
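/*
 * Worked example (illustrative): inserting key 0x1234 next to an
 * existing leaf 0x1278 gives at == 0x1234 ^ 0x1278 == 0x4c, whose most
 * significant set bit is bit 6, so sh == 6 & ~3 == 4. The new internal
 * node discriminates on the shift-4 nibble: child slice 0x3 points at
 * the new leaf, slice 0x7 at the old one, and its path is
 * 0x1234 & path_mask(4) == 0x1200.
 */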
/*
* critnib_remove -- delete a key from the critnib structure, return its value
*/
void *
critnib_remove(struct critnib *c, uint64_t key)
{
struct critnib_leaf *k;
void *value = NULL;
util_mutex_lock(&c->mutex);
struct critnib_node *n = c->root;
if (!n)
goto not_found;
uint64_t del = util_fetch_and_add64(&c->remove_count, 1) % DELETED_LIFE;
free_node(c, c->pending_del_nodes[del]);
free_leaf(c, c->pending_del_leaves[del]);
c->pending_del_nodes[del] = NULL;
c->pending_del_leaves[del] = NULL;
if (is_leaf(n)) {
k = to_leaf(n);
if (k->key == key) {
store(&c->root, NULL);
goto del_leaf;
}
goto not_found;
}
/*
* n and k are a parent:child pair (after the first iteration); k is the
* leaf that holds the key we're deleting.
*/
struct critnib_node **k_parent = &c->root;
struct critnib_node **n_parent = &c->root;
struct critnib_node *kn = n;
while (!is_leaf(kn)) {
n_parent = k_parent;
n = kn;
k_parent = &kn->child[slice_index(key, kn->shift)];
kn = *k_parent;
if (!kn)
goto not_found;
}
k = to_leaf(kn);
if (k->key != key)
goto not_found;
store(&n->child[slice_index(key, n->shift)], NULL);
/* Remove the node if there's only one remaining child. */
int ochild = -1;
for (int i = 0; i < SLNODES; i++) {
if (n->child[i]) {
if (ochild != -1)
goto del_leaf;
ochild = i;
}
}
ASSERTne(ochild, -1);
store(n_parent, n->child[ochild]);
c->pending_del_nodes[del] = n;
del_leaf:
value = k->value;
c->pending_del_leaves[del] = k;
not_found:
util_mutex_unlock(&c->mutex);
return value;
}
/*
* critnib_get -- query for a key ("==" match), returns value or NULL
*
* Doesn't need a lock but if many deletes happened while our thread was
* somehow stalled the query is restarted (as freed nodes remain unused only
* for a grace period).
*
 * Counterintuitively, it's pointless to return the most current answer;
* we need only one that was valid at any point after the call started.
*/
void *
critnib_get(struct critnib *c, uint64_t key)
{
uint64_t wrs1, wrs2;
void *res;
do {
struct critnib_node *n;
load(&c->remove_count, &wrs1);
load(&c->root, &n);
/*
* critbit algorithm: dive into the tree, looking at nothing but
* each node's critical bit^H^H^Hnibble. This means we risk
		 * going the wrong way if our path is missing, but that's ok...
*/
while (n && !is_leaf(n))
load(&n->child[slice_index(key, n->shift)], &n);
/* ... as we check it at the end. */
struct critnib_leaf *k = to_leaf(n);
res = (n && k->key == key) ? k->value : NULL;
load(&c->remove_count, &wrs2);
} while (wrs1 + DELETED_LIFE <= wrs2);
return res;
}
/*
 * internal: find_successor -- return the value of the rightmost leaf in a subtree
*/
static void *
find_successor(struct critnib_node *__restrict n)
{
while (1) {
int nib;
for (nib = NIB; nib >= 0; nib--)
if (n->child[nib])
break;
if (nib < 0)
return NULL;
n = n->child[nib];
if (is_leaf(n))
return to_leaf(n)->value;
}
}
/*
* internal: find_le -- recursively search <= in a subtree
*/
static void *
find_le(struct critnib_node *__restrict n, uint64_t key)
{
if (!n)
return NULL;
if (is_leaf(n)) {
struct critnib_leaf *k = to_leaf(n);
return (k->key <= key) ? k->value : NULL;
}
/*
* is our key outside the subtree we're in?
*
* If we're inside, all bits above the nib will be identical; note
* that shift points at the nib's lower rather than upper edge, so it
* needs to be masked away as well.
*/
if ((key ^ n->path) >> (n->shift) & ~NIB) {
/*
* subtree is too far to the left?
* -> its rightmost value is good
*/
if (n->path < key)
return find_successor(n);
/*
* subtree is too far to the right?
* -> it has nothing of interest to us
*/
return NULL;
}
unsigned nib = slice_index(key, n->shift);
/* recursive call: follow the path */
{
struct critnib_node *m;
load(&n->child[nib], &m);
void *value = find_le(m, key);
if (value)
return value;
}
/*
* nothing in that subtree? We strayed from the path at this point,
* thus need to search every subtree to our left in this node. No
* need to dive into any but the first non-null, though.
*/
for (; nib > 0; nib--) {
struct critnib_node *m;
load(&n->child[nib - 1], &m);
if (m) {
n = m;
if (is_leaf(n))
return to_leaf(n)->value;
return find_successor(n);
}
}
return NULL;
}
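/*
 * Worked example (illustrative): consider a node with path == 0x100 and
 * shift == 4, i.e. a subtree covering keys 0x100..0x1ff. For key 0x15f
 * the test ((0x15f ^ 0x100) >> 4) & ~NIB is 0x5 & ~0xf == 0, so the
 * search descends into the subtree. For key 0x25f it is 0x35 & ~0xf ==
 * 0x30, the key lies outside, and because path < key the whole subtree
 * sits to the left of the key, so its rightmost value is returned via
 * find_successor().
 */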
/*
* critnib_find_le -- query for a key ("<=" match), returns value or NULL
*
* Same guarantees as critnib_get().
*/
void *
critnib_find_le(struct critnib *c, uint64_t key)
{
uint64_t wrs1, wrs2;
void *res;
do {
load(&c->remove_count, &wrs1);
struct critnib_node *n; /* avoid a subtle TOCTOU */
load(&c->root, &n);
res = n ? find_le(n, key) : NULL;
load(&c->remove_count, &wrs2);
} while (wrs1 + DELETED_LIFE <= wrs2);
return res;
}
| 15,052 | 22.087423 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memblock.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memblock.h -- internal definitions for memory block
*/
#ifndef LIBPMEMOBJ_MEMBLOCK_H
#define LIBPMEMOBJ_MEMBLOCK_H 1
#include <stddef.h>
#include <stdint.h>
#include "os_thread.h"
#include "heap_layout.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MEMORY_BLOCK_NONE \
(struct memory_block)\
{0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK, NULL}
#define MEMORY_BLOCK_IS_NONE(_m)\
((_m).heap == NULL)
#define MEMORY_BLOCK_EQUALS(lhs, rhs)\
((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\
(lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap)
enum memory_block_type {
/*
* Huge memory blocks are directly backed by memory chunks. A single
* huge block can consist of several chunks.
* The persistent representation of huge memory blocks can be thought
* of as a doubly linked list with variable length elements.
* That list is stored in the chunk headers array where one element
* directly corresponds to one chunk.
*
* U - used, F - free, R - footer, . - empty
* |U| represents a used chunk with a size index of 1, with type
* information (CHUNK_TYPE_USED) stored in the corresponding header
* array element - chunk_headers[chunk_id].
*
* |F...R| represents a free chunk with size index of 5. The empty
* chunk headers have undefined values and shouldn't be used. All
* chunks with size larger than 1 must have a footer in the last
	 * corresponding header array - chunk_headers[chunk_id + size_idx - 1].
*
* The above representation of chunks will be used to describe the
* way fail-safety is achieved during heap operations.
*
* Allocation of huge memory block with size index 5:
* Initial heap state: |U| <> |F..R| <> |U| <> |F......R|
*
	 * The only block that matches that size is at the very end of the chunks
* list: |F......R|
*
	 * As the request was for a memory block of size 5, and this one's size is
	 * 7, there's a need to first split the chunk in two.
* 1) The last chunk header of the new allocation is marked as footer
* and the block after that one is marked as free: |F...RF.R|
* This is allowed and has no impact on the heap because this
* modification is into chunk header that is otherwise unused, in
* other words the linked list didn't change.
*
* 2) The size index of the first header is changed from previous value
* of 7 to 5: |F...R||F.R|
* This is a single fail-safe atomic operation and this is the
* first change that is noticeable by the heap operations.
* A single linked list element is split into two new ones.
*
* 3) The allocation process either uses redo log or changes directly
* the chunk header type from free to used: |U...R| <> |F.R|
*
* In a similar fashion the reverse operation, free, is performed:
* Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R|
*
* This is the heap after the previous example with the single chunk
* in between changed from used to free.
*
* 1) Determine the neighbors of the memory block which is being
* freed.
*
* 2) Update the footer (if needed) information of the last chunk which
	 * is the memory block being freed or its neighbor to the right.
* |F| <> |U...R| <> |F.R << this one|
*
* 3) Update the size index and type of the left-most chunk header.
* And so this: |F << this one| <> |U...R| <> |F.R|
* becomes this: |F.......R|
* The entire chunk header can be updated in a single fail-safe
	 * atomic operation because its size is only 64 bits (8 bytes).
*/
MEMORY_BLOCK_HUGE,
/*
* Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1.
* The entire chunk is subdivided into smaller blocks and has an
* additional metadata attached in the form of a bitmap - each bit
* corresponds to a single block.
* In this case there's no need to perform any coalescing or splitting
* on the persistent metadata.
* The bitmap is stored on a variable number of 64 bit values and
* because of the requirement of allocation fail-safe atomicity the
* maximum size index of a memory block from a run is 64 - since that's
* the limit of atomic write guarantee.
*
* The allocation/deallocation process is a single 8 byte write that
* sets/clears the corresponding bits. Depending on the user choice
* it can either be made atomically or using redo-log when grouped with
* other operations.
* It's also important to note that in a case of realloc it might so
* happen that a single 8 byte bitmap value has its bits both set and
* cleared - that's why the run memory block metadata changes operate
* on AND'ing or OR'ing a bitmask instead of directly setting the value.
*/
MEMORY_BLOCK_RUN,
MAX_MEMORY_BLOCK
};
enum memblock_state {
MEMBLOCK_STATE_UNKNOWN,
MEMBLOCK_ALLOCATED,
MEMBLOCK_FREE,
MAX_MEMBLOCK_STATE,
};
/* runtime bitmap information for a run */
struct run_bitmap {
unsigned nvalues; /* number of 8 byte values - size of values array */
unsigned nbits; /* number of valid bits */
size_t size; /* total size of the bitmap in bytes */
uint64_t *values; /* pointer to the bitmap's values array */
};
/* runtime information necessary to create a run */
struct run_descriptor {
uint16_t flags; /* chunk flags for the run */
size_t unit_size; /* the size of a single unit in a run */
uint32_t size_idx; /* size index of a single run instance */
size_t alignment; /* required alignment of objects */
unsigned nallocs; /* number of allocs per run */
struct run_bitmap bitmap;
};
struct memory_block_ops {
/* returns memory block size */
size_t (*block_size)(const struct memory_block *m);
/* prepares header modification operation */
void (*prep_hdr)(const struct memory_block *m,
enum memblock_state dest_state, struct operation_context *ctx);
/* returns lock associated with memory block */
os_mutex_t *(*get_lock)(const struct memory_block *m);
/* returns whether a block is allocated or not */
enum memblock_state (*get_state)(const struct memory_block *m);
/* returns pointer to the data of a block */
void *(*get_user_data)(const struct memory_block *m);
/*
* Returns the size of a memory block without overhead.
* This is the size of a data block that can be used.
*/
size_t (*get_user_size)(const struct memory_block *m);
/* returns pointer to the beginning of data of a run block */
void *(*get_real_data)(const struct memory_block *m);
/* returns the size of a memory block, including headers */
size_t (*get_real_size)(const struct memory_block *m);
/* writes a header of an allocation */
void (*write_header)(const struct memory_block *m,
uint64_t extra_field, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
* Checks the header type of a chunk matches the expected type and
* modifies it if necessary. This is fail-safe atomic.
*/
void (*ensure_header_type)(const struct memory_block *m,
enum header_type t);
/*
* Reinitializes a block after a heap restart.
* This is called for EVERY allocation, but *only* under Valgrind.
*/
void (*reinit_header)(const struct memory_block *m);
/* returns the extra field of an allocation */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags of an allocation */
uint16_t (*get_flags)(const struct memory_block *m);
/* initializes memblock in valgrind */
void (*vg_init)(const struct memory_block *m, int objects,
object_callback cb, void *arg);
/* iterates over every free block */
int (*iterate_free)(const struct memory_block *m,
object_callback cb, void *arg);
/* iterates over every used block */
int (*iterate_used)(const struct memory_block *m,
object_callback cb, void *arg);
/* calculates number of free units, valid only for runs */
void (*calc_free)(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block);
/* this is called exactly once for every existing chunk */
void (*reinit_chunk)(const struct memory_block *m);
/*
* Initializes bitmap data for a run.
* Do *not* use this function unless absolutely necessary, it breaks
* the abstraction layer by exposing implementation details.
*/
void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b);
/* calculates the ratio between occupied and unoccupied space */
unsigned (*fill_pct)(const struct memory_block *m);
};
struct memory_block {
uint32_t chunk_id; /* index of the memory block in its zone */
uint32_t zone_id; /* index of this block zone in the heap */
/*
* Size index of the memory block represented in either multiple of
* CHUNKSIZE in the case of a huge chunk or in multiple of a run
* block size.
*/
uint32_t size_idx;
/*
* Used only for run chunks, must be zeroed for huge.
* Number of preceding blocks in the chunk. In other words, the
* position of this memory block in run bitmap.
*/
uint32_t block_off;
/*
* The variables below are associated with the memory block and are
* stored here for convenience. Those fields are filled by either the
* memblock_from_offset or memblock_rebuild_state, and they should not
* be modified manually.
*/
const struct memory_block_ops *m_ops;
struct palloc_heap *heap;
enum header_type header_type;
enum memory_block_type type;
struct run_bitmap *cached_bitmap;
};
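/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * runtime code normally obtains a fully initialized memory block from a
 * heap offset and then works exclusively through m_ops.
 *
 *	struct memory_block m = memblock_from_offset(heap, off);
 *	size_t usable = m.m_ops->get_user_size(&m);
 *	void *data = m.m_ops->get_user_data(&m);
 */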
/*
* This is a representation of a run memory block that is active in a bucket or
* is on a pending list in the recycler.
* This structure should never be passed around by value because the address of
* the nresv variable can be in reservations made through palloc_reserve(). Only
 * if the number of reservations equals 0 can the structure be moved/freed.
*/
struct memory_block_reserved {
struct memory_block m;
struct bucket *bucket;
/*
* Number of reservations made from this run, the pointer to this value
* is stored in a user facing pobj_action structure. Decremented once
* the reservation is published or canceled.
*/
int nresv;
};
struct memory_block memblock_from_offset(struct palloc_heap *heap,
uint64_t off);
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap,
uint64_t off, int size);
void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m);
struct memory_block memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx);
struct memory_block memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, struct run_descriptor *rdsc);
void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b);
#ifdef __cplusplus
}
#endif
#endif
| 10,750 | 34.019544 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memblock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memblock.c -- implementation of memory block
*
* Memory block is a representation of persistent object that resides in the
* heap. A valid memory block must be either a huge (free or used) chunk or a
* block inside a run.
*
* Huge blocks are 1:1 correlated with the chunk headers in the zone whereas
* run blocks are represented by bits in corresponding chunk bitmap.
*
* This file contains implementations of abstract operations on memory blocks.
* Instead of storing the mbops structure inside each memory block the correct
* method implementation is chosen at runtime.
*/
#include <string.h>
#include "obj.h"
#include "heap.h"
#include "memblock.h"
#include "out.h"
#include "valgrind_internal.h"
#include "alloc_class.h"
/* calculates the size of the entire run, including any additional chunks */
#define SIZEOF_RUN(runp, size_idx)\
(sizeof(*(runp)) + (((size_idx) - 1) * CHUNKSIZE))
/*
* memblock_header_type -- determines the memory block's header type
*/
static enum header_type
memblock_header_type(const struct memory_block *m)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
if (hdr->flags & CHUNK_FLAG_COMPACT_HEADER)
return HEADER_COMPACT;
if (hdr->flags & CHUNK_FLAG_HEADER_NONE)
return HEADER_NONE;
return HEADER_LEGACY;
}
/*
* memblock_header_legacy_get_size --
* (internal) returns the size stored in a legacy header
*/
static size_t
memblock_header_legacy_get_size(const struct memory_block *m)
{
struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);
return hdr->size;
}
/*
* memblock_header_compact_get_size --
* (internal) returns the size stored in a compact header
*/
static size_t
memblock_header_compact_get_size(const struct memory_block *m)
{
struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);
return hdr->size & ALLOC_HDR_FLAGS_MASK;
}
/*
* memblock_header_none_get_size --
* (internal) determines the sizes of an object without a header
*/
static size_t
memblock_header_none_get_size(const struct memory_block *m)
{
return m->m_ops->block_size(m);
}
/*
* memblock_header_legacy_get_extra --
* (internal) returns the extra field stored in a legacy header
*/
static uint64_t
memblock_header_legacy_get_extra(const struct memory_block *m)
{
struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);
return hdr->type_num;
}
/*
* memblock_header_compact_get_extra --
* (internal) returns the extra field stored in a compact header
*/
static uint64_t
memblock_header_compact_get_extra(const struct memory_block *m)
{
struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);
return hdr->extra;
}
/*
* memblock_header_none_get_extra --
* (internal) objects without a header don't have an extra field
*/
static uint64_t
memblock_header_none_get_extra(const struct memory_block *m)
{
return 0;
}
/*
* memblock_header_legacy_get_flags --
* (internal) returns the flags stored in a legacy header
*/
static uint16_t
memblock_header_legacy_get_flags(const struct memory_block *m)
{
struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);
return (uint16_t)(hdr->root_size >> ALLOC_HDR_SIZE_SHIFT);
}
/*
* memblock_header_compact_get_flags --
* (internal) returns the flags stored in a compact header
*/
static uint16_t
memblock_header_compact_get_flags(const struct memory_block *m)
{
struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);
return (uint16_t)(hdr->size >> ALLOC_HDR_SIZE_SHIFT);
}
/*
* memblock_header_none_get_flags --
* (internal) objects without a header do not support flags
*/
static uint16_t
memblock_header_none_get_flags(const struct memory_block *m)
{
return 0;
}
/*
* memblock_header_legacy_write --
* (internal) writes a legacy header of an object
*/
static void
memblock_header_legacy_write(const struct memory_block *m,
size_t size, uint64_t extra, uint16_t flags)
{
struct allocation_header_legacy hdr;
hdr.size = size;
hdr.type_num = extra;
hdr.root_size = ((uint64_t)flags << ALLOC_HDR_SIZE_SHIFT);
struct allocation_header_legacy *hdrp = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_UNDEFINED(hdrp, sizeof(*hdrp));
VALGRIND_ADD_TO_TX(hdrp, sizeof(*hdrp));
pmemops_memcpy(&m->heap->p_ops, hdrp, &hdr,
sizeof(hdr), /* legacy header is 64 bytes in size */
PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_RELAXED);
VALGRIND_REMOVE_FROM_TX(hdrp, sizeof(*hdrp));
/* unused fields of the legacy headers are used as a red zone */
VALGRIND_DO_MAKE_MEM_NOACCESS(hdrp->unused, sizeof(hdrp->unused));
}
/*
* memblock_header_compact_write --
* (internal) writes a compact header of an object
*/
static void
memblock_header_compact_write(const struct memory_block *m,
size_t size, uint64_t extra, uint16_t flags)
{
COMPILE_ERROR_ON(ALLOC_HDR_COMPACT_SIZE > CACHELINE_SIZE);
struct {
struct allocation_header_compact hdr;
uint8_t padding[CACHELINE_SIZE - ALLOC_HDR_COMPACT_SIZE];
} padded;
padded.hdr.size = size | ((uint64_t)flags << ALLOC_HDR_SIZE_SHIFT);
padded.hdr.extra = extra;
struct allocation_header_compact *hdrp = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_UNDEFINED(hdrp, sizeof(*hdrp));
/*
	 * If possible, write the entire header with a single memcpy; this allows
* the copy implementation to avoid a cache miss on a partial cache line
* write.
*/
size_t hdr_size = ALLOC_HDR_COMPACT_SIZE;
if ((uintptr_t)hdrp % CACHELINE_SIZE == 0 && size >= sizeof(padded))
hdr_size = sizeof(padded);
VALGRIND_ADD_TO_TX(hdrp, hdr_size);
pmemops_memcpy(&m->heap->p_ops, hdrp, &padded, hdr_size,
PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_RELAXED);
VALGRIND_DO_MAKE_MEM_UNDEFINED((char *)hdrp + ALLOC_HDR_COMPACT_SIZE,
hdr_size - ALLOC_HDR_COMPACT_SIZE);
VALGRIND_REMOVE_FROM_TX(hdrp, hdr_size);
}
/*
* memblock_header_none_write --
* (internal) nothing to write
*/
static void
memblock_header_none_write(const struct memory_block *m,
size_t size, uint64_t extra, uint16_t flags)
{
/* NOP */
}
/*
* memblock_header_legacy_invalidate --
* (internal) invalidates a legacy header
*/
static void
memblock_header_legacy_invalidate(const struct memory_block *m)
{
struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);
VALGRIND_SET_CLEAN(hdr, sizeof(*hdr));
}
/*
* memblock_header_compact_invalidate --
* (internal) invalidates a compact header
*/
static void
memblock_header_compact_invalidate(const struct memory_block *m)
{
struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);
VALGRIND_SET_CLEAN(hdr, sizeof(*hdr));
}
/*
* memblock_no_header_invalidate --
* (internal) nothing to invalidate
*/
static void
memblock_header_none_invalidate(const struct memory_block *m)
{
/* NOP */
}
/*
* memblock_header_legacy_reinit --
* (internal) reinitializes a legacy header after a heap restart
*/
static void
memblock_header_legacy_reinit(const struct memory_block *m)
{
struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
/* unused fields of the legacy headers are used as a red zone */
VALGRIND_DO_MAKE_MEM_NOACCESS(hdr->unused, sizeof(hdr->unused));
}
/*
* memblock_header_compact_reinit --
* (internal) reinitializes a compact header after a heap restart
*/
static void
memblock_header_compact_reinit(const struct memory_block *m)
{
struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
}
/*
* memblock_header_none_reinit --
* (internal) nothing to reinitialize
*/
static void
memblock_header_none_reinit(const struct memory_block *m)
{
/* NOP */
}
static const struct {
/* determines the sizes of an object */
size_t (*get_size)(const struct memory_block *m);
/* returns the extra field (if available, 0 if not) */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags stored in a header (if available, 0 if not) */
uint16_t (*get_flags)(const struct memory_block *m);
/*
* Stores size, extra info and flags in header of an object
* (if available, does nothing otherwise).
*/
void (*write)(const struct memory_block *m,
size_t size, uint64_t extra, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
* Reinitializes a header after a heap restart (if available, does
* nothing otherwise) (VG).
*/
void (*reinit)(const struct memory_block *m);
} memblock_header_ops[MAX_HEADER_TYPES] = {
[HEADER_LEGACY] = {
memblock_header_legacy_get_size,
memblock_header_legacy_get_extra,
memblock_header_legacy_get_flags,
memblock_header_legacy_write,
memblock_header_legacy_invalidate,
memblock_header_legacy_reinit,
},
[HEADER_COMPACT] = {
memblock_header_compact_get_size,
memblock_header_compact_get_extra,
memblock_header_compact_get_flags,
memblock_header_compact_write,
memblock_header_compact_invalidate,
memblock_header_compact_reinit,
},
[HEADER_NONE] = {
memblock_header_none_get_size,
memblock_header_none_get_extra,
memblock_header_none_get_flags,
memblock_header_none_write,
memblock_header_none_invalidate,
memblock_header_none_reinit,
}
};
/*
* memblock_run_default_nallocs -- returns the number of memory blocks
 * available in a run with the given parameters using the default
* fixed-bitmap algorithm
*/
static unsigned
memblock_run_default_nallocs(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment)
{
unsigned nallocs = (unsigned)
(RUN_DEFAULT_SIZE_BYTES(*size_idx) / unit_size);
while (nallocs > RUN_DEFAULT_BITMAP_NBITS) {
LOG(3, "tried to create a run (%lu) with number "
"of units (%u) exceeding the bitmap size (%u)",
unit_size, nallocs, RUN_DEFAULT_BITMAP_NBITS);
if (*size_idx > 1) {
*size_idx -= 1;
/* recalculate the number of allocations */
nallocs = (uint32_t)
(RUN_DEFAULT_SIZE_BYTES(*size_idx) / unit_size);
LOG(3, "run (%lu) was constructed with "
"fewer (%u) than requested chunks (%u)",
unit_size, *size_idx, *size_idx + 1);
} else {
LOG(3, "run (%lu) was constructed with "
"fewer units (%u) than optimal (%u), "
"this might lead to "
"inefficient memory utilization!",
unit_size,
RUN_DEFAULT_BITMAP_NBITS, nallocs);
nallocs = RUN_DEFAULT_BITMAP_NBITS;
}
}
return nallocs - (alignment ? 1 : 0);
}
/*
* memblock_run_bitmap -- calculate bitmap parameters for given arguments
*/
void
memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b)
{
ASSERTne(*size_idx, 0);
/*
* Flexible bitmaps have a variably sized values array. The size varies
* depending on:
	 * alignment - initial run alignment might require up to a unit
* size idx - the larger the run, the more units it carries
* unit_size - the smaller the unit size, the more units per run
*
* The size of the bitmap also has to be calculated in such a way that
* the beginning of allocations data is cacheline aligned. This is
* required to perform many optimizations throughout the codebase.
* This alignment requirement means that some of the bitmap values might
* remain unused and will serve only as a padding for data.
*/
if (flags & CHUNK_FLAG_FLEX_BITMAP) {
/*
* First calculate the number of values without accounting for
* the bitmap size.
*/
size_t content_size = RUN_CONTENT_SIZE_BYTES(*size_idx);
b->nbits = (unsigned)(content_size / unit_size);
b->nvalues = util_div_ceil(b->nbits, RUN_BITS_PER_VALUE);
/*
* Then, align the number of values up, so that the cacheline
* alignment is preserved.
*/
b->nvalues = ALIGN_UP(b->nvalues + RUN_BASE_METADATA_VALUES,
(unsigned)(CACHELINE_SIZE / sizeof(*b->values)))
- RUN_BASE_METADATA_VALUES;
/*
* This is the total number of bytes needed for the bitmap AND
* padding.
*/
b->size = b->nvalues * sizeof(*b->values);
/*
* Calculate the number of allocations again, but this time
* accounting for the bitmap/padding.
*/
b->nbits = (unsigned)((content_size - b->size) / unit_size)
- (alignment ? 1U : 0U);
/*
* The last step is to calculate how much of the padding
* is left at the end of the bitmap.
*/
unsigned unused_bits = (b->nvalues * RUN_BITS_PER_VALUE)
- b->nbits;
unsigned unused_values = unused_bits / RUN_BITS_PER_VALUE;
b->nvalues -= unused_values;
b->values = (uint64_t *)content;
return;
}
b->size = RUN_DEFAULT_BITMAP_SIZE;
b->nbits = memblock_run_default_nallocs(size_idx, flags,
unit_size, alignment);
unsigned unused_bits = RUN_DEFAULT_BITMAP_NBITS - b->nbits;
unsigned unused_values = unused_bits / RUN_BITS_PER_VALUE;
b->nvalues = RUN_DEFAULT_BITMAP_VALUES - unused_values;
b->values = (uint64_t *)content;
}
/*
* run_get_bitmap -- initializes run bitmap information
*/
static void
run_get_bitmap(const struct memory_block *m, struct run_bitmap *b)
{
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
if (m->cached_bitmap != NULL) {
*b = *m->cached_bitmap;
b->values = (uint64_t *)run->content;
} else {
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
uint32_t size_idx = hdr->size_idx;
memblock_run_bitmap(&size_idx, hdr->flags, run->hdr.block_size,
run->hdr.alignment, run->content, b);
ASSERTeq(size_idx, hdr->size_idx);
}
}
/*
* huge_block_size -- returns the compile-time constant which defines the
* huge memory block size.
*/
static size_t
huge_block_size(const struct memory_block *m)
{
return CHUNKSIZE;
}
/*
* run_block_size -- looks for the right chunk and returns the block size
* information that is attached to the run block metadata.
*/
static size_t
run_block_size(const struct memory_block *m)
{
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
return run->hdr.block_size;
}
/*
* huge_get_real_data -- returns pointer to the beginning data of a huge block
*/
static void *
huge_get_real_data(const struct memory_block *m)
{
return heap_get_chunk(m->heap, m)->data;
}
/*
* run_get_data_start -- (internal) returns the pointer to the beginning of
* allocations in a run
*/
static char *
run_get_data_start(const struct memory_block *m)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
struct run_bitmap b;
run_get_bitmap(m, &b);
if (hdr->flags & CHUNK_FLAG_ALIGNED) {
/*
* Alignment is property of user data in allocations. And
* since objects have headers, we need to take them into
* account when calculating the address.
*/
uintptr_t hsize = header_type_to_size[m->header_type];
uintptr_t base = (uintptr_t)run->content +
b.size + hsize;
return (char *)(ALIGN_UP(base, run->hdr.alignment) - hsize);
} else {
return (char *)&run->content + b.size;
}
}
/*
* run_get_data_offset -- (internal) returns the number of bytes between
* run base metadata and data
*/
static size_t
run_get_data_offset(const struct memory_block *m)
{
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
return (size_t)run_get_data_start(m) - (size_t)&run->content;
}
/*
* run_get_real_data -- returns pointer to the beginning data of a run block
*/
static void *
run_get_real_data(const struct memory_block *m)
{
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
ASSERT(run->hdr.block_size != 0);
return run_get_data_start(m) + (run->hdr.block_size * m->block_off);
}
/*
* block_get_user_data -- returns pointer to the data of a block
*/
static void *
block_get_user_data(const struct memory_block *m)
{
return (char *)m->m_ops->get_real_data(m) +
header_type_to_size[m->header_type];
}
/*
* chunk_get_chunk_hdr_value -- (internal) get value of a header for redo log
*/
static uint64_t
chunk_get_chunk_hdr_value(uint16_t type, uint16_t flags, uint32_t size_idx)
{
uint64_t val;
COMPILE_ERROR_ON(sizeof(struct chunk_header) != sizeof(uint64_t));
struct chunk_header hdr;
hdr.type = type;
hdr.flags = flags;
hdr.size_idx = size_idx;
memcpy(&val, &hdr, sizeof(val));
return val;
}
/*
* huge_prep_operation_hdr -- prepares the new value of a chunk header that will
* be set after the operation concludes.
*/
static void
huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
struct operation_context *ctx)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
/*
* Depending on the operation that needs to be performed a new chunk
* header needs to be prepared with the new chunk state.
*/
uint64_t val = chunk_get_chunk_hdr_value(
op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
hdr->flags,
m->size_idx);
if (ctx == NULL) {
util_atomic_store_explicit64((uint64_t *)hdr, val,
memory_order_relaxed);
pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
} else {
operation_add_entry(ctx, hdr, val, ULOG_OPERATION_SET);
}
VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1,
(hdr->size_idx - 1) * sizeof(struct chunk_header));
/*
* In the case of chunks larger than one unit the footer must be
* created immediately AFTER the persistent state is safely updated.
*/
if (m->size_idx == 1)
return;
struct chunk_header *footer = hdr + m->size_idx - 1;
VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer));
val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx);
/*
* It's only safe to write the footer AFTER the persistent part of
	 * the operation has been successfully processed because the footer
* pointer might point to a currently valid persistent state
* of a different chunk.
* The footer entry change is updated as transient because it will
* be recreated at heap boot regardless - it's just needed for runtime
* operations.
*/
if (ctx == NULL) {
util_atomic_store_explicit64((uint64_t *)footer, val,
memory_order_relaxed);
VALGRIND_SET_CLEAN(footer, sizeof(*footer));
} else {
operation_add_typed_entry(ctx,
footer, val, ULOG_OPERATION_SET, LOG_TRANSIENT);
}
}
/*
* run_prep_operation_hdr -- prepares the new value for a select few bytes of
* a run bitmap that will be set after the operation concludes.
*
* It's VERY important to keep in mind that the particular value of the
* bitmap this method is modifying must not be changed after this function
* is called and before the operation is processed.
*/
static void
run_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
struct operation_context *ctx)
{
ASSERT(m->size_idx <= RUN_BITS_PER_VALUE);
/*
* Free blocks are represented by clear bits and used blocks by set
* bits - which is the reverse of the commonly used scheme.
*
	 * Here a bit mask is prepared that flips the bits representing the
	 * memory block provided by the caller. Because both the size index and
	 * the block offset are tied 1:1 to the bitmap, this operation is
	 * relatively simple.
*/
uint64_t bmask;
if (m->size_idx == RUN_BITS_PER_VALUE) {
ASSERTeq(m->block_off % RUN_BITS_PER_VALUE, 0);
bmask = UINT64_MAX;
} else {
bmask = ((1ULL << m->size_idx) - 1ULL) <<
(m->block_off % RUN_BITS_PER_VALUE);
}
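	/*
	 * Worked example (assuming RUN_BITS_PER_VALUE == 64): for
	 * size_idx == 3 and block_off == 67, the mask is
	 * ((1 << 3) - 1) << (67 % 64) == 0b111 << 3 == 0x38, i.e. bits
	 * 3..5 of the second 8-byte bitmap value (bpos == 1 below).
	 */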
/*
* The run bitmap is composed of several 8 byte values, so a proper
* element of the bitmap array must be selected.
*/
unsigned bpos = m->block_off / RUN_BITS_PER_VALUE;
struct run_bitmap b;
run_get_bitmap(m, &b);
/* the bit mask is applied immediately by the add entry operations */
if (op == MEMBLOCK_ALLOCATED) {
operation_add_entry(ctx, &b.values[bpos],
bmask, ULOG_OPERATION_OR);
} else if (op == MEMBLOCK_FREE) {
operation_add_entry(ctx, &b.values[bpos],
~bmask, ULOG_OPERATION_AND);
} else {
ASSERT(0);
}
}
/*
* huge_get_lock -- because huge memory blocks are always allocated from a
 *	single bucket, there's no reason to lock them - the bucket itself is
* protected.
*/
static os_mutex_t *
huge_get_lock(const struct memory_block *m)
{
return NULL;
}
/*
* run_get_lock -- gets the runtime mutex from the heap.
*/
static os_mutex_t *
run_get_lock(const struct memory_block *m)
{
return heap_get_run_lock(m->heap, m->chunk_id);
}
/*
* huge_get_state -- returns whether a huge block is allocated or not
*/
static enum memblock_state
huge_get_state(const struct memory_block *m)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
if (hdr->type == CHUNK_TYPE_USED)
return MEMBLOCK_ALLOCATED;
if (hdr->type == CHUNK_TYPE_FREE)
return MEMBLOCK_FREE;
return MEMBLOCK_STATE_UNKNOWN;
}
/*
 * run_get_state -- returns whether a block from a run is allocated or not
*/
static enum memblock_state
run_get_state(const struct memory_block *m)
{
struct run_bitmap b;
run_get_bitmap(m, &b);
unsigned v = m->block_off / RUN_BITS_PER_VALUE;
uint64_t bitmap = b.values[v];
unsigned bit = m->block_off % RUN_BITS_PER_VALUE;
unsigned bit_last = bit + m->size_idx;
ASSERT(bit_last <= RUN_BITS_PER_VALUE);
for (unsigned i = bit; i < bit_last; ++i) {
if (!BIT_IS_CLR(bitmap, i)) {
return MEMBLOCK_ALLOCATED;
}
}
return MEMBLOCK_FREE;
}
/*
* huge_ensure_header_type -- checks the header type of a chunk and modifies
* it if necessary. This is fail-safe atomic.
*/
static void
huge_ensure_header_type(const struct memory_block *m,
enum header_type t)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
ASSERTeq(hdr->type, CHUNK_TYPE_FREE);
if ((hdr->flags & header_type_to_flag[t]) == 0) {
VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
uint16_t f = ((uint16_t)header_type_to_flag[t]);
hdr->flags |= f;
pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));
}
}
/*
* run_ensure_header_type -- runs must be created with appropriate header type.
*/
static void
run_ensure_header_type(const struct memory_block *m,
enum header_type t)
{
#ifdef DEBUG
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
ASSERTeq(hdr->type, CHUNK_TYPE_RUN);
ASSERT((hdr->flags & header_type_to_flag[t]) == header_type_to_flag[t]);
#endif
}
/*
* block_get_real_size -- returns the size of a memory block that includes all
* of the overhead (headers)
*/
static size_t
block_get_real_size(const struct memory_block *m)
{
/*
	 * There are two valid ways to get a size. If the memory block was
	 * initialized properly and the size index is set, the chunk unit size
* can be simply multiplied by that index, otherwise we need to look at
* the allocation header.
*/
if (m->size_idx != 0) {
return m->m_ops->block_size(m) * m->size_idx;
} else {
return memblock_header_ops[m->header_type].get_size(m);
}
}
/*
* block_get_user_size -- returns the size of a memory block without overheads,
* this is the size of a data block that can be used.
*/
static size_t
block_get_user_size(const struct memory_block *m)
{
return block_get_real_size(m) - header_type_to_size[m->header_type];
}
/*
* block_write_header -- writes a header of an allocation
*/
static void
block_write_header(const struct memory_block *m,
uint64_t extra_field, uint16_t flags)
{
memblock_header_ops[m->header_type].write(m,
block_get_real_size(m), extra_field, flags);
}
/*
* block_invalidate -- invalidates allocation data and header
*/
static void
block_invalidate(const struct memory_block *m)
{
void *data = m->m_ops->get_user_data(m);
size_t size = m->m_ops->get_user_size(m);
VALGRIND_SET_CLEAN(data, size);
memblock_header_ops[m->header_type].invalidate(m);
}
/*
* block_reinit_header -- reinitializes a block after a heap restart
*/
static void
block_reinit_header(const struct memory_block *m)
{
memblock_header_ops[m->header_type].reinit(m);
}
/*
* block_get_extra -- returns the extra field of an allocation
*/
static uint64_t
block_get_extra(const struct memory_block *m)
{
return memblock_header_ops[m->header_type].get_extra(m);
}
/*
* block_get_flags -- returns the flags of an allocation
*/
static uint16_t
block_get_flags(const struct memory_block *m)
{
return memblock_header_ops[m->header_type].get_flags(m);
}
/*
 * run_process_bitmap_value -- (internal) looks for unset bits in the
* value, creates a valid memory block out of them and inserts that
* block into the given bucket.
*/
static int
run_process_bitmap_value(const struct memory_block *m,
uint64_t value, uint32_t base_offset, object_callback cb, void *arg)
{
int ret = 0;
uint64_t shift = 0; /* already processed bits */
struct memory_block s = *m;
do {
/*
* Shift the value so that the next memory block starts on the
* least significant position:
* ..............0 (free block)
* or ..............1 (used block)
*/
uint64_t shifted = value >> shift;
/* all clear or set bits indicate the end of traversal */
if (shifted == 0) {
/*
* Insert the remaining blocks as free. Remember that
* unsigned values are always zero-filled, so we must
* take the current shift into account.
*/
s.block_off = (uint32_t)(base_offset + shift);
s.size_idx = (uint32_t)(RUN_BITS_PER_VALUE - shift);
if ((ret = cb(&s, arg)) != 0)
return ret;
break;
} else if (shifted == UINT64_MAX) {
break;
}
/*
* Offset and size of the next free block, either of these
* can be zero depending on where the free block is located
* in the value.
*/
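		/*
		 * Worked example: value == ...110011 (two used blocks, then
		 * two free blocks, then used again) and base_offset == 0:
		 *	iteration 1: off == 2, size == 0 -> nothing reported,
		 *		shift becomes 2
		 *	iteration 2: off == 0, size == 2 -> cb() is called with
		 *		block_off == 2 and size_idx == 2, shift becomes 4
		 * Exactly one of 'off'/'size' is zero in each iteration.
		 */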
unsigned off = (unsigned)util_lssb_index64(~shifted);
unsigned size = (unsigned)util_lssb_index64(shifted);
shift += off + size;
if (size != 0) { /* zero size means skip to the next value */
s.block_off = (uint32_t)(base_offset + (shift - size));
s.size_idx = (uint32_t)(size);
memblock_rebuild_state(m->heap, &s);
if ((ret = cb(&s, arg)) != 0)
return ret;
}
} while (shift != RUN_BITS_PER_VALUE);
return 0;
}
/*
* run_iterate_free -- iterates over free blocks in a run
*/
static int
run_iterate_free(const struct memory_block *m, object_callback cb, void *arg)
{
int ret = 0;
uint32_t block_off = 0;
struct run_bitmap b;
run_get_bitmap(m, &b);
struct memory_block nm = *m;
for (unsigned i = 0; i < b.nvalues; ++i) {
uint64_t v = b.values[i];
ASSERT((uint64_t)RUN_BITS_PER_VALUE * (uint64_t)i
<= UINT32_MAX);
block_off = RUN_BITS_PER_VALUE * i;
ret = run_process_bitmap_value(&nm, v, block_off, cb, arg);
if (ret != 0)
return ret;
}
return 0;
}
/*
* run_iterate_used -- iterates over used blocks in a run
*/
static int
run_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
uint32_t i = m->block_off / RUN_BITS_PER_VALUE;
uint32_t block_start = m->block_off % RUN_BITS_PER_VALUE;
uint32_t block_off;
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
struct memory_block iter = *m;
struct run_bitmap b;
run_get_bitmap(m, &b);
for (; i < b.nvalues; ++i) {
uint64_t v = b.values[i];
block_off = (uint32_t)(RUN_BITS_PER_VALUE * i);
for (uint32_t j = block_start; j < RUN_BITS_PER_VALUE; ) {
if (block_off + j >= (uint32_t)b.nbits)
break;
if (!BIT_IS_CLR(v, j)) {
iter.block_off = (uint32_t)(block_off + j);
/*
* The size index of this memory block cannot be
* retrieved at this time because the header
* might not be initialized in valgrind yet.
*/
iter.size_idx = 0;
if (cb(&iter, arg) != 0)
return 1;
iter.size_idx = CALC_SIZE_IDX(
run->hdr.block_size,
iter.m_ops->get_real_size(&iter));
j = (uint32_t)(j + iter.size_idx);
} else {
++j;
}
}
block_start = 0;
}
return 0;
}
/*
* huge_iterate_free -- calls cb on memory block if it's free
*/
static int
huge_iterate_free(const struct memory_block *m, object_callback cb, void *arg)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
return hdr->type == CHUNK_TYPE_FREE ? cb(m, arg) : 0;
}
/*
 * huge_iterate_used -- calls cb on memory block if it's used
*/
static int
huge_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
return hdr->type == CHUNK_TYPE_USED ? cb(m, arg) : 0;
}
/*
* huge_vg_init -- initializes chunk metadata in memcheck state
*/
static void
huge_vg_init(const struct memory_block *m, int objects,
object_callback cb, void *arg)
{
struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
struct chunk *chunk = heap_get_chunk(m->heap, m);
VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
/*
* Mark unused chunk headers as not accessible.
*/
VALGRIND_DO_MAKE_MEM_NOACCESS(
&z->chunk_headers[m->chunk_id + 1],
(m->size_idx - 1) *
sizeof(struct chunk_header));
size_t size = block_get_real_size(m);
VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);
if (objects && huge_get_state(m) == MEMBLOCK_ALLOCATED) {
if (cb(m, arg) != 0)
FATAL("failed to initialize valgrind state");
}
}
/*
* run_vg_init -- initializes run metadata in memcheck state
*/
static void
run_vg_init(const struct memory_block *m, int objects,
object_callback cb, void *arg)
{
struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
struct chunk_run *run = heap_get_chunk_run(m->heap, m);
VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
/* set the run metadata as defined */
VALGRIND_DO_MAKE_MEM_DEFINED(run, RUN_BASE_METADATA_SIZE);
struct run_bitmap b;
run_get_bitmap(m, &b);
/*
* Mark run data headers as defined.
*/
for (unsigned j = 1; j < m->size_idx; ++j) {
struct chunk_header *data_hdr =
&z->chunk_headers[m->chunk_id + j];
VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
sizeof(struct chunk_header));
ASSERTeq(data_hdr->type, CHUNK_TYPE_RUN_DATA);
}
VALGRIND_DO_MAKE_MEM_NOACCESS(run, SIZEOF_RUN(run, m->size_idx));
/* set the run bitmap as defined */
VALGRIND_DO_MAKE_MEM_DEFINED(run, b.size + RUN_BASE_METADATA_SIZE);
if (objects) {
if (run_iterate_used(m, cb, arg) != 0)
FATAL("failed to initialize valgrind state");
}
}
/*
* run_reinit_chunk -- run reinitialization on first zone traversal
*/
static void
run_reinit_chunk(const struct memory_block *m)
{
/* noop */
}
/*
* huge_write_footer -- (internal) writes a chunk footer
*/
static void
huge_write_footer(struct chunk_header *hdr, uint32_t size_idx)
{
if (size_idx == 1) /* that would overwrite the header */
return;
VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr + size_idx - 1, sizeof(*hdr));
struct chunk_header f = *hdr;
f.type = CHUNK_TYPE_FOOTER;
f.size_idx = size_idx;
*(hdr + size_idx - 1) = f;
/* no need to persist, footers are recreated in heap_populate_buckets */
VALGRIND_SET_CLEAN(hdr + size_idx - 1, sizeof(f));
}
/*
* huge_reinit_chunk -- chunk reinitialization on first zone traversal
*/
static void
huge_reinit_chunk(const struct memory_block *m)
{
struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
if (hdr->type == CHUNK_TYPE_USED)
huge_write_footer(hdr, hdr->size_idx);
}
/*
* run_calc_free -- calculates the number of free units in a run
*/
static void
run_calc_free(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block)
{
struct run_bitmap b;
run_get_bitmap(m, &b);
for (unsigned i = 0; i < b.nvalues; ++i) {
uint64_t value = ~b.values[i];
if (value == 0)
continue;
uint32_t free_in_value = util_popcount64(value);
*free_space = *free_space + free_in_value;
/*
* If this value has less free blocks than already found max,
* there's no point in calculating.
*/
if (free_in_value < *max_free_block)
continue;
/* if the entire value is empty, no point in calculating */
if (free_in_value == RUN_BITS_PER_VALUE) {
*max_free_block = RUN_BITS_PER_VALUE;
continue;
}
/* if already at max, no point in calculating */
if (*max_free_block == RUN_BITS_PER_VALUE)
continue;
/*
* Calculate the biggest free block in the bitmap.
* This algorithm is not the most clever imaginable, but it's
* easy to implement and fast enough.
*/
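		/*
		 * Worked example: value == 0b0111 (three consecutive free
		 * units) shrinks as 0b0111 -> 0b0110 -> 0b0100 -> 0, so the
		 * loop below runs three times and n == 3, the length of the
		 * longest run of set (i.e. free) bits.
		 */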
uint16_t n = 0;
while (value != 0) {
value &= (value << 1ULL);
n++;
}
if (n > *max_free_block)
*max_free_block = n;
}
}
/*
* huge_fill_pct -- huge blocks by definition use the entirety of a chunk
*/
static unsigned
huge_fill_pct(const struct memory_block *m)
{
return 100;
}
/*
* run_fill_pct -- calculates the percentage of allocated units inside of a run
*/
static unsigned
run_fill_pct(const struct memory_block *m)
{
struct run_bitmap b;
run_get_bitmap(m, &b);
unsigned clearbits = 0;
for (unsigned i = 0; i < b.nvalues; ++i) {
uint64_t value = ~b.values[i];
if (value == 0)
continue;
clearbits += util_popcount64(value);
}
ASSERT(b.nbits >= clearbits);
unsigned setbits = b.nbits - clearbits;
return (100 * setbits) / b.nbits;
}
static const struct memory_block_ops mb_ops[MAX_MEMORY_BLOCK] = {
[MEMORY_BLOCK_HUGE] = {
.block_size = huge_block_size,
.prep_hdr = huge_prep_operation_hdr,
.get_lock = huge_get_lock,
.get_state = huge_get_state,
.get_user_data = block_get_user_data,
.get_real_data = huge_get_real_data,
.get_user_size = block_get_user_size,
.get_real_size = block_get_real_size,
.write_header = block_write_header,
.invalidate = block_invalidate,
.ensure_header_type = huge_ensure_header_type,
.reinit_header = block_reinit_header,
.vg_init = huge_vg_init,
.get_extra = block_get_extra,
.get_flags = block_get_flags,
.iterate_free = huge_iterate_free,
.iterate_used = huge_iterate_used,
.reinit_chunk = huge_reinit_chunk,
.calc_free = NULL,
.get_bitmap = NULL,
.fill_pct = huge_fill_pct,
},
[MEMORY_BLOCK_RUN] = {
.block_size = run_block_size,
.prep_hdr = run_prep_operation_hdr,
.get_lock = run_get_lock,
.get_state = run_get_state,
.get_user_data = block_get_user_data,
.get_real_data = run_get_real_data,
.get_user_size = block_get_user_size,
.get_real_size = block_get_real_size,
.write_header = block_write_header,
.invalidate = block_invalidate,
.ensure_header_type = run_ensure_header_type,
.reinit_header = block_reinit_header,
.vg_init = run_vg_init,
.get_extra = block_get_extra,
.get_flags = block_get_flags,
.iterate_free = run_iterate_free,
.iterate_used = run_iterate_used,
.reinit_chunk = run_reinit_chunk,
.calc_free = run_calc_free,
.get_bitmap = run_get_bitmap,
.fill_pct = run_fill_pct,
}
};
/*
* memblock_huge_init -- initializes a new huge memory block
*/
struct memory_block
memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx)
{
struct memory_block m = MEMORY_BLOCK_NONE;
m.chunk_id = chunk_id;
m.zone_id = zone_id;
m.size_idx = size_idx;
m.heap = heap;
struct chunk_header nhdr = {
.type = CHUNK_TYPE_FREE,
.flags = 0,
.size_idx = size_idx
};
struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);
VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));
VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));
*hdr = nhdr; /* write the entire header (8 bytes) at once */
pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
huge_write_footer(hdr, size_idx);
memblock_rebuild_state(heap, &m);
return m;
}
/*
* memblock_run_init -- initializes a new run memory block
*/
struct memory_block
memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, struct run_descriptor *rdsc)
{
uint32_t size_idx = rdsc->size_idx;
ASSERTne(size_idx, 0);
struct memory_block m = MEMORY_BLOCK_NONE;
m.chunk_id = chunk_id;
m.zone_id = zone_id;
m.size_idx = size_idx;
m.heap = heap;
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
struct chunk_run *run = heap_get_chunk_run(heap, &m);
size_t runsize = SIZEOF_RUN(run, size_idx);
VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);
/* add/remove chunk_run and chunk_header to valgrind transaction */
VALGRIND_ADD_TO_TX(run, runsize);
run->hdr.block_size = rdsc->unit_size;
run->hdr.alignment = rdsc->alignment;
struct run_bitmap b = rdsc->bitmap;
b.values = (uint64_t *)run->content;
size_t bitmap_size = b.size;
/* set all the bits */
memset(b.values, 0xFF, bitmap_size);
/* clear only the bits available for allocations from this bucket */
memset(b.values, 0, sizeof(*b.values) * (b.nvalues - 1));
unsigned trailing_bits = b.nbits % RUN_BITS_PER_VALUE;
uint64_t last_value = UINT64_MAX << trailing_bits;
b.values[b.nvalues - 1] = last_value;
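	/*
	 * Illustration of the three statements above: if nbits leaves, say,
	 * 36 trailing bits in the last 8-byte value, that value ends up with
	 * its low 36 bits cleared (allocatable) and its upper 28 bits
	 * permanently set, marking the unusable tail of the bitmap as used;
	 * all earlier values are fully cleared.
	 */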
VALGRIND_REMOVE_FROM_TX(run, runsize);
pmemops_flush(&heap->p_ops, run,
sizeof(struct chunk_run_header) +
bitmap_size);
struct chunk_header run_data_hdr;
run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
run_data_hdr.flags = 0;
VALGRIND_ADD_TO_TX(&z->chunk_headers[chunk_id],
sizeof(struct chunk_header) * size_idx);
struct chunk_header *data_hdr;
for (unsigned i = 1; i < size_idx; ++i) {
data_hdr = &z->chunk_headers[chunk_id + i];
VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
VALGRIND_ANNOTATE_NEW_MEMORY(data_hdr, sizeof(*data_hdr));
run_data_hdr.size_idx = i;
*data_hdr = run_data_hdr;
}
pmemops_persist(&heap->p_ops,
&z->chunk_headers[chunk_id + 1],
sizeof(struct chunk_header) * (size_idx - 1));
struct chunk_header *hdr = &z->chunk_headers[chunk_id];
ASSERT(hdr->type == CHUNK_TYPE_FREE);
VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));
struct chunk_header run_hdr;
run_hdr.size_idx = hdr->size_idx;
run_hdr.type = CHUNK_TYPE_RUN;
run_hdr.flags = rdsc->flags;
*hdr = run_hdr;
pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
VALGRIND_REMOVE_FROM_TX(&z->chunk_headers[chunk_id],
sizeof(struct chunk_header) * size_idx);
memblock_rebuild_state(heap, &m);
m.cached_bitmap = &rdsc->bitmap;
return m;
}
/*
 * memblock_detect_type -- looks for the corresponding chunk header and,
 *	depending on the chunk's type, returns the right memory block type
*/
static enum memory_block_type
memblock_detect_type(struct palloc_heap *heap, const struct memory_block *m)
{
enum memory_block_type ret;
switch (heap_get_chunk_hdr(heap, m)->type) {
case CHUNK_TYPE_RUN:
case CHUNK_TYPE_RUN_DATA:
ret = MEMORY_BLOCK_RUN;
break;
case CHUNK_TYPE_FREE:
case CHUNK_TYPE_USED:
case CHUNK_TYPE_FOOTER:
ret = MEMORY_BLOCK_HUGE;
break;
default:
/* unreachable */
FATAL("possible zone chunks metadata corruption");
}
return ret;
}
/*
 * memblock_from_offset_opt -- resolves memory block data from an offset that
* originates from the heap
*/
struct memory_block
memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size)
{
struct memory_block m = MEMORY_BLOCK_NONE;
m.heap = heap;
off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0);
m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);
off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone);
m.chunk_id = (uint32_t)(off / CHUNKSIZE);
struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);
if (hdr->type == CHUNK_TYPE_RUN_DATA)
m.chunk_id -= hdr->size_idx;
off -= CHUNKSIZE * m.chunk_id;
m.header_type = memblock_header_type(&m);
off -= header_type_to_size[m.header_type];
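	/*
	 * For a huge allocation the user data begins right after the chunk
	 * header(s), so 'off' has already been reduced to zero here; any
	 * non-zero remainder means the offset points into a run, past the
	 * run's base metadata.
	 */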
m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
ASSERTeq(memblock_detect_type(heap, &m), m.type);
m.m_ops = &mb_ops[m.type];
uint64_t unit_size = m.m_ops->block_size(&m);
if (off != 0) { /* run */
off -= run_get_data_offset(&m);
off -= RUN_BASE_METADATA_SIZE;
m.block_off = (uint16_t)(off / unit_size);
off -= m.block_off * unit_size;
}
struct alloc_class_collection *acc = heap_alloc_classes(heap);
if (acc != NULL) {
struct alloc_class *ac = alloc_class_by_run(acc,
unit_size, hdr->flags, hdr->size_idx);
if (ac != NULL)
m.cached_bitmap = &ac->rdsc.bitmap;
}
m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size,
memblock_header_ops[m.header_type].get_size(&m));
ASSERTeq(off, 0);
return m;
}
/*
 * memblock_from_offset -- returns a memory block with its size index resolved
*/
struct memory_block
memblock_from_offset(struct palloc_heap *heap, uint64_t off)
{
return memblock_from_offset_opt(heap, off, 1);
}
/*
* memblock_rebuild_state -- fills in the runtime-state related fields of a
* memory block structure
*
* This function must be called on all memory blocks that were created by hand
* (as opposed to retrieved from memblock_from_offset function).
*/
void
memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m)
{
m->heap = heap;
m->header_type = memblock_header_type(m);
m->type = memblock_detect_type(heap, m);
m->m_ops = &mb_ops[m->type];
m->cached_bitmap = NULL;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/pmalloc.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmalloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
* transient and persistent representation of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "heap.h"
#include "lane.h"
#include "memblock.h"
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "palloc.h"
#include "pmalloc.h"
#include "alloc_class.h"
#include "set.h"
#include "mmap.h"
enum pmalloc_operation_type {
OPERATION_INTERNAL, /* used only for single, one-off operations */
OPERATION_EXTERNAL, /* used for everything else, incl. large redos */
MAX_OPERATION_TYPE,
};
struct lane_alloc_runtime {
struct operation_context *ctx[MAX_OPERATION_TYPE];
};
/*
* pmalloc_operation_hold_type -- acquires allocator lane section and returns a
* pointer to its operation context
*/
static struct operation_context *
pmalloc_operation_hold_type(PMEMobjpool *pop, enum pmalloc_operation_type type,
int start)
{
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = type == OPERATION_INTERNAL ?
lane->internal : lane->external;
if (start)
operation_start(ctx);
return ctx;
}
/*
 * pmalloc_operation_hold_no_start -- acquires allocator lane section and
 *	returns a pointer to its operation context without starting it
*/
struct operation_context *
pmalloc_operation_hold_no_start(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 0);
}
/*
* pmalloc_operation_hold -- acquires allocator lane section and returns a
* pointer to its redo log
*/
struct operation_context *
pmalloc_operation_hold(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 1);
}
/*
* pmalloc_operation_release -- releases allocator lane section
*/
void
pmalloc_operation_release(PMEMobjpool *pop)
{
lane_release(pop);
}
/*
* pmalloc -- allocates a new block of memory
*
* The pool offset is written persistently into the off variable.
*
 * Returns zero on success; otherwise an error number is returned.
*/
int
pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, NULL, NULL,
extra_field, object_flags, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pmalloc_construct -- allocates a new block of memory with a constructor
*
* The block offset is written persistently into the off variable, but only
* after the constructor function has been called.
*
 * Returns zero on success; otherwise an error number is returned.
*/
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, constructor, arg,
extra_field, object_flags, class_id, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* prealloc -- resizes in-place a previously allocated memory block
*
* The block offset is written persistently into the off variable.
*
 * Returns zero on success; otherwise an error number is returned.
*/
int
prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, size, NULL, NULL,
extra_field, object_flags, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pfree -- deallocates a memory block previously allocated by pmalloc
*
* A zero value is written persistently into the off variable.
*
 * The operation is expected to always succeed; the result is asserted.
*/
void
pfree(PMEMobjpool *pop, uint64_t *off)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, 0, NULL, NULL,
0, 0, 0, 0, ctx);
ASSERTeq(ret, 0);
pmalloc_operation_release(pop);
}
/*
* pmalloc_boot -- global runtime init routine of allocator section
*/
int
pmalloc_boot(PMEMobjpool *pop)
{
int ret = palloc_boot(&pop->heap, (char *)pop + pop->heap_offset,
pop->set->poolsize - pop->heap_offset, &pop->heap_size,
pop, &pop->p_ops,
pop->stats, pop->set);
if (ret)
return ret;
#if VG_MEMCHECK_ENABLED
if (On_memcheck)
palloc_heap_vg_open(&pop->heap, pop->vg_boot);
#endif
ret = palloc_buckets_init(&pop->heap);
if (ret)
palloc_heap_cleanup(&pop->heap);
return ret;
}
/*
* pmalloc_cleanup -- global cleanup routine of allocator section
*/
int
pmalloc_cleanup(PMEMobjpool *pop)
{
palloc_heap_cleanup(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(desc) -- creates a new allocation class
*/
static int
CTL_WRITE_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap);
struct pobj_alloc_class_desc *p = arg;
if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE ||
p->units_per_block <= 0) {
errno = EINVAL;
return -1;
}
if (p->alignment != 0 && p->unit_size % p->alignment != 0) {
ERR("unit size must be evenly divisible by alignment");
errno = EINVAL;
return -1;
}
if (p->alignment > (MEGABYTE * 2)) {
ERR("alignment cannot be larger than 2 megabytes");
errno = EINVAL;
return -1;
}
enum header_type lib_htype = MAX_HEADER_TYPES;
switch (p->header_type) {
case POBJ_HEADER_LEGACY:
lib_htype = HEADER_LEGACY;
break;
case POBJ_HEADER_COMPACT:
lib_htype = HEADER_COMPACT;
break;
case POBJ_HEADER_NONE:
lib_htype = HEADER_NONE;
break;
case MAX_POBJ_HEADER_TYPES:
default:
ERR("invalid header type");
errno = EINVAL;
return -1;
}
if (PMDK_SLIST_EMPTY(indexes)) {
if (alloc_class_find_first_free_slot(ac, &id) != 0) {
ERR("no available free allocation class identifier");
errno = EINVAL;
return -1;
}
} else {
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
if (alloc_class_reserve(ac, id) != 0) {
ERR("attempted to overwrite an allocation class");
errno = EEXIST;
return -1;
}
}
size_t runsize_bytes =
CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) +
RUN_BASE_METADATA_SIZE);
	/* aligning the buffer might require up to 'alignment' bytes */
if (p->alignment != 0)
runsize_bytes += p->alignment;
uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE);
if (size_idx > UINT16_MAX)
size_idx = UINT16_MAX;
struct alloc_class *c = alloc_class_new(id,
heap_alloc_classes(&pop->heap), CLASS_RUN,
lib_htype, p->unit_size, p->alignment, size_idx);
if (c == NULL) {
errno = EINVAL;
return -1;
}
if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) {
alloc_class_delete(ac, c);
return -1;
}
p->class_id = c->id;
p->units_per_block = c->rdsc.nallocs;
return 0;
}
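/*
 * A minimal usage sketch (not part of this module): the handler above is
 * normally reached through the public ctl interface, e.g.:
 *
 *	struct pobj_alloc_class_desc desc;
 *	desc.unit_size = 128;
 *	desc.alignment = 0;
 *	desc.units_per_block = 1000;
 *	desc.header_type = POBJ_HEADER_COMPACT;
 *	pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc);
 *
 * On success, desc.class_id and desc.units_per_block are updated with the
 * values chosen by the handler.
 */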
/*
* pmalloc_header_type_parser -- parses the alloc header type argument
*/
static int
pmalloc_header_type_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_header_type *htype = dest;
ASSERTeq(dest_size, sizeof(enum pobj_header_type));
if (strcmp(vstr, "none") == 0) {
*htype = POBJ_HEADER_NONE;
} else if (strcmp(vstr, "compact") == 0) {
*htype = POBJ_HEADER_COMPACT;
} else if (strcmp(vstr, "legacy") == 0) {
*htype = POBJ_HEADER_LEGACY;
} else {
ERR("invalid header type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* CTL_READ_HANDLER(desc) -- reads the information about allocation class
*/
static int
CTL_READ_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
struct alloc_class *c = alloc_class_by_id(
heap_alloc_classes(&pop->heap), id);
if (c == NULL) {
ERR("class with the given id does not exist");
errno = ENOENT;
return -1;
}
enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES;
switch (c->header_type) {
case HEADER_LEGACY:
user_htype = POBJ_HEADER_LEGACY;
break;
case HEADER_COMPACT:
user_htype = POBJ_HEADER_COMPACT;
break;
case HEADER_NONE:
user_htype = POBJ_HEADER_NONE;
break;
default:
ASSERT(0); /* unreachable */
break;
}
struct pobj_alloc_class_desc *p = arg;
p->units_per_block = c->type == CLASS_HUGE ? 0 : c->rdsc.nallocs;
p->header_type = user_htype;
p->unit_size = c->unit_size;
p->class_id = c->id;
p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->rdsc.alignment : 0;
return 0;
}
static const struct ctl_argument CTL_ARG(desc) = {
.dest_size = sizeof(struct pobj_alloc_class_desc),
.parsers = {
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
unit_size, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
alignment, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
units_per_block, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
header_type, pmalloc_header_type_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(class_id)[] = {
CTL_LEAF_RW(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(new)[] = {
CTL_LEAF_WO(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(alloc_class)[] = {
CTL_INDEXED(class_id),
CTL_INDEXED(new),
CTL_NODE_END
};
/*
* CTL_RUNNABLE_HANDLER(extend) -- extends the pool by the given size
*/
static int
CTL_RUNNABLE_HANDLER(extend)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(ssize_t *)arg;
if (arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect size for extend, must be larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
struct palloc_heap *heap = &pop->heap;
struct bucket *defb = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
int ret = heap_extend(heap, defb, (size_t)arg_in) < 0 ? -1 : 0;
heap_bucket_release(heap, defb);
return ret;
}
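/*
 * Usage sketch: this runnable node is exposed as the "heap.size.extend"
 * ctl string, e.g.:
 *
 *	ssize_t extend_size = 1 << 24;
 *	pmemobj_ctl_exec(pop, "heap.size.extend", &extend_size);
 *
 * The argument must be at least PMEMOBJ_MIN_PART, as validated above.
 */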
/*
* CTL_READ_HANDLER(granularity) -- reads the current heap grow size
*/
static int
CTL_READ_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t *arg_out = arg;
*arg_out = (ssize_t)pop->heap.growsize;
return 0;
}
/*
* CTL_WRITE_HANDLER(granularity) -- changes the heap grow size
*/
static int
CTL_WRITE_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(int *)arg;
if (arg_in != 0 && arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect grow size, must be 0 or larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
pop->heap.growsize = (size_t)arg_in;
return 0;
}
static const struct ctl_argument CTL_ARG(granularity) = CTL_ARG_LONG_LONG;
/*
 * CTL_READ_HANDLER(total) -- reads the total number of arenas
*/
static int
CTL_READ_HANDLER(total)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *narenas = arg;
*narenas = heap_get_narenas_total(&pop->heap);
return 0;
}
/*
 * CTL_READ_HANDLER(max) -- reads the maximum number of arenas
*/
static int
CTL_READ_HANDLER(max)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *max = arg;
*max = heap_get_narenas_max(&pop->heap);
return 0;
}
/*
 * CTL_WRITE_HANDLER(max) -- writes the maximum number of arenas
*/
static int
CTL_WRITE_HANDLER(max)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned size = *(unsigned *)arg;
int ret = heap_set_narenas_max(&pop->heap, size);
if (ret) {
LOG(1, "cannot change max arena number");
return -1;
}
return 0;
}
static const struct ctl_argument CTL_ARG(max) = CTL_ARG_LONG_LONG;
/*
 * CTL_READ_HANDLER(automatic, narenas) -- reads the number of automatic arenas
*/
static int
CTL_READ_HANDLER(automatic, narenas)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *narenas = arg;
*narenas = heap_get_narenas_auto(&pop->heap);
return 0;
}
/*
* CTL_READ_HANDLER(arena_id) -- reads the id of the arena
* assigned to the calling thread
*/
static int
CTL_READ_HANDLER(arena_id)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *arena_id = arg;
*arena_id = heap_get_thread_arena_id(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(arena_id) -- assigns the arena to the calling thread
*/
static int
CTL_WRITE_HANDLER(arena_id)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned arena_id = *(unsigned *)arg;
unsigned narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
heap_set_arena_thread(&pop->heap, arena_id);
return 0;
}
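/*
 * Usage sketch: the pair of handlers above backs the "heap.thread.arena_id"
 * ctl entry point, e.g.:
 *
 *	unsigned arena_id = 1;
 *	pmemobj_ctl_set(pop, "heap.thread.arena_id", &arena_id);
 *
 * pins the calling thread to arena 1 (arena ids are 1-based, as checked
 * above).
 */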
static const struct ctl_argument CTL_ARG(arena_id) = CTL_ARG_LONG_LONG;
/*
* CTL_WRITE_HANDLER(automatic) -- updates automatic status of the arena
*/
static int
CTL_WRITE_HANDLER(automatic)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
unsigned arena_id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
arena_id = (unsigned)idx->value;
unsigned narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
if (arg_in != 0 && arg_in != 1) {
LOG(1, "incorrect arena state, must be 0 or 1");
return -1;
}
return heap_set_arena_auto(&pop->heap, arena_id, arg_in);
}
/*
* CTL_READ_HANDLER(automatic) -- reads automatic status of the arena
*/
static int
CTL_READ_HANDLER(automatic)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
unsigned arena_id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
arena_id = (unsigned)idx->value;
unsigned narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
*arg_out = heap_get_arena_auto(&pop->heap, arena_id);
return 0;
}
static struct ctl_argument CTL_ARG(automatic) = CTL_ARG_BOOLEAN;
static const struct ctl_node CTL_NODE(size)[] = {
CTL_LEAF_RW(granularity),
CTL_LEAF_RUNNABLE(extend),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(size) -- reads usable size of specified arena
*/
static int
CTL_READ_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned arena_id;
unsigned narenas;
size_t *arena_size = arg;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
/* take index of arena */
arena_id = (unsigned)idx->value;
/* take number of arenas */
narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
/* take buckets for arena */
struct bucket **buckets;
buckets = heap_get_arena_buckets(&pop->heap, arena_id);
	/* calculate the space reserved by the arena's active buckets, in chunks */
unsigned size = 0;
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if (buckets[i] != NULL && buckets[i]->is_active)
size += buckets[i]->active_memory_block->m.size_idx;
}
*arena_size = size * CHUNKSIZE;
return 0;
}
/*
 * CTL_RUNNABLE_HANDLER(create) -- creates a new arena in the heap
*/
static int
CTL_RUNNABLE_HANDLER(create)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *arena_id = arg;
struct palloc_heap *heap = &pop->heap;
int ret = heap_arena_create(heap);
if (ret < 0)
return -1;
*arena_id = (unsigned)ret;
return 0;
}
static const struct ctl_node CTL_NODE(arena_id)[] = {
CTL_LEAF_RO(size),
CTL_LEAF_RW(automatic),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(arena)[] = {
CTL_INDEXED(arena_id),
CTL_LEAF_RUNNABLE(create),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(narenas)[] = {
CTL_LEAF_RO(automatic, narenas),
CTL_LEAF_RO(total),
CTL_LEAF_RW(max),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(thread)[] = {
CTL_LEAF_RW(arena_id),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_CHILD(alloc_class),
CTL_CHILD(arena),
CTL_CHILD(size),
CTL_CHILD(thread),
CTL_CHILD(narenas),
CTL_NODE_END
};
/*
* pmalloc_ctl_register -- registers ctl nodes for "heap" module
*/
void
pmalloc_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, heap);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/critnib.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* critnib.h -- internal definitions for critnib tree
*/
#ifndef LIBPMEMOBJ_CRITNIB_H
#define LIBPMEMOBJ_CRITNIB_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
struct critnib;
struct critnib *critnib_new(void);
void critnib_delete(struct critnib *c);
int critnib_insert(struct critnib *c, uint64_t key, void *value);
void *critnib_remove(struct critnib *c, uint64_t key);
void *critnib_get(struct critnib *c, uint64_t key);
void *critnib_find_le(struct critnib *c, uint64_t key);
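/*
 * Usage sketch (illustrative only; 'value' stands for any user pointer):
 *
 *	struct critnib *c = critnib_new();
 *	critnib_insert(c, 42, value);
 *	void *v = critnib_get(c, 42);          (exact lookup)
 *	void *le = critnib_find_le(c, 100);    (largest key <= 100)
 *	critnib_remove(c, 42);
 *	critnib_delete(c);
 */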
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/pmemops.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
#ifndef LIBPMEMOBJ_PMEMOPS_H
#define LIBPMEMOBJ_PMEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*persist_fn)(void *base, const void *, size_t, unsigned);
typedef int (*flush_fn)(void *base, const void *, size_t, unsigned);
typedef void (*drain_fn)(void *base);
typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len,
unsigned flags);
typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
struct pmem_ops {
/* for 'master' replica: with or without data replication */
persist_fn persist; /* persist function */
flush_fn flush; /* flush function */
drain_fn drain; /* drain function */
memcpy_fn memcpy; /* persistent memcpy function */
memmove_fn memmove; /* persistent memmove function */
memset_fn memset; /* persistent memset function */
void *base;
//char a;
//temp var end
struct remote_ops {
remote_read_fn read;
void *ctx;
uintptr_t base;
} remote;
void *device;
uint16_t objid;
};
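/*
 * All of the pmemops_* wrappers below simply dispatch through these
 * function pointers, passing 'base' as the first argument. For example, a
 * call such as pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr)) ends up in
 * p_ops->persist(p_ops->base, hdr, sizeof(*hdr), 0).
 */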
static force_inline int
pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->persist(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xpersist(p_ops, d, s, 0);
}
static force_inline int
pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->flush(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xflush(p_ops, d, s, 0);
}
static force_inline void
pmemops_drain(const struct pmem_ops *p_ops)
{
p_ops->drain(p_ops->base);
}
static force_inline void *
pmemops_memcpy(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memcpy(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memmove(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memmove(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c,
size_t len, unsigned flags)
{
return p_ops->memset(p_ops->base, dest, c, len, flags);
}
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/ulog.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* ulog.c -- unified log implementation
*/
#include <inttypes.h>
#include <string.h>
#include "libpmemobj.h"
#include "pmemops.h"
#include "ulog.h"
#include "obj.h"
#include "out.h"
#include "util.h"
#include "valgrind_internal.h"
/*
* Operation flag at the three most significant bits
*/
#define ULOG_OPERATION(op) ((uint64_t)(op))
#define ULOG_OPERATION_MASK ((uint64_t)(0b111ULL << 61ULL))
#define ULOG_OPERATION_FROM_OFFSET(off) (ulog_operation_type)\
((off) & ULOG_OPERATION_MASK)
#define ULOG_OFFSET_MASK (~(ULOG_OPERATION_MASK))
#define CACHELINE_ALIGN(size) ALIGN_UP(size, CACHELINE_SIZE)
#define IS_CACHELINE_ALIGNED(ptr)\
(((uintptr_t)(ptr) & (CACHELINE_SIZE - 1)) == 0)
/*
* ulog_by_offset -- calculates the ulog pointer
*/
struct ulog *
ulog_by_offset(size_t offset, const struct pmem_ops *p_ops)
{
if (offset == 0)
return NULL;
size_t aligned_offset = CACHELINE_ALIGN(offset);
return (struct ulog *)((char *)p_ops->base + aligned_offset);
}
/*
* ulog_next -- retrieves the pointer to the next ulog
*/
struct ulog *
ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops)
{
return ulog_by_offset(ulog->next, p_ops);
}
/*
 * ulog_entry_type -- returns the type of entry operation
*/
ulog_operation_type
ulog_entry_type(const struct ulog_entry_base *entry)
{
return ULOG_OPERATION_FROM_OFFSET(entry->offset);
}
/*
 * ulog_entry_offset -- returns the entry offset without the operation bits
*/
uint64_t
ulog_entry_offset(const struct ulog_entry_base *entry)
{
return entry->offset & ULOG_OFFSET_MASK;
}
/*
* ulog_entry_size -- returns the size of a ulog entry
*/
size_t
ulog_entry_size(const struct ulog_entry_base *entry)
{
struct ulog_entry_buf *eb;
switch (ulog_entry_type(entry)) {
case ULOG_OPERATION_AND:
case ULOG_OPERATION_OR:
case ULOG_OPERATION_SET:
return sizeof(struct ulog_entry_val);
case ULOG_OPERATION_BUF_SET:
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)entry;
return CACHELINE_ALIGN(
sizeof(struct ulog_entry_buf) + eb->size);
default:
ASSERT(0);
}
return 0;
}
/*
* ulog_entry_valid -- (internal) checks if a ulog entry is valid
 * Returns 1 if the entry is valid, otherwise 0 is returned.
*/
static int
ulog_entry_valid(struct ulog *ulog, const struct ulog_entry_base *entry)
{
if (entry->offset == 0)
return 0;
size_t size;
struct ulog_entry_buf *b;
switch (ulog_entry_type(entry)) {
case ULOG_OPERATION_BUF_CPY:
case ULOG_OPERATION_BUF_SET:
size = ulog_entry_size(entry);
b = (struct ulog_entry_buf *)entry;
uint64_t csum = util_checksum_compute(b, size,
&b->checksum, 0);
csum = util_checksum_seq(&ulog->gen_num,
sizeof(ulog->gen_num), csum);
if (b->checksum != csum)
return 0;
break;
default:
break;
}
return 1;
}
/*
* ulog_construct -- initializes the ulog structure
*/
void
ulog_construct(uint64_t offset, size_t capacity, uint64_t gen_num,
int flush, uint64_t flags, const struct pmem_ops *p_ops)
{
struct ulog *ulog = ulog_by_offset(offset, p_ops);
ASSERTne(ulog, NULL);
size_t diff = OBJ_PTR_TO_OFF(p_ops->base, ulog) - offset;
if (diff > 0)
capacity = ALIGN_DOWN(capacity - diff, CACHELINE_SIZE);
VALGRIND_ADD_TO_TX(ulog, SIZEOF_ULOG(capacity));
ulog->capacity = capacity;
ulog->checksum = 0;
ulog->next = 0;
ulog->gen_num = gen_num;
ulog->flags = flags;
memset(ulog->unused, 0, sizeof(ulog->unused));
/* we only need to zero out the header of ulog's first entry */
size_t zeroed_data = CACHELINE_ALIGN(sizeof(struct ulog_entry_base));
if (flush) {
pmemops_xflush(p_ops, ulog, sizeof(*ulog),
PMEMOBJ_F_RELAXED);
pmemops_memset(p_ops, ulog->data, 0, zeroed_data,
PMEMOBJ_F_MEM_NONTEMPORAL |
PMEMOBJ_F_MEM_NODRAIN |
PMEMOBJ_F_RELAXED);
} else {
/*
* We want to avoid replicating zeroes for every ulog of every
		 * lane; to do that, we need to use plain old memset.
*/
memset(ulog->data, 0, zeroed_data);
}
VALGRIND_REMOVE_FROM_TX(ulog, SIZEOF_ULOG(capacity));
}
/*
* ulog_foreach_entry -- iterates over every existing entry in the ulog
*/
int
ulog_foreach_entry(struct ulog *ulog,
ulog_entry_cb cb, void *arg, const struct pmem_ops *ops, struct ulog *ulognvm)
{
struct ulog_entry_base *e;
int ret = 0;
for (struct ulog *r = ulog; r != NULL; r = ulog_next(r, ops)) {
for (size_t offset = 0; offset < r->capacity; ) {
e = (struct ulog_entry_base *)(r->data + offset);
if (!ulog_entry_valid(ulog, e))
return ret;
if ((ret = cb(e, arg, ops)) != 0){
return ret;
}
offset += ulog_entry_size(e);
}
}
return ret;
}
#ifdef USE_NDP_REDO
int
ulog_foreach_entry_ndp(struct ulog *ulogdram, struct ulog *ulognvm,
ulog_entry_cb_ndp cb, void *arg, const struct pmem_ops *ops)
{
struct ulog_entry_base *e;
struct ulog_entry_base *f;
int ret = 0;
struct ulog *s = ulognvm;
for (struct ulog *r = ulogdram; r != NULL; r = ulog_next(r, ops)) {
for (size_t offset = 0; offset < r->capacity; ) {
e = (struct ulog_entry_base *)(r->data + offset);
f = (struct ulog_entry_base *)(s->data + offset);
if (!ulog_entry_valid(ulogdram, e))
return ret;
if ((ret = cb(e,f, arg, ops)) != 0)
return ret;
offset += ulog_entry_size(e);
}
s = ulog_next(s, ops);
}
return ret;
}
#endif
/*
* ulog_capacity -- (internal) returns the total capacity of the ulog
*/
size_t
ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
const struct pmem_ops *p_ops)
{
size_t capacity = ulog_base_bytes;
/* skip the first one, we count it in 'ulog_base_bytes' */
while ((ulog = ulog_next(ulog, p_ops)) != NULL) {
capacity += ulog->capacity;
}
return capacity;
}
/*
* ulog_rebuild_next_vec -- rebuilds the vector of next entries
*/
void
ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
const struct pmem_ops *p_ops)
{
do {
if (ulog->next != 0)
VEC_PUSH_BACK(next, ulog->next);
} while ((ulog = ulog_next(ulog, p_ops)) != NULL);
}
/*
* ulog_reserve -- reserves new capacity in the ulog
*/
int
ulog_reserve(struct ulog *ulog,
size_t ulog_base_nbytes, size_t gen_num,
int auto_reserve, size_t *new_capacity,
ulog_extend_fn extend, struct ulog_next *next,
const struct pmem_ops *p_ops)
{
if (!auto_reserve) {
LOG(1, "cannot auto reserve next ulog");
return -1;
}
size_t capacity = ulog_base_nbytes;
uint64_t offset;
VEC_FOREACH(offset, next) {
ulog = ulog_by_offset(offset, p_ops);
ASSERTne(ulog, NULL);
capacity += ulog->capacity;
}
while (capacity < *new_capacity) {
if (extend(p_ops->base, &ulog->next, gen_num) != 0)
return -1;
VEC_PUSH_BACK(next, ulog->next);
ulog = ulog_next(ulog, p_ops);
ASSERTne(ulog, NULL);
capacity += ulog->capacity;
}
*new_capacity = capacity;
return 0;
}
/*
* ulog_checksum -- (internal) calculates ulog checksum
*/
static int
ulog_checksum(struct ulog *ulog, size_t ulog_base_bytes, int insert)
{
return util_checksum(ulog, SIZEOF_ULOG(ulog_base_bytes),
&ulog->checksum, insert, 0);
}
/*
* ulog_store -- stores the transient src ulog in the
* persistent dest ulog
*
* The source and destination ulogs must be cacheline aligned.
*/
void
ulog_store(struct ulog *dest, struct ulog *src, size_t nbytes,
size_t ulog_base_nbytes, size_t ulog_total_capacity,
struct ulog_next *next, const struct pmem_ops *p_ops)
{
/*
* First, store all entries over the base capacity of the ulog in
* the next logs.
* Because the checksum is only in the first part, we don't have to
* worry about failsafety here.
*/
struct ulog *ulog = dest;
size_t offset = ulog_base_nbytes;
/*
* Copy at least 8 bytes more than needed. If the user always
* properly uses entry creation functions, this will zero-out the
* potential leftovers of the previous log. Since all we really need
* to zero is the offset, sizeof(struct redo_log_entry_base) is enough.
* If the nbytes is aligned, an entire cacheline needs to be
* additionally zeroed.
* But the checksum must be calculated based solely on actual data.
* If the ulog total capacity is equal to the size of the
* ulog being stored (nbytes == ulog_total_capacity), then there's
* nothing to invalidate because the entire log data will
* be overwritten.
*/
size_t checksum_nbytes = MIN(ulog_base_nbytes, nbytes);
if (nbytes != ulog_total_capacity)
nbytes = CACHELINE_ALIGN(nbytes +
sizeof(struct ulog_entry_base));
ASSERT(nbytes <= ulog_total_capacity);
size_t base_nbytes = MIN(ulog_base_nbytes, nbytes);
size_t next_nbytes = nbytes - base_nbytes;
size_t nlog = 0;
while (next_nbytes > 0) {
ulog = ulog_by_offset(VEC_ARR(next)[nlog++], p_ops);
ASSERTne(ulog, NULL);
size_t copy_nbytes = MIN(next_nbytes, ulog->capacity);
next_nbytes -= copy_nbytes;
ASSERT(IS_CACHELINE_ALIGNED(ulog->data));
VALGRIND_ADD_TO_TX(ulog->data, copy_nbytes);
pmemops_memcpy(p_ops,
ulog->data,
src->data + offset,
copy_nbytes,
PMEMOBJ_F_MEM_WC |
PMEMOBJ_F_MEM_NODRAIN |
PMEMOBJ_F_RELAXED);
VALGRIND_REMOVE_FROM_TX(ulog->data, copy_nbytes);
offset += copy_nbytes;
}
if (nlog != 0)
pmemops_drain(p_ops);
/*
* Then, calculate the checksum and store the first part of the
* ulog.
*/
size_t old_capacity = src->capacity;
src->capacity = base_nbytes;
src->next = VEC_SIZE(next) == 0 ? 0 : VEC_FRONT(next);
ulog_checksum(src, checksum_nbytes, 1);
pmemops_memcpy(p_ops, dest, src,
SIZEOF_ULOG(base_nbytes),
PMEMOBJ_F_MEM_WC);
src->capacity = old_capacity;
}
/*
* ulog_entry_val_create -- creates a new log value entry in the ulog
*
* This function requires at least a cacheline of space to be available in the
* ulog.
*/
struct ulog_entry_val *
ulog_entry_val_create(struct ulog *ulog, size_t offset, uint64_t *dest,
uint64_t value, ulog_operation_type type,
const struct pmem_ops *p_ops)
{
struct ulog_entry_val *e =
(struct ulog_entry_val *)(ulog->data + offset);
struct {
struct ulog_entry_val v;
struct ulog_entry_base zeroes;
} data;
COMPILE_ERROR_ON(sizeof(data) != sizeof(data.v) + sizeof(data.zeroes));
/*
* Write a little bit more to the buffer so that the next entry that
* resides in the log is erased. This will prevent leftovers from
* a previous, clobbered, log from being incorrectly applied.
*/
data.zeroes.offset = 0;
data.v.base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
data.v.base.offset |= ULOG_OPERATION(type);
data.v.value = value;
pmemops_memcpy(p_ops, e, &data, sizeof(data),
PMEMOBJ_F_MEM_NOFLUSH | PMEMOBJ_F_RELAXED);
return e;
}
/*
* ulog_clobber_entry -- zeroes out a single log entry header
*/
void
ulog_clobber_entry(const struct ulog_entry_base *e,
const struct pmem_ops *p_ops)
{
static const size_t aligned_entry_size =
CACHELINE_ALIGN(sizeof(struct ulog_entry_base));
VALGRIND_ADD_TO_TX(e, aligned_entry_size);
pmemops_memset(p_ops, (char *)e, 0, aligned_entry_size,
PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(e, aligned_entry_size);
//printf("ulog entry base %lx %lx\n", (uint64_t)e, (uint64_t)aligned_entry_size);
}
char flag_to_sel_log = 0;
/*
* ulog_entry_buf_create -- atomically creates a buffer entry in the log
*/
#ifdef USE_NDP_CLOBBER
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t gen_num,
uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops, int clear_next_header)
#else
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t gen_num,
uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops)
#endif
{
struct ulog_entry_buf *e =
(struct ulog_entry_buf *)(ulog->data + offset);
/*
* Depending on the size of the source buffer, we might need to perform
* up to three separate copies:
* 1. The first cacheline, 24b of metadata and 40b of data
* If there's still data to be logged:
	 *	2. The entire remainder of the data aligned down to cacheline,
* for example, if there's 150b left, this step will copy only
* 128b.
* Now, we are left with between 0 to 63 bytes. If nonzero:
* 3. Create a stack allocated cacheline-sized buffer, fill in the
* remainder of the data, and copy the entire cacheline.
*
* This is done so that we avoid a cache-miss on misaligned writes.
*/
/*
struct ulog_entry_buf *b = alloca(CACHELINE_SIZE);
b->base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
b->base.offset |= ULOG_OPERATION(type);
b->size = size;
b->checksum = 0;
*/
//printf("orig addr %lx\n",(uint64_t)p_ops->device);
//printf("ulog %ld\n",size);
//basic command write
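	/*
	 * The four 64-bit stores below stage an append command for the
	 * near-memory device: the target slot in the ulog, the destination
	 * offset (with the operation type packed into its top bits), the
	 * source buffer address, and a word combining the object id, the
	 * opcode and the payload size. The final 32-bit store to index 255
	 * of the device window issues the command (opcode 7, or 8 when the
	 * next entry header is also to be cleared).
	 */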
uint64_t ulog_offset = (uint64_t)ulog->data + (uint64_t)offset;
uint64_t base_offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
base_offset |= ULOG_OPERATION(type);
//printf("size %lx\n", size);
//printf("ulog base %lx\n", base_offset);
//printf("base %lx src %lx ulog %lx\n",base_offset,(uint64_t)src,ulog_offset);
//printf("objid %d\n",p_ops->objid);
*((uint64_t*)p_ops->device) = ulog_offset;
*((uint64_t*)(p_ops->device)+1) = base_offset;
*((uint64_t*)(p_ops->device)+2) = (uint64_t)src;
*((uint64_t*)(p_ops->device)+3) = ((uint64_t)(((p_ops->objid) << 16)| 7) << 32) | size;
#ifdef USE_NDP_CLOBBER
if(clear_next_header==1)
*(((uint32_t*)(p_ops->device))+255) = (uint32_t)(((p_ops->objid) << 16)| 8);
else
*(((uint32_t*)(p_ops->device))+255) = (uint32_t)(((p_ops->objid) << 16)| 7);
#else
*(((uint32_t*)(p_ops->device))+255) = (uint32_t)(((p_ops->objid) << 16)| 7);
#endif
//end of basic command write
//Command write optimization
/*
struct ulog_cmd_packet ulog_packet;
ulog_packet.ulog_offset = ulog_offset & 0xffffffff;
ulog_packet.base_offset = base_offset & 0xffffffff;
ulog_packet.src = (uint64_t)src & 0xffffffff;
ulog_packet.size = size & 0xffffffff;
memcpy( p_ops->device, &ulog_packet, sizeof(ulog_packet) );
*(((uint32_t*)(p_ops->device))+255) = 7;
*/
//end of Command write optimization
// while(*((uint32_t*)(p_ops->device)+255) == 7){
// printf("waiting\n");
// }
// while(*((uint32_t*)(p_ops->device)+11) != 3){
// printf("waiting %d\n",*((uint32_t*)(p_ops->device)+11));
// }
/* size_t bdatasize = CACHELINE_SIZE - sizeof(struct ulog_entry_buf);
size_t ncopy = MIN(size, bdatasize);
memcpy(b->data, src, ncopy);
memset(b->data + ncopy, 0, bdatasize - ncopy);
size_t remaining_size = ncopy > size ? 0 : size - ncopy;
char *srcof = (char *)src + ncopy;
size_t rcopy = ALIGN_DOWN(remaining_size, CACHELINE_SIZE);
size_t lcopy = remaining_size - rcopy;
uint8_t last_cacheline[CACHELINE_SIZE];
if (lcopy != 0) {
memcpy(last_cacheline, srcof + rcopy, lcopy);
memset(last_cacheline + lcopy, 0, CACHELINE_SIZE - lcopy);
}
if (rcopy != 0) {
void *dest = e->data + ncopy;
ASSERT(IS_CACHELINE_ALIGNED(dest));
VALGRIND_ADD_TO_TX(dest, rcopy);
pmemops_memcpy(p_ops, dest, srcof, rcopy,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(dest, rcopy);
}
if (lcopy != 0) {
void *dest = e->data + ncopy + rcopy;
ASSERT(IS_CACHELINE_ALIGNED(dest));
VALGRIND_ADD_TO_TX(dest, CACHELINE_SIZE);
pmemops_memcpy(p_ops, dest, last_cacheline, CACHELINE_SIZE,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(dest, CACHELINE_SIZE);
}
b->checksum = util_checksum_seq(b, CACHELINE_SIZE, 0);
if (rcopy != 0)
b->checksum = util_checksum_seq(srcof, rcopy, b->checksum);
if (lcopy != 0)
b->checksum = util_checksum_seq(last_cacheline,
CACHELINE_SIZE, b->checksum);
b->checksum = util_checksum_seq(&gen_num, sizeof(gen_num),
b->checksum);
ASSERT(IS_CACHELINE_ALIGNED(e));
VALGRIND_ADD_TO_TX(e, CACHELINE_SIZE);
pmemops_memcpy(p_ops, e, b, CACHELINE_SIZE,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(e, CACHELINE_SIZE);
pmemops_drain(p_ops);
*/
/*
* Allow having uninitialized data in the buffer - this requires marking
* data as defined so that comparing checksums is not reported as an
* error by memcheck.
*/
#if VG_MEMCHECK_ENABLED
if (On_memcheck) {
// VALGRIND_MAKE_MEM_DEFINED(e->data, ncopy + rcopy + lcopy);
VALGRIND_MAKE_MEM_DEFINED(&e->checksum, sizeof(e->checksum));
}
#endif
//ASSERT(ulog_entry_valid(ulog, &e->base));
return e;
}
/*struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t gen_num,
uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops)
{
struct ulog_entry_buf *e =
(struct ulog_entry_buf *)(ulog->data + offset);
*/
/*
* Depending on the size of the source buffer, we might need to perform
* up to three separate copies:
* 1. The first cacheline, 24b of metadata and 40b of data
* If there's still data to be logged:
* 2. The entire remainder of data data aligned down to cacheline,
* for example, if there's 150b left, this step will copy only
* 128b.
* Now, we are left with between 0 to 63 bytes. If nonzero:
* 3. Create a stack allocated cacheline-sized buffer, fill in the
* remainder of the data, and copy the entire cacheline.
*
* This is done so that we avoid a cache-miss on misaligned writes.
*/
/*
struct ulog_entry_buf *b = alloca(CACHELINE_SIZE);
b->base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
b->base.offset |= ULOG_OPERATION(type);
b->size = size;
b->checksum = 0;
size_t bdatasize = CACHELINE_SIZE - sizeof(struct ulog_entry_buf);
size_t ncopy = MIN(size, bdatasize);
memcpy(b->data, src, ncopy);
memset(b->data + ncopy, 0, bdatasize - ncopy);
size_t remaining_size = ncopy > size ? 0 : size - ncopy;
char *srcof = (char *)src + ncopy;
size_t rcopy = ALIGN_DOWN(remaining_size, CACHELINE_SIZE);
size_t lcopy = remaining_size - rcopy;
uint8_t last_cacheline[CACHELINE_SIZE];
if (lcopy != 0) {
memcpy(last_cacheline, srcof + rcopy, lcopy);
memset(last_cacheline + lcopy, 0, CACHELINE_SIZE - lcopy);
}
if (rcopy != 0) {
void *dest = e->data + ncopy;
ASSERT(IS_CACHELINE_ALIGNED(dest));
VALGRIND_ADD_TO_TX(dest, rcopy);
pmemops_memcpy(p_ops, dest, srcof, rcopy,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(dest, rcopy);
}
if (lcopy != 0) {
void *dest = e->data + ncopy + rcopy;
ASSERT(IS_CACHELINE_ALIGNED(dest));
VALGRIND_ADD_TO_TX(dest, CACHELINE_SIZE);
pmemops_memcpy(p_ops, dest, last_cacheline, CACHELINE_SIZE,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(dest, CACHELINE_SIZE);
}
b->checksum = util_checksum_seq(b, CACHELINE_SIZE, 0);
if (rcopy != 0)
b->checksum = util_checksum_seq(srcof, rcopy, b->checksum);
if (lcopy != 0)
b->checksum = util_checksum_seq(last_cacheline,
CACHELINE_SIZE, b->checksum);
b->checksum = util_checksum_seq(&gen_num, sizeof(gen_num),
b->checksum);
ASSERT(IS_CACHELINE_ALIGNED(e));
VALGRIND_ADD_TO_TX(e, CACHELINE_SIZE);
pmemops_memcpy(p_ops, e, b, CACHELINE_SIZE,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(e, CACHELINE_SIZE);
pmemops_drain(p_ops);
*/
/*
* Allow having uninitialized data in the buffer - this requires marking
* data as defined so that comparing checksums is not reported as an
* error by memcheck.
*/
/*#if VG_MEMCHECK_ENABLED
if (On_memcheck) {
VALGRIND_MAKE_MEM_DEFINED(e->data, ncopy + rcopy + lcopy);
VALGRIND_MAKE_MEM_DEFINED(&e->checksum, sizeof(e->checksum));
}
#endif
ASSERT(ulog_entry_valid(ulog, &e->base));
return e;
}*/
/*
* ulog_entry_apply -- applies modifications of a single ulog entry
*/
#ifdef USE_NDP_REDO
/*
void
ulog_entry_apply_ndp(const struct ulog_entry_base *e, const struct ulog_entry_base *f, int persist,
const struct pmem_ops *p_ops)
{
ulog_operation_type t = ulog_entry_type(e);
uint64_t offset = ulog_entry_offset(e);
size_t dst_size = sizeof(uint64_t);
uint64_t *dst = (uint64_t *)((uintptr_t)p_ops->base + offset);
struct ulog_entry_val *ev;
struct ulog_entry_buf *eb;
//flush_fn fn = persist ? p_ops->persist : p_ops->flush;
switch (t) {
case ULOG_OPERATION_AND:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst &= ev->value;
// fn(p_ops->base, dst, sizeof(uint64_t),
// PMEMOBJ_F_RELAXED);
//printf("orig and %llx\n",ULOG_OPERATION_AND);
break;
case ULOG_OPERATION_OR:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst |= ev->value;
// fn(p_ops->base, dst, sizeof(uint64_t),
// PMEMOBJ_F_RELAXED);
//printf("orig or %llx\n",ULOG_OPERATION_OR);
break;
case ULOG_OPERATION_SET:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst = ev->value;
// fn(p_ops->base, dst, sizeof(uint64_t),
// PMEMOBJ_F_RELAXED);
//printf("orig set %llx data %lx\n",ULOG_OPERATION_SET, ev->value);
break;
case ULOG_OPERATION_BUF_SET:
eb = (struct ulog_entry_buf *)e;
dst_size = eb->size;
VALGRIND_ADD_TO_TX(dst, dst_size);
pmemops_memset(p_ops, dst, *eb->data, eb->size,
PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
//printf("orig buf set %llx data %x\n",ULOG_OPERATION_BUF_SET,*eb->data);
break;
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)e;
dst_size = eb->size;
VALGRIND_ADD_TO_TX(dst, dst_size);
pmemops_memcpy(p_ops, dst, eb->data, eb->size,
PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
//printf("orig buf copy %llx\n",ULOG_OPERATION_BUF_CPY);
break;
default:
ASSERT(0);
}
VALGRIND_REMOVE_FROM_TX(dst, dst_size);
}
*/
void
ulog_entry_apply_ndp(const struct ulog_entry_base *e, const struct ulog_entry_base *f, int persist,
const struct pmem_ops *p_ops)
{
/*
* Send the entry to the NDP device as a single command:
*	- destination: absolute address derived from the entry's offset
*	- source: address of ev->value or eb->data, with the ulog
*	  operation type packed into the upper bits of the source word
*	- size: number of bytes to apply
*/
ulog_operation_type t = ulog_entry_type(e);
uint64_t offset = ulog_entry_offset(e);
uint64_t * dst = (uint64_t *)((uintptr_t)p_ops->base + offset);
struct ulog_entry_val *ev;
struct ulog_entry_val *ev1;
struct ulog_entry_buf *eb;
uint64_t src = 0;
uint64_t size = sizeof(uint64_t);
switch (t) {
case ULOG_OPERATION_AND:
ev = (struct ulog_entry_val *)f;
ev1 = (struct ulog_entry_val *)e;
src = (uint64_t)ULOG_OPERATION_AND | (uint64_t)(&(ev->value));
*dst &= ev1->value;
//base_offset = *((uint64_t *)dst) & (uint64_t)ev->value;
//printf("and %llx src data %lx dst data %lx src %p dest %lx\n",ULOG_OPERATION_SET, ev->value,*((uint64_t *)dst),&(ev->value),dst);
VALGRIND_ADD_TO_TX(dst, size);
break;
case ULOG_OPERATION_OR:
ev = (struct ulog_entry_val *)f;
ev1 = (struct ulog_entry_val *)e;
src = (uint64_t)ULOG_OPERATION_OR | (uint64_t)(&(ev->value));
*dst |= ev1->value;
//base_offset = *((uint64_t *)dst) | (uint64_t)ev->value;
//printf("or %llx src data %lx dst data %lx src %p dest %lx\n",ULOG_OPERATION_SET, ev->value,*((uint64_t *)dst),&(ev->value),dst);
VALGRIND_ADD_TO_TX(dst, size);
break;
case ULOG_OPERATION_SET:
ev = (struct ulog_entry_val *)f;
ev1 = (struct ulog_entry_val *)e;
src = (uint64_t)ULOG_OPERATION_SET | (uint64_t)(&(ev->value));
*dst = ev1->value;
//base_offset = (uint64_t)ev->value;
//printf("set value address %p\n",&(ev->value));
//printf("set %llx src data %lx dst data %lx src %p dest %lx\n",ULOG_OPERATION_SET, ev->value,*((uint64_t *)dst),&(ev->value),dst);
VALGRIND_ADD_TO_TX(dst, size);
break;
case ULOG_OPERATION_BUF_SET:
eb = (struct ulog_entry_buf *)f;
src = (uint64_t)ULOG_OPERATION_BUF_SET | (uint64_t)eb->data;
size = eb->size;
//printf("set buf %llx src %lx dest %lx\n",ULOG_OPERATION_SET, (uint64_t)eb->data,dst);
VALGRIND_ADD_TO_TX(dst, size);
break;
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)f;
src = ((uint64_t)ULOG_OPERATION_BUF_CPY) | (uint64_t)(&(eb->data));
size = eb->size;
//printf("set cpy %llx src %p dest %lx\n",ULOG_OPERATION_SET, &(eb->data),dst);
VALGRIND_ADD_TO_TX(dst, size);
break;
default:
src = 0;
size = 0;
ASSERT(0);
}
//p_ops->persist(p_ops->base, (uint64_t *)dst, size,
// PMEMOBJ_F_RELAXED);
//printf("ndp dest %lx\nsrc %lx\nsize %lx\n",dst,src,size );
//printf("redo operation %lx\n",t);
//printf("objid %d\n",p_ops->objid);
//printf("bufset %llx\n", ULOG_OPERATION_BUF_SET);
//printf("bufcpy %llx\n", ULOG_OPERATION_BUF_CPY);
*((uint64_t*)p_ops->device) = (uint64_t)dst;
*((uint64_t*)(p_ops->device)+2) = src;
*((uint64_t*)(p_ops->device)+3) = ((uint64_t)(((p_ops->objid) << 16)| 9) <<32) |size;
*(((uint32_t*)(p_ops->device))+255) = (uint32_t)(((p_ops->objid) << 16)| 9);
VALGRIND_REMOVE_FROM_TX(dst, size);
}
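/*
* In the NDP redo path each log entry is offloaded as one command: word 0
* holds the destination address, word 2 the source address with the ulog
* operation type in its upper bits, and word 3 packs
* ((objid << 16) | 9) << 32 | size; the same (objid << 16) | 9 value is
* then written to the 32-bit doorbell. The AND/OR/SET cases also apply the
* value on the CPU before issuing the command.
*/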
#endif
void
ulog_entry_apply(const struct ulog_entry_base *e, int persist,
const struct pmem_ops *p_ops)
{
ulog_operation_type t = ulog_entry_type(e);
uint64_t offset = ulog_entry_offset(e);
size_t dst_size = sizeof(uint64_t);
uint64_t *dst = (uint64_t *)((uintptr_t)p_ops->base + offset);
struct ulog_entry_val *ev;
struct ulog_entry_buf *eb;
flush_fn f = persist ? p_ops->persist : p_ops->flush;
//
/*
PMEMobjpool *pop = pmemobj_createU(path, layout, poolsize, mode);
pop->p_ops.device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0");
pop->p_ops.objid = (uint16_t)pop->run_id;
PMEMoid root = pmemobj_root(pop, sizeof(uint64_t));
uint64_t* tmp = pmemobj_direct(root);
*/
//
switch (t) {
case ULOG_OPERATION_AND:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst &= ev->value;
*(dst+256) = 7;
f(p_ops->base, dst, sizeof(uint64_t),
PMEMOBJ_F_RELAXED);
//printf("orig and %llx\n",ULOG_OPERATION_AND);
break;
case ULOG_OPERATION_OR:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst |= ev->value;
f(p_ops->base, dst, sizeof(uint64_t),
PMEMOBJ_F_RELAXED);
//printf("orig or %llx\n",ULOG_OPERATION_OR);
break;
case ULOG_OPERATION_SET:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst = ev->value;
f(p_ops->base, dst, sizeof(uint64_t),
PMEMOBJ_F_RELAXED);
//printf("orig set %llx data %lx\n",ULOG_OPERATION_SET, ev->value);
break;
case ULOG_OPERATION_BUF_SET:
eb = (struct ulog_entry_buf *)e;
dst_size = eb->size;
VALGRIND_ADD_TO_TX(dst, dst_size);
pmemops_memset(p_ops, dst, *eb->data, eb->size,
PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
//printf("orig buf set %llx data %x\n",ULOG_OPERATION_BUF_SET,*eb->data);
break;
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)e;
dst_size = eb->size;
VALGRIND_ADD_TO_TX(dst, dst_size);
pmemops_memcpy(p_ops, dst, eb->data, eb->size,
PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
//printf("orig buf copy %llx\n",ULOG_OPERATION_BUF_CPY);
break;
default:
ASSERT(0);
}
VALGRIND_REMOVE_FROM_TX(dst, dst_size);
}
/*
* ulog_process_entry -- (internal) processes a single ulog entry
*/
static int
ulog_process_entry(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops)
{
ulog_entry_apply(e, 0, p_ops);
return 0;
}
#ifdef USE_NDP_REDO
//static int
//ulog_process_entry_ndp(struct ulog_entry_base *e, struct ulog_entry_base *f, void *arg,
// const struct pmem_ops *p_ops)
static int
ulog_process_entry_ndp(struct ulog_entry_base *e, struct ulog_entry_base *f, void *arg,
const struct pmem_ops *p_ops)
{
ulog_entry_apply_ndp(e, f, 0, p_ops);
return 0;
}
#endif
/*
* ulog_inc_gen_num -- (internal) increments gen num in the ulog
*/
static void
ulog_inc_gen_num(struct ulog *ulog, const struct pmem_ops *p_ops)
{
size_t gns = sizeof(ulog->gen_num);
VALGRIND_ADD_TO_TX(&ulog->gen_num, gns);
ulog->gen_num++;
if (p_ops)
pmemops_persist(p_ops, &ulog->gen_num, gns);
else
VALGRIND_SET_CLEAN(&ulog->gen_num, gns);
VALGRIND_REMOVE_FROM_TX(&ulog->gen_num, gns);
}
/*
* ulog_free_next -- free all ulogs starting from the indicated one.
* Returns 1 if any ulog has been freed or unpinned, 0 otherwise.
*/
int
ulog_free_next(struct ulog *u, const struct pmem_ops *p_ops,
ulog_free_fn ulog_free, ulog_rm_user_buffer_fn user_buff_remove,
uint64_t flags)
{
int ret = 0;
if (u == NULL)
return ret;
VEC(, uint64_t *) ulogs_internal_except_first;
VEC_INIT(&ulogs_internal_except_first);
/*
* last_internal - pointer to the last found ulog allocated
* internally by libpmemobj
*/
struct ulog *last_internal = u;
struct ulog *current;
/* iterate all linked logs and unpin user defined */
while ((flags & ULOG_ANY_USER_BUFFER) &&
last_internal != NULL && last_internal->next != 0) {
current = ulog_by_offset(last_internal->next, p_ops);
/*
* handle case with user logs one after the other
* or mixed user and internal logs
*/
while (current != NULL &&
(current->flags & ULOG_USER_OWNED)) {
last_internal->next = current->next;
pmemops_persist(p_ops, &last_internal->next,
sizeof(last_internal->next));
user_buff_remove(p_ops->base, current);
current = ulog_by_offset(last_internal->next, p_ops);
/* any ulog has been unpinned - set return value to 1 */
ret = 1;
}
last_internal = ulog_by_offset(last_internal->next, p_ops);
}
while (u->next != 0) {
if (VEC_PUSH_BACK(&ulogs_internal_except_first,
&u->next) != 0) {
/* this is fine, it will just use more pmem */
LOG(1, "unable to free transaction logs memory");
goto out;
}
u = ulog_by_offset(u->next, p_ops);
}
/* free non-user defined logs */
uint64_t *ulog_ptr;
VEC_FOREACH_REVERSE(ulog_ptr, &ulogs_internal_except_first) {
ulog_free(p_ops->base, ulog_ptr);
ret = 1;
}
out:
VEC_DELETE(&ulogs_internal_except_first);
return ret;
}
/*
* ulog_clobber -- zeroes the metadata of the ulog
*/
void
ulog_clobber(struct ulog *dest, struct ulog_next *next,
const struct pmem_ops *p_ops)
{
struct ulog empty;
memset(&empty, 0, sizeof(empty));
if (next != NULL)
empty.next = VEC_SIZE(next) == 0 ? 0 : VEC_FRONT(next);
else
empty.next = dest->next;
pmemops_memcpy(p_ops, dest, &empty, sizeof(empty),
PMEMOBJ_F_MEM_WC);
}
/*
* ulog_clobber_data -- zeroes out 'nbytes' of data in the logs
*/
int
ulog_clobber_data(struct ulog *ulog_first,
size_t nbytes, size_t ulog_base_nbytes,
struct ulog_next *next, ulog_free_fn ulog_free,
ulog_rm_user_buffer_fn user_buff_remove,
const struct pmem_ops *p_ops, unsigned flags)
{
ASSERTne(ulog_first, NULL);
/* In case of abort we need to increment counter in the first ulog. */
if (flags & ULOG_INC_FIRST_GEN_NUM)
ulog_inc_gen_num(ulog_first, p_ops);
/*
* In the case of abort or commit, we are not going to free all ulogs,
* but rather increment the generation number to be consistent in the
* first two ulogs.
*/
size_t second_offset = VEC_SIZE(next) == 0 ? 0 : *VEC_GET(next, 0);
struct ulog *ulog_second = ulog_by_offset(second_offset, p_ops);
if (ulog_second && !(flags & ULOG_FREE_AFTER_FIRST))
/*
* We want to keep gen_nums consistent between ulogs.
* If the transaction commits successfully we'll reuse the
* second buffer (the third and subsequent ones will be freed anyway).
* If the application crashes we'll free the 2nd ulog on
* recovery, which means we'll never read the gen_num of the
* second ulog in case of an ungraceful shutdown.
*/
ulog_inc_gen_num(ulog_second, NULL);
/* The ULOG_ANY_USER_BUFFER flag indicates that more than one ulog exists */
if (flags & ULOG_ANY_USER_BUFFER)
ASSERTne(ulog_second, NULL);
struct ulog *u;
/*
* only if there was any user buffer does it make sense to check
* whether the second ulog was allocated by the user
*/
if ((flags & ULOG_ANY_USER_BUFFER) &&
(ulog_second->flags & ULOG_USER_OWNED)) {
/*
* function ulog_free_next() starts from 'next' ulog,
* so to start from the second ulog we need to
* pass the first one
*/
u = ulog_first;
} else {
/*
* To make sure that transaction logs do not occupy too
* much space, all of them, except for the first one,
* are freed at the end of the operation. The reasoning for
* this is that pmalloc() is a relatively cheap operation for
* transactions where many hundreds of kilobytes are being
* snapshotted, and so, allocating and freeing the buffer for
* each transaction is an acceptable overhead for the average
* case.
*/
if (flags & ULOG_FREE_AFTER_FIRST)
u = ulog_first;
else
u = ulog_second;
}
if (u == NULL)
return 0;
return ulog_free_next(u, p_ops, ulog_free, user_buff_remove, flags);
}
/*
* ulog_process -- process ulog entries
*/
void
ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
#ifdef DEBUG
if (check)
ulog_check(ulog, check, p_ops);
#endif
//clock_t start,end;
//double callbacktime = 0;
//start = clock();
ulog_foreach_entry(ulog, ulog_process_entry, NULL, p_ops,NULL);
//end = clock();
//callbacktime += ((double) (end - start)) / CLOCKS_PER_SEC;
//printf("call back %f\n",callbacktime);
pmemops_drain(p_ops);
}
#ifdef USE_NDP_REDO
void
ulog_process_ndp(struct ulog *ulog, struct ulog *ulogdram, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
#ifdef DEBUG
if (check)
ulog_check(ulogdram, check, p_ops);
#endif
//ulog_foreach_entry(ulog, ulog_process_entry_ndp, NULL, p_ops,ulog);
ulog_foreach_entry_ndp(ulogdram, ulog, ulog_process_entry_ndp, NULL, p_ops);
pmemops_drain(p_ops);
}
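/*
* NDP variant of ulog_process(): walks the DRAM copy of the log in
* lockstep with the NVM log and applies each pair of entries through
* ulog_entry_apply_ndp(), which offloads the modification to the device.
*/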
#endif
/*
* ulog_base_nbytes -- (internal) counts the actual number of bytes
* occupied by the ulog
*/
size_t
ulog_base_nbytes(struct ulog *ulog)
{
size_t offset = 0;
struct ulog_entry_base *e;
for (offset = 0; offset < ulog->capacity; ) {
e = (struct ulog_entry_base *)(ulog->data + offset);
if (!ulog_entry_valid(ulog, e))
break;
offset += ulog_entry_size(e);
}
return offset;
}
/*
* ulog_recovery_needed -- checks if the logs needs recovery
*/
int
ulog_recovery_needed(struct ulog *ulog, int verify_checksum)
{
size_t nbytes = MIN(ulog_base_nbytes(ulog), ulog->capacity);
if (nbytes == 0)
return 0;
if (verify_checksum && !ulog_checksum(ulog, nbytes, 0))
return 0;
return 1;
}
/*
* ulog_recover -- recovery of ulog
*
* ulog_recover shall be preceded by a ulog_check call.
*/
void
ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
if (ulog_recovery_needed(ulog, 1)) {
ulog_process(ulog, check, p_ops);
ulog_clobber(ulog, NULL, p_ops);
}
}
/*
* ulog_check_entry --
* (internal) checks consistency of a single ulog entry
*/
static int
ulog_check_entry(struct ulog_entry_base *e,
void *arg, const struct pmem_ops *p_ops)
{
uint64_t offset = ulog_entry_offset(e);
ulog_check_offset_fn check = arg;
if (!check(p_ops->base, offset)) {
LOG(15, "ulog %p invalid offset %" PRIu64,
e, e->offset);
return -1;
}
return offset == 0 ? -1 : 0;
}
/*
* ulog_check -- (internal) check consistency of ulog entries
*/
int
ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
return ulog_foreach_entry(ulog,
ulog_check_entry, check, p_ops,NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/sync.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* sync.h -- internal to obj synchronization API
*/
#ifndef LIBPMEMOBJ_SYNC_H
#define LIBPMEMOBJ_SYNC_H 1
#include <errno.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "out.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* internal definitions of PMEM-locks
*/
typedef union padded_pmemmutex {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_mutex_t mutex;
struct {
void *bsd_mutex_p;
union padded_pmemmutex *next;
} bsd_u;
} mutex_u;
} pmemmutex;
} PMEMmutex_internal;
#define PMEMmutex_lock pmemmutex.mutex_u.mutex
#define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p
#define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next
typedef union padded_pmemrwlock {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_rwlock_t rwlock;
struct {
void *bsd_rwlock_p;
union padded_pmemrwlock *next;
} bsd_u;
} rwlock_u;
} pmemrwlock;
} PMEMrwlock_internal;
#define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock
#define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p
#define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next
typedef union padded_pmemcond {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_cond_t cond;
struct {
void *bsd_cond_p;
union padded_pmemcond *next;
} bsd_u;
} cond_u;
} pmemcond;
} PMEMcond_internal;
#define PMEMcond_cond pmemcond.cond_u.cond
#define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p
#define PMEMcond_next pmemcond.cond_u.bsd_u.next
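/*
* All three internal lock types are unions padded to _POBJ_CL_SIZE, so a
* persistent lock occupies exactly one cacheline; the embedded runid is
* compared against the pool's run_id (see sync.c) to decide whether the
* volatile lock object still needs (re)initialization after a pool open.
*/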
/*
* pmemobj_mutex_lock_nofail -- pmemobj_mutex_lock variant that never
* fails from caller perspective. If pmemobj_mutex_lock failed, this function
* aborts the program.
*/
static inline void
pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_lock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_lock");
}
}
/*
* pmemobj_mutex_unlock_nofail -- pmemobj_mutex_unlock variant that never
* fails from caller perspective. If pmemobj_mutex_unlock failed, this function
* aborts the program.
*/
static inline void
pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_unlock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_unlock");
}
}
int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp);
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/sync.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* sync.c -- persistent memory resident synchronization primitives
*/
#include <inttypes.h>
#include "obj.h"
#include "out.h"
#include "util.h"
#include "sync.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __FreeBSD__
#define RECORD_LOCK(init, type, p) \
if (init) {\
PMEM##type##_internal *head = pop->type##_head;\
while (!util_bool_compare_and_swap64(&pop->type##_head, head,\
p)) {\
head = pop->type##_head;\
}\
p->PMEM##type##_next = head;\
}
#else
#define RECORD_LOCK(init, type, p)
#endif
/*
* _get_value -- (internal) atomically initialize and return a value.
* Returns -1 on error, 0 if the caller is not the value
* initializer, 1 if the caller is the value initializer.
*/
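/*
* Sketch of the runid protocol used below (an illustration, not an API):
*	*runid == pop_runid	-> value already initialized for this run
*	*runid == pop_runid - 1	-> another thread is initializing; spin
*	anything else		-> CAS to pop_runid - 1, run init_value(),
*				   then publish pop_runid (or reset to 0
*				   if initialization fails)
*/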
static int
_get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg,
int (*init_value)(void *value, void *arg))
{
uint64_t tmp_runid;
int initializer = 0;
while ((tmp_runid = *runid) != pop_runid) {
if (tmp_runid == pop_runid - 1)
continue;
if (!util_bool_compare_and_swap64(runid, tmp_runid,
pop_runid - 1))
continue;
initializer = 1;
if (init_value(value, arg)) {
ERR("error initializing lock");
util_fetch_and_and64(runid, 0);
return -1;
}
if (util_bool_compare_and_swap64(runid, pop_runid - 1,
pop_runid) == 0) {
ERR("error setting lock runid");
return -1;
}
}
return initializer;
}
/*
* get_mutex -- (internal) atomically initialize, record and return a mutex
*/
static inline os_mutex_t *
get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp)
{
if (likely(imp->pmemmutex.runid == pop->run_id))
return &imp->PMEMmutex_lock;
volatile uint64_t *runid = &imp->pmemmutex.runid;
LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64,
imp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t));
VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock,
NULL, (void *)os_mutex_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, mutex, imp);
return &imp->PMEMmutex_lock;
}
/*
* get_rwlock -- (internal) atomically initialize, record and return a rwlock
*/
static inline os_rwlock_t *
get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp)
{
if (likely(irp->pmemrwlock.runid == pop->run_id))
return &irp->PMEMrwlock_lock;
volatile uint64_t *runid = &irp->pmemrwlock.runid;
LOG(5, "PMEMrwlock %p pop->run_id %"\
PRIu64 " pmemrwlock.runid %" PRIu64,
irp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal));
COMPILE_ERROR_ON(util_alignof(PMEMrwlock)
!= util_alignof(os_rwlock_t));
VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock,
NULL, (void *)os_rwlock_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, rwlock, irp);
return &irp->PMEMrwlock_lock;
}
/*
* get_cond -- (internal) atomically initialize, record and return a
* condition variable
*/
static inline os_cond_t *
get_cond(PMEMobjpool *pop, PMEMcond_internal *icp)
{
if (likely(icp->pmemcond.runid == pop->run_id))
return &icp->PMEMcond_cond;
volatile uint64_t *runid = &icp->pmemcond.runid;
LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64,
icp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal));
COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t));
VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond,
NULL, (void *)os_cond_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, cond, icp);
return &icp->PMEMcond_cond;
}
/*
* pmemobj_mutex_zero -- zero-initialize a pmem resident mutex
*
* This function is not MT safe.
*/
void
pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
mutexip->pmemmutex.runid = 0;
pmemops_persist(&pop->p_ops, &mutexip->pmemmutex.runid,
sizeof(mutexip->pmemmutex.runid));
}
/*
* pmemobj_mutex_lock -- lock a pmem resident mutex
*
* Atomically initializes and locks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_lock(mutex);
}
/*
* pmemobj_mutex_assert_locked -- checks whether mutex is locked.
*
* Returns 0 when mutex is locked.
*/
int
pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
int ret = os_mutex_trylock(mutex);
if (ret == EBUSY)
return 0;
if (ret == 0) {
util_mutex_unlock(mutex);
/*
* There's no good error code for this case. EINVAL is used for
* something else here.
*/
return ENODEV;
}
return ret;
}
/*
* pmemobj_mutex_timedlock -- lock a pmem resident mutex
*
* Atomically initializes and locks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_timedlock(mutex, abs_timeout);
}
/*
* pmemobj_mutex_trylock -- trylock a pmem resident mutex
*
* Atomically initializes and trylocks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_trylock(mutex);
}
/*
* pmemobj_mutex_unlock -- unlock a pmem resident mutex
*/
int
pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
/* XXX potential performance improvement - move GET to debug version */
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_unlock(mutex);
}
/*
* pmemobj_rwlock_zero -- zero-initialize a pmem resident rwlock
*
* This function is not MT safe.
*/
void
pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
rwlockip->pmemrwlock.runid = 0;
pmemops_persist(&pop->p_ops, &rwlockip->pmemrwlock.runid,
sizeof(rwlockip->pmemrwlock.runid));
}
/*
* pmemobj_rwlock_rdlock -- rdlock a pmem resident rwlock
*
* Atomically initializes and rdlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_rdlock(rwlock);
}
/*
* pmemobj_rwlock_wrlock -- wrlock a pmem resident rwlock
*
* Atomically initializes and wrlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_wrlock(rwlock);
}
/*
* pmemobj_rwlock_timedrdlock -- timedrdlock a pmem resident rwlock
*
* Atomically initializes and timedrdlocks a PMEMrwlock, otherwise behaves as
* its POSIX counterpart.
*/
int
pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_timedrdlock(rwlock, abs_timeout);
}
/*
* pmemobj_rwlock_timedwrlock -- timedwrlock a pmem resident rwlock
*
* Atomically initializes and timedwrlocks a PMEMrwlock, otherwise behaves as
* its POSIX counterpart.
*/
int
pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_timedwrlock(rwlock, abs_timeout);
}
/*
* pmemobj_rwlock_tryrdlock -- tryrdlock a pmem resident rwlock
*
* Atomically initializes and tryrdlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_tryrdlock(rwlock);
}
/*
* pmemobj_rwlock_trywrlock -- trywrlock a pmem resident rwlock
*
* Atomically initializes and trywrlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_trywrlock(rwlock);
}
/*
* pmemobj_rwlock_unlock -- unlock a pmem resident rwlock
*/
int
pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
/* XXX potential performance improvement - move GET to debug version */
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_unlock(rwlock);
}
/*
* pmemobj_cond_zero -- zero-initialize a pmem resident condition variable
*
* This function is not MT safe.
*/
void
pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
condip->pmemcond.runid = 0;
pmemops_persist(&pop->p_ops, &condip->pmemcond.runid,
sizeof(condip->pmemcond.runid));
}
/*
* pmemobj_cond_broadcast -- broadcast a pmem resident condition variable
*
* Atomically initializes and broadcasts a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
os_cond_t *cond = get_cond(pop, condip);
if (cond == NULL)
return EINVAL;
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_broadcast(cond);
}
/*
* pmemobj_cond_signal -- signal a pmem resident condition variable
*
* Atomically initializes and signals a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
os_cond_t *cond = get_cond(pop, condip);
if (cond == NULL)
return EINVAL;
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_signal(cond);
}
/*
* pmemobj_cond_timedwait -- timedwait on a pmem resident condition variable
*
* Atomically initializes and timedwaits on a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp,
PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp,
mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_cond_t *cond = get_cond(pop, condip);
os_mutex_t *mutex = get_mutex(pop, mutexip);
if ((cond == NULL) || (mutex == NULL))
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_timedwait(cond, mutex, abs_timeout);
}
/*
* pmemobj_cond_wait -- wait on a pmem resident condition variable
*
* Atomically initializes and waits on a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp,
PMEMmutex *__restrict mutexp)
{
LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_cond_t *cond = get_cond(pop, condip);
os_mutex_t *mutex = get_mutex(pop, mutexip);
if ((cond == NULL) || (mutex == NULL))
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_wait(cond, mutex);
}
/*
* pmemobj_volatile -- atomically initialize, record and return a
* generic value
*/
void *
pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
void *ptr, size_t size,
int (*constr)(void *ptr, void *arg), void *arg)
{
LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr,
constr, arg);
if (likely(vlt->runid == pop->run_id))
return ptr;
VALGRIND_REMOVE_PMEM_MAPPING(ptr, size);
VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt));
if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) {
VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
return NULL;
}
VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
VALGRIND_SET_CLEAN(vlt, sizeof(*vlt));
return ptr;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/lane.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* lane.h -- internal definitions for lanes
*/
#ifndef LIBPMEMOBJ_LANE_H
#define LIBPMEMOBJ_LANE_H 1
#include <stdint.h>
#include "ulog.h"
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Distance between lanes used by threads, required to prevent false
* sharing of parts of the lanes array. Used if properly spread lanes are
* available; otherwise less spread out lanes are used.
*/
#define LANE_JUMP (64 / sizeof(uint64_t))
/*
* Number of times the algorithm will try to reacquire the primary lane for the
* thread. If this threshold is exceeded, a new primary lane is selected for the
* thread.
*/
#define LANE_PRIMARY_ATTEMPTS 128
#define RLANE_DEFAULT 0
#define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */
/*
* We have 3 kilobytes to distribute.
* The smallest capacity is needed for the internal redo log for which we can
* accurately calculate the maximum amount of occupied space: 48 bytes,
* 3 times sizeof(struct ulog_entry_val). One for bitmap OR, second for bitmap
* AND, third for modification of the destination pointer. For future needs,
* this has been bumped up to 12 ulog entries.
*
* The remaining part has to be split between transactional redo and undo logs,
* and since by far the most space consuming operations are transactional
* snapshots, most of the space, 2 kilobytes, is assigned to the undo log.
* After that, the remainder, 640 bytes, or 40 ulog entries, is left for the
* transactional redo logs.
* Thanks to this distribution, all small and medium transactions should be
* entirely performed without allocating any additional metadata.
*
* These values must be cacheline size aligned to be used for ulogs. Therefore
* they are parametrized for the size of the struct ulog changes between
* platforms.
*/
#define LANE_UNDO_SIZE (LANE_TOTAL_SIZE \
- LANE_REDO_EXTERNAL_SIZE \
- LANE_REDO_INTERNAL_SIZE \
- 3 * sizeof(struct ulog)) /* 2048 for 64B ulog */
#define LANE_REDO_EXTERNAL_SIZE ALIGN_UP(704 - sizeof(struct ulog), \
CACHELINE_SIZE) /* 640 for 64B ulog */
#define LANE_REDO_INTERNAL_SIZE ALIGN_UP(256 - sizeof(struct ulog), \
CACHELINE_SIZE) /* 192 for 64B ulog */
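/*
* Worked example, assuming CACHELINE_SIZE == 64 (so sizeof(struct ulog)
* is 64 bytes):
*	LANE_REDO_INTERNAL_SIZE = ALIGN_UP(256 - 64, 64) = 192
*	LANE_REDO_EXTERNAL_SIZE = ALIGN_UP(704 - 64, 64) = 640
*	LANE_UNDO_SIZE = 3072 - 640 - 192 - 3 * 64 = 2048
* which matches the per-macro comments above and sums to LANE_TOTAL_SIZE
* once the three ulog headers are included.
*/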
struct lane_layout {
/*
* Redo log for self-contained and 'one-shot' allocator operations.
* Cannot be extended.
*/
struct ULOG(LANE_REDO_INTERNAL_SIZE) internal;
/*
* Redo log for large operations/transactions.
* Can be extended by the use of internal ulog.
*/
struct ULOG(LANE_REDO_EXTERNAL_SIZE) external;
/*
* Undo log for snapshots done in a transaction.
* Can be extended/shrunk by the use of internal ulog.
*/
struct ULOG(LANE_UNDO_SIZE) undo;
};
struct lane {
struct lane_layout *layout; /* pointer to persistent layout */
struct operation_context *internal; /* context for internal ulog */
struct operation_context *external; /* context for external ulog */
struct operation_context *undo; /* context for undo ulog */
};
struct lane_descriptor {
/*
* Number of lanes available at runtime must be <= total number of lanes
* available in the pool. Number of lanes can be limited by shortage of
* other resources e.g. available RNIC's submission queue sizes.
*/
unsigned runtime_nlanes;
unsigned next_lane_idx;
uint64_t *lane_locks;
struct lane *lane;
};
typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length);
typedef void *(*section_constr)(PMEMobjpool *pop, void *data);
typedef void (*section_destr)(PMEMobjpool *pop, void *rt);
typedef int (*section_global_op)(PMEMobjpool *pop);
struct section_operations {
section_constr construct_rt;
section_destr destroy_rt;
section_layout_op check;
section_layout_op recover;
section_global_op boot;
section_global_op cleanup;
};
struct lane_info {
uint64_t pop_uuid_lo;
uint64_t lane_idx;
unsigned long nest_count;
/*
* The index of the primary lane for the thread. A thread will always
* try to acquire the primary lane first, and only if that fails it will
* look for a different available lane.
*/
uint64_t primary;
int primary_attempts;
struct lane_info *prev, *next;
};
void lane_info_boot(void);
void lane_info_destroy(void);
void lane_init_data(PMEMobjpool *pop);
int lane_boot(PMEMobjpool *pop);
void lane_cleanup(PMEMobjpool *pop);
int lane_recover_and_section_boot(PMEMobjpool *pop);
int lane_section_cleanup(PMEMobjpool *pop);
int lane_check(PMEMobjpool *pop);
unsigned lane_hold(PMEMobjpool *pop, struct lane **lane);
void lane_release(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/bucket.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* bucket.h -- internal definitions for bucket
*/
#ifndef LIBPMEMOBJ_BUCKET_H
#define LIBPMEMOBJ_BUCKET_H 1
#include <stddef.h>
#include <stdint.h>
#include "container.h"
#include "memblock.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define CALC_SIZE_IDX(_unit_size, _size)\
((_size) == 0 ? 0 : (uint32_t)((((_size) - 1) / (_unit_size)) + 1))
struct bucket {
os_mutex_t lock;
struct alloc_class *aclass;
struct block_container *container;
const struct block_container_ops *c_ops;
struct memory_block_reserved *active_memory_block;
int is_active;
};
struct bucket *bucket_new(struct block_container *c,
struct alloc_class *aclass);
int *bucket_current_resvp(struct bucket *b);
int bucket_insert_block(struct bucket *b, const struct memory_block *m);
void bucket_delete(struct bucket *b);
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/ulog.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* ulog.h -- unified log public interface
*/
#ifndef LIBPMEMOBJ_ULOG_H
#define LIBPMEMOBJ_ULOG_H 1
#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include "vec.h"
#include "pmemops.h"
#include <x86intrin.h>
////cmd write optimization
/*
struct ulog_cmd_packet{
uint32_t ulog_offset : 32;
uint32_t base_offset : 32;
uint32_t src : 32;
uint32_t size : 32;
};
*/
struct ulog_entry_base {
uint64_t offset; /* offset with operation type flag */
};
/*
* ulog_entry_val -- log entry
*/
struct ulog_entry_val {
struct ulog_entry_base base;
uint64_t value; /* value to be applied */
};
/*
* ulog_entry_buf - ulog buffer entry
*/
struct ulog_entry_buf {
struct ulog_entry_base base; /* offset with operation type flag */
uint64_t checksum; /* checksum of the entire log entry */
uint64_t size; /* size of the buffer to be modified */
uint8_t data[]; /* content to fill in */
};
#define ULOG_UNUSED ((CACHELINE_SIZE - 40) / 8)
/*
* This structure *must* be located at a cacheline boundary. To achieve this,
* the next field is always allocated with extra padding, and then the offset
* is additionally aligned.
*/
#define ULOG(capacity_bytes) {\
/* 64 bytes of metadata */\
uint64_t checksum; /* checksum of ulog header and its entries */\
uint64_t next; /* offset of ulog extension */\
uint64_t capacity; /* capacity of this ulog in bytes */\
uint64_t gen_num; /* generation counter */\
uint64_t flags; /* ulog flags */\
uint64_t unused[ULOG_UNUSED]; /* must be 0 */\
uint8_t data[capacity_bytes]; /* N bytes of data */\
}\
#define SIZEOF_ULOG(base_capacity)\
(sizeof(struct ulog) + base_capacity)
/*
* A ulog buffer allocated by the user must be marked with this flag.
* It is important not to free it at the end:
* what the user has allocated, the user should free.
*/
#define ULOG_USER_OWNED (1U << 0)
/* use this for allocations of aligned ulog extensions */
#define SIZEOF_ALIGNED_ULOG(base_capacity)\
ALIGN_UP(SIZEOF_ULOG(base_capacity + (2 * CACHELINE_SIZE)), CACHELINE_SIZE)
struct ulog ULOG(0);
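/*
* Usage sketch: lane.h in this tree declares fixed-capacity logs such as
*	struct ULOG(LANE_REDO_INTERNAL_SIZE) internal;
* i.e. a one-cacheline header followed by that many data bytes, while
* SIZEOF_ALIGNED_ULOG() gives the size to request when allocating a log
* extension dynamically.
*/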
VEC(ulog_next, uint64_t);
typedef uint64_t ulog_operation_type;
#define ULOG_OPERATION_SET (0b000ULL << 61ULL)
#define ULOG_OPERATION_AND (0b001ULL << 61ULL)
#define ULOG_OPERATION_OR (0b010ULL << 61ULL)
#define ULOG_OPERATION_BUF_SET (0b101ULL << 61ULL)
#define ULOG_OPERATION_BUF_CPY (0b110ULL << 61ULL)
#define ULOG_BIT_OPERATIONS (ULOG_OPERATION_AND | ULOG_OPERATION_OR)
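/*
* The operation type lives in the top three bits (61..63) of the entry's
* offset word; see e.g. "base_offset |= ULOG_OPERATION(type)" in ulog.c.
* Illustrative encode/decode sketch (not the library's implementation):
*	uint64_t encoded = offset | ULOG_OPERATION_BUF_CPY;
*	ulog_operation_type t = encoded & (0b111ULL << 61ULL);
*	uint64_t off = encoded & ~(0b111ULL << 61ULL);
*/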
/* immediately frees all associated ulog structures */
#define ULOG_FREE_AFTER_FIRST (1U << 0)
/* increments gen_num of the first, preallocated, ulog */
#define ULOG_INC_FIRST_GEN_NUM (1U << 1)
/* informs if there was any buffer allocated by user in the tx */
#define ULOG_ANY_USER_BUFFER (1U << 2)
typedef int (*ulog_check_offset_fn)(void *ctx, uint64_t offset);
typedef int (*ulog_extend_fn)(void *, uint64_t *, uint64_t);
typedef int (*ulog_entry_cb)(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops);
typedef int (*ulog_entry_cb_ndp)(struct ulog_entry_base *e, struct ulog_entry_base *f, void *arg,
const struct pmem_ops *p_ops);
typedef void (*ulog_free_fn)(void *base, uint64_t *next);
typedef int (*ulog_rm_user_buffer_fn)(void *, void *addr);
struct ulog *ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops);
void ulog_construct(uint64_t offset, size_t capacity, uint64_t gen_num,
int flush, uint64_t flags, const struct pmem_ops *p_ops);
size_t ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
const struct pmem_ops *p_ops);
void ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
const struct pmem_ops *p_ops);
int ulog_foreach_entry(struct ulog *ulog,
ulog_entry_cb cb, void *arg, const struct pmem_ops *ops, struct ulog *ulognvm);
int ulog_foreach_entry_ndp(struct ulog *ulogdram, struct ulog *ulognvm,
ulog_entry_cb_ndp cb, void *arg, const struct pmem_ops *ops);
int ulog_reserve(struct ulog *ulog,
size_t ulog_base_nbytes, size_t gen_num,
int auto_reserve, size_t *new_capacity_bytes,
ulog_extend_fn extend, struct ulog_next *next,
const struct pmem_ops *p_ops);
void ulog_store(struct ulog *dest,
struct ulog *src, size_t nbytes, size_t ulog_base_nbytes,
size_t ulog_total_capacity,
struct ulog_next *next, const struct pmem_ops *p_ops);
int ulog_free_next(struct ulog *u, const struct pmem_ops *p_ops,
ulog_free_fn ulog_free, ulog_rm_user_buffer_fn user_buff_remove,
uint64_t flags);
void ulog_clobber(struct ulog *dest, struct ulog_next *next,
const struct pmem_ops *p_ops);
int ulog_clobber_data(struct ulog *dest,
size_t nbytes, size_t ulog_base_nbytes,
struct ulog_next *next, ulog_free_fn ulog_free,
ulog_rm_user_buffer_fn user_buff_remove,
const struct pmem_ops *p_ops, unsigned flags);
void ulog_clobber_entry(const struct ulog_entry_base *e,
const struct pmem_ops *p_ops);
void ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
void ulog_process_ndp(struct ulog *ulognvm, struct ulog *ulogdeam, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
size_t ulog_base_nbytes(struct ulog *ulog);
int ulog_recovery_needed(struct ulog *ulog, int verify_checksum);
struct ulog *ulog_by_offset(size_t offset, const struct pmem_ops *p_ops);
uint64_t ulog_entry_offset(const struct ulog_entry_base *entry);
ulog_operation_type ulog_entry_type(
const struct ulog_entry_base *entry);
struct ulog_entry_val *ulog_entry_val_create(struct ulog *ulog,
size_t offset, uint64_t *dest, uint64_t value,
ulog_operation_type type,
const struct pmem_ops *p_ops);
#ifdef USE_NDP_CLOBBER
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops, int clear_next_header);
#else
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops);
#endif
void ulog_entry_apply(const struct ulog_entry_base *e, int persist,
const struct pmem_ops *p_ops);
void ulog_entry_apply_ndp(const struct ulog_entry_base *e, const struct ulog_entry_base *f, int persist,
const struct pmem_ops *p_ops);
size_t ulog_entry_size(const struct ulog_entry_base *entry);
void ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
int ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/libpmemobj.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* libpmemobj.c -- pmem entry points for libpmemobj
*/
#include "pmemcommon.h"
#include "obj.h"
/*
* libpmemobj_init -- load-time initialization for obj
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemobj_init(void)
{
common_init(PMEMOBJ_LOG_PREFIX, PMEMOBJ_LOG_LEVEL_VAR,
PMEMOBJ_LOG_FILE_VAR, PMEMOBJ_MAJOR_VERSION,
PMEMOBJ_MINOR_VERSION);
LOG(3, NULL);
obj_init();
}
/*
* libpmemobj_fini -- libpmemobj cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemobj_fini(void)
{
LOG(3, NULL);
obj_fini();
common_fini();
}
/*
* pmemobj_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemobj_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMOBJ_MAJOR_VERSION) {
ERR("libpmemobj major version mismatch (need %u, found %u)",
major_required, PMEMOBJ_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMOBJ_MINOR_VERSION) {
ERR("libpmemobj minor version mismatch (need %u, found %u)",
minor_required, PMEMOBJ_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemobj_check_version -- see if lib meets application version requirements
*/
const char *
pmemobj_check_version(unsigned major_required, unsigned minor_required)
{
return pmemobj_check_versionU(major_required, minor_required);
}
#else
/*
* pmemobj_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemobj_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemobj_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemobj_set_funcs -- allow overriding libpmemobj's call to malloc, etc.
*/
void
pmemobj_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
* pmemobj_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemobj_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemobj_errormsg -- return last error message
*/
const char *
pmemobj_errormsg(void)
{
return pmemobj_errormsgU();
}
#else
/*
* pmemobj_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemobj_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/lane.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* lane.c -- lane implementation
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include "libpmemobj.h"
#include "critnib.h"
#include "lane.h"
#include "out.h"
#include "util.h"
#include "obj.h"
#include "os_thread.h"
#include "valgrind_internal.h"
#include "memops.h"
#include "palloc.h"
#include "tx.h"
static os_tls_key_t Lane_info_key;
static __thread struct critnib *Lane_info_ht;
static __thread struct lane_info *Lane_info_records;
static __thread struct lane_info *Lane_info_cache;
/*
* lane_info_create -- (internal) constructor for thread shared data
*/
static inline void
lane_info_create(void)
{
Lane_info_ht = critnib_new();
if (Lane_info_ht == NULL)
FATAL("critnib_new");
}
/*
* lane_info_delete -- (internal) deletes lane info hash table
*/
static inline void
lane_info_delete(void)
{
if (unlikely(Lane_info_ht == NULL))
return;
critnib_delete(Lane_info_ht);
struct lane_info *record;
struct lane_info *head = Lane_info_records;
while (head != NULL) {
record = head;
head = head->next;
Free(record);
}
Lane_info_ht = NULL;
Lane_info_records = NULL;
Lane_info_cache = NULL;
}
/*
* lane_info_ht_boot -- (internal) boot lane info and add it to thread shared
* data
*/
static inline void
lane_info_ht_boot(void)
{
lane_info_create();
int result = os_tls_set(Lane_info_key, Lane_info_ht);
if (result != 0) {
errno = result;
FATAL("!os_tls_set");
}
}
/*
* lane_info_ht_destroy -- (internal) destructor for thread shared data
*/
static inline void
lane_info_ht_destroy(void *ht)
{
lane_info_delete();
}
/*
* lane_info_boot -- initialize lane info hash table and lane info key
*/
void
lane_info_boot(void)
{
int result = os_tls_key_create(&Lane_info_key, lane_info_ht_destroy);
if (result != 0) {
errno = result;
FATAL("!os_tls_key_create");
}
}
/*
* lane_info_destroy -- destroy lane info hash table
*/
void
lane_info_destroy(void)
{
lane_info_delete();
(void) os_tls_key_delete(Lane_info_key);
}
/*
* lane_info_cleanup -- remove lane info record regarding pool being deleted
*/
static inline void
lane_info_cleanup(PMEMobjpool *pop)
{
if (unlikely(Lane_info_ht == NULL))
return;
struct lane_info *info = critnib_remove(Lane_info_ht, pop->uuid_lo);
if (likely(info != NULL)) {
if (info->prev)
info->prev->next = info->next;
if (info->next)
info->next->prev = info->prev;
if (Lane_info_cache == info)
Lane_info_cache = NULL;
if (Lane_info_records == info)
Lane_info_records = info->next;
Free(info);
}
}
/*
* lane_get_layout -- (internal) calculates the real pointer of the lane layout
*/
static struct lane_layout *
lane_get_layout(PMEMobjpool *pop, uint64_t lane_idx)
{
return (void *)((char *)pop + pop->lanes_offset +
sizeof(struct lane_layout) * lane_idx);
}
/*
* lane_ulog_constructor -- (internal) constructor of a ulog extension
*/
static int
lane_ulog_constructor(void *base, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = base;
const struct pmem_ops *p_ops = &pop->p_ops;
size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
CACHELINE_SIZE);
uint64_t gen_num = *(uint64_t *)arg;
ulog_construct(OBJ_PTR_TO_OFF(base, ptr), capacity,
gen_num, 1, 0, p_ops);
return 0;
}
/*
* lane_undo_extend -- allocates a new undo log
*/
static int
lane_undo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
PMEMobjpool *pop = base;
struct tx_parameters *params = pop->tx_params;
size_t s = SIZEOF_ALIGNED_ULOG(params->cache_size);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
* lane_redo_extend -- allocates a new redo log
*/
static int
lane_redo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
size_t s = SIZEOF_ALIGNED_ULOG(LANE_REDO_EXTERNAL_SIZE);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
* lane_init -- (internal) initializes a single lane runtime variables
*/
static int
lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout)
{
ASSERTne(lane, NULL);
lane->layout = layout;
lane->internal = operation_new((struct ulog *)&layout->internal,
LANE_REDO_INTERNAL_SIZE,
NULL, NULL, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->internal == NULL)
goto error_internal_new;
lane->external = operation_new((struct ulog *)&layout->external,
LANE_REDO_EXTERNAL_SIZE,
lane_redo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->external == NULL)
goto error_external_new;
lane->undo = operation_new((struct ulog *)&layout->undo,
LANE_UNDO_SIZE,
lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_UNDO);
if (lane->undo == NULL)
goto error_undo_new;
return 0;
error_undo_new:
operation_delete(lane->external);
error_external_new:
operation_delete(lane->internal);
error_internal_new:
return -1;
}
/*
* lane_destroy -- cleanups a single lane runtime variables
*/
static void
lane_destroy(PMEMobjpool *pop, struct lane *lane)
{
operation_delete(lane->undo);
operation_delete(lane->internal);
operation_delete(lane->external);
}
/*
* lane_boot -- initializes all lanes
*/
int
lane_boot(PMEMobjpool *pop)
{
int err = 0;
pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes);
if (pop->lanes_desc.lane == NULL) {
err = ENOMEM;
ERR("!Malloc of volatile lanes");
goto error_lanes_malloc;
}
pop->lanes_desc.next_lane_idx = 0;
pop->lanes_desc.lane_locks =
Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes);
if (pop->lanes_desc.lane_locks == NULL) {
ERR("!Malloc for lane locks");
goto error_locks_malloc;
}
/* add lanes to pmemcheck ignored list */
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
(sizeof(struct lane_layout) * pop->nlanes));
uint64_t i;
for (i = 0; i < pop->nlanes; ++i) {
struct lane_layout *layout = lane_get_layout(pop, i);
if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) {
ERR("!lane_init");
goto error_lane_init;
}
}
return 0;
error_lane_init:
for (; i >= 1; --i)
lane_destroy(pop, &pop->lanes_desc.lane[i - 1]);
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
error_locks_malloc:
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
error_lanes_malloc:
return err;
}
/*
* lane_init_data -- initializes ulogs for all the lanes
*/
void
lane_init_data(PMEMobjpool *pop)
{
struct lane_layout *layout;
for (uint64_t i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->internal),
LANE_REDO_INTERNAL_SIZE, 0, 0, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->external),
LANE_REDO_EXTERNAL_SIZE, 0, 0, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->undo),
LANE_UNDO_SIZE, 0, 0, 0, &pop->p_ops);
}
layout = lane_get_layout(pop, 0);
pmemops_xpersist(&pop->p_ops, layout,
pop->nlanes * sizeof(struct lane_layout),
PMEMOBJ_F_RELAXED);
}
/*
* lane_cleanup -- destroys all lanes
*/
void
lane_cleanup(PMEMobjpool *pop)
{
for (uint64_t i = 0; i < pop->nlanes; ++i)
lane_destroy(pop, &pop->lanes_desc.lane[i]);
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
lane_info_cleanup(pop);
}
/*
* lane_recover_and_section_boot -- performs initialization and recovery of all
* lanes
*/
int
lane_recover_and_section_boot(PMEMobjpool *pop)
{
COMPILE_ERROR_ON(SIZEOF_ULOG(LANE_UNDO_SIZE) +
SIZEOF_ULOG(LANE_REDO_EXTERNAL_SIZE) +
SIZEOF_ULOG(LANE_REDO_INTERNAL_SIZE) != LANE_TOTAL_SIZE);
int err = 0;
uint64_t i; /* lane index */
struct lane_layout *layout;
/*
* First we need to recover the internal/external redo logs so that the
* allocator state is consistent before we boot it.
*/
for (i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_recover((struct ulog *)&layout->internal,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
ulog_recover((struct ulog *)&layout->external,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
}
if ((err = pmalloc_boot(pop)) != 0)
return err;
/*
* Undo logs must be processed after the heap is initialized since
	 * an undo recovery might require deallocation of the next ulogs.
*/
for (i = 0; i < pop->nlanes; ++i) {
struct operation_context *ctx = pop->lanes_desc.lane[i].undo;
operation_resume(ctx);
operation_process(ctx);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM |
ULOG_FREE_AFTER_FIRST);
}
return 0;
}
/*
* lane_section_cleanup -- performs runtime cleanup of all lanes
*/
int
lane_section_cleanup(PMEMobjpool *pop)
{
return pmalloc_cleanup(pop);
}
/*
* lane_check -- performs check of all lanes
*/
int
lane_check(PMEMobjpool *pop)
{
int err = 0;
uint64_t j; /* lane index */
struct lane_layout *layout;
for (j = 0; j < pop->nlanes; ++j) {
layout = lane_get_layout(pop, j);
		if ((err = ulog_check((struct ulog *)&layout->internal,
				OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops)) != 0) {
LOG(2, "lane %" PRIu64 " internal redo failed: %d",
j, err);
return err;
}
}
return 0;
}
/*
* get_lane -- (internal) get free lane index
*/
static inline void
get_lane(uint64_t *locks, struct lane_info *info, uint64_t nlocks)
{
info->lane_idx = info->primary;
while (1) {
do {
info->lane_idx %= nlocks;
if (likely(util_bool_compare_and_swap64(
&locks[info->lane_idx], 0, 1))) {
if (info->lane_idx == info->primary) {
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
} else if (info->primary_attempts == 0) {
info->primary = info->lane_idx;
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
}
return;
}
if (info->lane_idx == info->primary &&
info->primary_attempts > 0) {
info->primary_attempts--;
}
++info->lane_idx;
} while (info->lane_idx < nlocks);
sched_yield();
}
}
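/*
 * Note on get_lane() above: the scan starts at the thread's cached "primary"
 * lane and walks the lock array, attempting a 0 -> 1 compare-and-swap on
 * each lane lock. Reacquiring the primary lane refreshes its attempt budget
 * (LANE_PRIMARY_ATTEMPTS); each time the primary lane is found busy the
 * budget shrinks, and once it hits zero the next successfully acquired lane
 * becomes the new primary. An unsuccessful walk over the remaining locks
 * ends with sched_yield() before scanning resumes from index zero.
 */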
/*
* get_lane_info_record -- (internal) get lane record attached to memory pool
* or first free
*/
static inline struct lane_info *
get_lane_info_record(PMEMobjpool *pop)
{
if (likely(Lane_info_cache != NULL &&
Lane_info_cache->pop_uuid_lo == pop->uuid_lo)) {
return Lane_info_cache;
}
if (unlikely(Lane_info_ht == NULL)) {
lane_info_ht_boot();
}
struct lane_info *info = critnib_get(Lane_info_ht, pop->uuid_lo);
if (unlikely(info == NULL)) {
info = Malloc(sizeof(struct lane_info));
if (unlikely(info == NULL)) {
FATAL("Malloc");
}
info->pop_uuid_lo = pop->uuid_lo;
info->lane_idx = UINT64_MAX;
info->nest_count = 0;
info->next = Lane_info_records;
info->prev = NULL;
info->primary = 0;
info->primary_attempts = LANE_PRIMARY_ATTEMPTS;
if (Lane_info_records) {
Lane_info_records->prev = info;
}
Lane_info_records = info;
if (unlikely(critnib_insert(
Lane_info_ht, pop->uuid_lo, info) != 0)) {
FATAL("critnib_insert");
}
}
Lane_info_cache = info;
return info;
}
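/*
 * Note on get_lane_info_record() above: lookups go through three layers --
 * the single-entry per-thread cache (Lane_info_cache), the per-thread
 * critnib index keyed by the pool uuid, and finally a fresh allocation that
 * is linked into the Lane_info_records list and inserted into the index.
 */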
/*
* lane_hold -- grabs a per-thread lane in a round-robin fashion
*/
unsigned
lane_hold(PMEMobjpool *pop, struct lane **lanep)
{
/*
* Before runtime lane initialization all remote operations are
* executed using RLANE_DEFAULT.
*/
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
if (lanep != NULL)
FATAL("cannot obtain section before lane's init");
return RLANE_DEFAULT;
}
struct lane_info *lane = get_lane_info_record(pop);
while (unlikely(lane->lane_idx == UINT64_MAX)) {
/* initial wrap to next CL */
lane->primary = lane->lane_idx = util_fetch_and_add32(
&pop->lanes_desc.next_lane_idx, LANE_JUMP);
} /* handles wraparound */
uint64_t *llocks = pop->lanes_desc.lane_locks;
/* grab next free lane from lanes available at runtime */
if (!lane->nest_count++) {
get_lane(llocks, lane, pop->lanes_desc.runtime_nlanes);
}
struct lane *l = &pop->lanes_desc.lane[lane->lane_idx];
/* reinitialize lane's content only if in outermost hold */
if (lanep && lane->nest_count == 1) {
VALGRIND_ANNOTATE_NEW_MEMORY(l, sizeof(*l));
VALGRIND_ANNOTATE_NEW_MEMORY(l->layout, sizeof(*l->layout));
operation_init(l->external);
operation_init(l->internal);
operation_init(l->undo);
}
if (lanep)
*lanep = l;
return (unsigned)lane->lane_idx;
}
/*
* lane_release -- drops the per-thread lane
*/
void
lane_release(PMEMobjpool *pop)
{
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
return;
}
struct lane_info *lane = get_lane_info_record(pop);
ASSERTne(lane, NULL);
ASSERTne(lane->lane_idx, UINT64_MAX);
if (unlikely(lane->nest_count == 0)) {
FATAL("lane_release");
} else if (--(lane->nest_count) == 0) {
if (unlikely(!util_bool_compare_and_swap64(
&pop->lanes_desc.lane_locks[lane->lane_idx],
1, 0))) {
FATAL("util_bool_compare_and_swap64");
}
}
}
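/*
 * Usage sketch (illustrative only, kept compiled out): lane_hold() and
 * lane_release() are meant to be paired by the caller; the helper below is
 * hypothetical and is not referenced anywhere in the library.
 */
#if 0
static void
lane_usage_example(PMEMobjpool *pop)
{
	struct lane *lane;
	unsigned idx = lane_hold(pop, &lane);

	/* ... append entries to lane->external/internal/undo contexts ... */
	(void) idx;

	lane_release(pop);
}
#endif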
| 12,994 | 21.678883 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/testframework.py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
"""Test framework public interface"""
import sys
from os import path
# Don't create cache files on the import of test modules.
# The speedup from bytecode caching in the python test framework code is
# insignificant, and disabling it works around a pycache clobber issue in
# our tangled Makefiles.
sys.dont_write_bytecode = True
sys.path.insert(1, path.abspath(path.join(path.dirname(__file__), 'unittest')))
# flake8 issues silenced:
# E402 - import statements not at the top of the file because of adding
# directory to path
# F401, F403 - testframework.py does not use imported names, only passes them
# down and in most cases needs to pass down all of them - hence import with '*'
from basetest import BaseTest, Test, get_testcases # noqa: E402, F401
from context import * # noqa: E402, F401, F403
from configurator import * # noqa: E402, F401, F403
from valgrind import * # noqa: E402, F401, F403
from utils import * # noqa: E402, F401, F403
from poolset import * # noqa: E402, F401, F403
from builds import * # noqa: E402, F401, F403
from devdax import * # noqa: E402, F401, F403
from test_types import * # noqa: E402, F401, F403
from requirements import * # noqa: E402, F401, F403
import granularity # noqa: E402, F401
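# Illustrative sketch only (not part of the framework): a test group's
# TESTS.py typically imports this module and declares its cases by
# subclassing Test; the binary name 'example_bin' below is hypothetical.
#
# import testframework as t
#
#
# class TEST0(t.Test):
#     test_type = t.Short
#
#     def run(self, ctx):
#         ctx.exec('example_bin', self.testnum)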
| 1,292 | 38.181818 | 79 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/common_badblock.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018-2020, Intel Corporation
#
# src/test/common_badblock.sh -- commons for the following tests:
# - util_badblock
# - pmempool_create
# - pmempool_info
#
LOG=out${UNITTEST_NUM}.log
UNITTEST_DIRNAME=$(echo $UNITTEST_NAME | cut -d'/' -f1)
COMMAND_MOUNTED_DIRS="\
mount | grep -e $UNITTEST_DIRNAME | cut -d' ' -f1 | xargs && true"
COMMAND_NDCTL_NFIT_TEST_INIT="\
sudo modprobe nfit_test &>>$PREP_LOG_FILE && \
sudo ndctl disable-region all &>>$PREP_LOG_FILE && \
sudo ndctl zero-labels all &>>$PREP_LOG_FILE && \
sudo ndctl enable-region all &>>$PREP_LOG_FILE"
COMMAND_NDCTL_NFIT_TEST_FINI="\
sudo ndctl disable-region all &>>$PREP_LOG_FILE && \
sudo modprobe -r nfit_test &>>$PREP_LOG_FILE"
#
# badblock_test_init -- initialize badblock test based on underlying hardware
#
# Input arguments:
# 1) device type (dax_device|block_device)
# 2) mount directory (in case of block device type)
#
function badblock_test_init() {
case "$1"
in
dax_device|block_device)
;;
*)
usage "bad device type: $1"
;;
esac
DEVTYPE=$1
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
ndctl_nfit_test_init
fi
if [ "$DEVTYPE" == "dax_device" ]; then
DEVICE=$(badblock_test_get_dax_device)
elif [ "$DEVTYPE" == "block_device" ]; then
DEVICE=$(badblock_test_get_block_device)
prepare_mount_dir $DEVICE $2
fi
NAMESPACE=$(ndctl_get_namespace_of_device $DEVICE)
FULLDEV="/dev/$DEVICE"
# current unit tests support only block sizes less or equal 4096 bytes
require_max_block_size $FULLDEV 4096
}
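#
# badblock_example_flow -- (illustrative sketch only, never called by any
#                           test) typical call sequence of a badblock test;
#                           the mount directory and block numbers below are
#                           hypothetical
#
function badblock_example_flow() {
	badblock_test_init block_device $DIR/mnt-pmem
	ndctl_inject_error $NAMESPACE 1000 8
	expect_bad_blocks $NAMESPACE
	ndctl_uninject_error $FULLDEV $NAMESPACE 1000 8
	badblock_test_fini $DIR/mnt-pmem
}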
#
# badblock_test_init_node -- initialize badblock test based on underlying
# hardware on a remote node
#
# Input arguments:
# 1) remote node number
# 2) device type (dax_device|block_device)
# 3) for block device: mount directory
# for dax device on real pmem: dax device index on a given node
#
function badblock_test_init_node() {
case "$2"
in
dax_device|block_device)
;;
*)
usage "bad device type: $2"
;;
esac
DEVTYPE=$2
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
ndctl_nfit_test_init_node $1
fi
if [ "$DEVTYPE" == "dax_device" ]; then
DEVICE=$(badblock_test_get_dax_device_node $1 $3)
elif [ "$DEVTYPE" == "block_device" ]; then
DEVICE=$(badblock_test_get_block_device_node $1)
prepare_mount_dir_node $1 $DEVICE $3
fi
NAMESPACE=$(ndctl_get_namespace_of_device_node $1 $DEVICE)
FULLDEV="/dev/$DEVICE"
}
#
# badblock_test_get_dax_device -- get name of the dax device
#
function badblock_test_get_dax_device() {
DEVICE=""
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
DEVICE=$(ndctl_nfit_test_get_dax_device)
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
DEVICE=$(real_pmem_get_dax_device)
fi
echo $DEVICE
}
#
# badblock_test_get_dax_device_node -- get name of the dax device on a given
# remote node
# Input arguments:
# 1) remote node number
# 2) For real pmem: device dax index on a given node
#
function badblock_test_get_dax_device_node() {
DEVICE=""
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
DEVICE=$(ndctl_nfit_test_get_dax_device_node $1)
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
DEVICE=$(real_pmem_get_dax_device_node $1 $2)
fi
echo $DEVICE
}
#
# badblock_test_get_block_device -- get name of the block device
#
function badblock_test_get_block_device() {
DEVICE=""
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
DEVICE=$(ndctl_nfit_test_get_block_device)
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
DEVICE=$(real_pmem_get_block_device)
fi
echo "$DEVICE"
}
#
# badblock_test_get_block_device_node -- get name of the block device on a given
# remote node
#
function badblock_test_get_block_device_node() {
DEVICE=""
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
DEVICE=$(ndctl_nfit_test_get_block_device_node $1)
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
DEVICE=$(real_pmem_get_block_device_node $1)
fi
echo "$DEVICE"
}
#
# prepare_mount_dir -- prepare the mount directory for provided device
#
# Input arguments:
# 1) device name
# 2) mount directory
#
function prepare_mount_dir() {
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
local FULLDEV="/dev/$1"
ndctl_nfit_test_mount_pmem $FULLDEV $2
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
if [ ! -d $2 ]; then
mkdir -p $2
fi
fi
}
#
# prepare_mount_dir_node -- prepare the mount directory for provided device
# on a given remote node
#
# Input arguments:
# 1) remote node number
# 2) device name
# 3) mount directory
#
function prepare_mount_dir_node() {
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
local FULLDEV="/dev/$2"
ndctl_nfit_test_mount_pmem_node $1 $FULLDEV $3
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
if [ ! -d $3 ]; then
run_on_node $1 "mkdir -p $3"
fi
fi
}
#
# real_pmem_get_dax_device -- get real pmem dax device name
#
function real_pmem_get_dax_device() {
local FULLDEV=${DEVICE_DAX_PATH[0]}
DEVICE=${FULLDEV##*/}
echo $DEVICE
}
#
# real_pmem_get_dax_device_node -- get real pmem dax device name on a given
# remote node
#
# Input arguments:
# 1) remote node number
# 2) device dax index number
#
function real_pmem_get_dax_device_node() {
local node=$1
local devdax_index=$2
local device_dax_path=(${NODE_DEVICE_DAX_PATH[$node]})
local FULLDEV=${device_dax_path[$devdax_index]}
DEVICE=${FULLDEV##*/}
echo $DEVICE
}
#
# real_pmem_get_block_device -- get real pmem block device name
#
function real_pmem_get_block_device() {
local FULL_DEV=$(mount | grep $PMEM_FS_DIR | cut -f 1 -d" ")
DEVICE=${FULL_DEV##*/}
echo $DEVICE
}
#
# real_pmem_get_block_device_node -- get real pmem block device name on a given
# remote node
#
function real_pmem_get_block_device_node() {
local FULL_DEV=$(expect_normal_exit run_on_node $1 mount | grep $PMEM_FS_DIR | cut -f 1 -d" ")
DEVICE=${FULL_DEV##*/}
echo $DEVICE
}
#
# ndctl_nfit_test_init -- reset all regions and reload the nfit_test module
#
function ndctl_nfit_test_init() {
sudo ndctl disable-region all &>>$PREP_LOG_FILE
if ! sudo modprobe -r nfit_test &>>$PREP_LOG_FILE; then
MOUNTED_DIRS="$(eval $COMMAND_MOUNTED_DIRS)"
[ "$MOUNTED_DIRS" ] && sudo umount $MOUNTED_DIRS
sudo ndctl disable-region all &>>$PREP_LOG_FILE
sudo modprobe -r nfit_test
fi
expect_normal_exit $COMMAND_NDCTL_NFIT_TEST_INIT
}
#
# ndctl_nfit_test_init_node -- reset all regions and reload the nfit_test
# module on a remote node
#
function ndctl_nfit_test_init_node() {
run_on_node $1 "sudo ndctl disable-region all &>>$PREP_LOG_FILE"
if ! run_on_node $1 "sudo modprobe -r nfit_test &>>$PREP_LOG_FILE"; then
MOUNTED_DIRS="$(run_on_node $1 $COMMAND_MOUNTED_DIRS)"
run_on_node $1 "\
[ \"$MOUNTED_DIRS\" ] && sudo umount $MOUNTED_DIRS; \
sudo ndctl disable-region all &>>$PREP_LOG_FILE; \
sudo modprobe -r nfit_test"
fi
expect_normal_exit run_on_node $1 "$COMMAND_NDCTL_NFIT_TEST_INIT"
}
#
# badblock_test_fini -- clean badblock test based on underlying hardware
#
# Input arguments:
# 1) pmem mount directory to be umounted (optional)
#
function badblock_test_fini() {
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
ndctl_nfit_test_fini $1
fi
}
#
# badblock_test_fini_node() -- clean badblock test based on underlying hardware
# on a given remote node
#
# Input arguments:
# 1) node number
# 2) pmem mount directory to be umounted (optional)
#
function badblock_test_fini_node() {
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
ndctl_nfit_test_fini_node $1 $2
fi
}
#
# ndctl_nfit_test_fini -- clean up after a badblock test run on nfit_test
#
function ndctl_nfit_test_fini() {
MOUNT_DIR=$1
[ $MOUNT_DIR ] && sudo umount $MOUNT_DIR &>> $PREP_LOG_FILE
expect_normal_exit $COMMAND_NDCTL_NFIT_TEST_FINI
}
#
# ndctl_nfit_test_fini_node -- disable all regions, remove the nfit_test module
# and (optionally) umount the pmem block device on a remote node
#
# Input arguments:
# 1) node number
# 2) pmem mount directory to be umounted
#
function ndctl_nfit_test_fini_node() {
MOUNT_DIR=$2
[ $MOUNT_DIR ] && expect_normal_exit run_on_node $1 "sudo umount $MOUNT_DIR &>> $PREP_LOG_FILE"
expect_normal_exit run_on_node $1 "$COMMAND_NDCTL_NFIT_TEST_FINI"
}
#
# ndctl_nfit_test_mount_pmem -- mount a pmem block device
#
# Input arguments:
# 1) path of a pmem block device
# 2) mount directory
#
function ndctl_nfit_test_mount_pmem() {
FULLDEV=$1
MOUNT_DIR=$2
expect_normal_exit "\
sudo mkfs.ext4 $FULLDEV &>>$PREP_LOG_FILE && \
sudo mkdir -p $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo mount $FULLDEV $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo chmod 0777 $MOUNT_DIR"
}
#
# ndctl_nfit_test_mount_pmem_node -- mount a pmem block device on a remote node
#
# Input arguments:
# 1) number of a node
# 2) path of a pmem block device
# 3) mount directory
#
function ndctl_nfit_test_mount_pmem_node() {
FULLDEV=$2
MOUNT_DIR=$3
expect_normal_exit run_on_node $1 "\
sudo mkfs.ext4 $FULLDEV &>>$PREP_LOG_FILE && \
sudo mkdir -p $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo mount $FULLDEV $MOUNT_DIR &>>$PREP_LOG_FILE && \
sudo chmod 0777 $MOUNT_DIR"
}
#
# ndctl_nfit_test_get_device -- create a namespace and get name of the pmem device
# of the nfit_test module
#
# Input argument:
# 1) mode of the namespace (devdax or fsdax)
#
function ndctl_nfit_test_get_device() {
MODE=$1
DEVTYPE=""
[ "$MODE" == "devdax" ] && DEVTYPE="chardev"
[ "$MODE" == "fsdax" ] && DEVTYPE="blockdev"
[ "$DEVTYPE" == "" ] && echo "ERROR: wrong namespace mode: $MODE" >&2 && exit 1
BUS="nfit_test.0"
REGION=$(ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
DEVICE=$(sudo ndctl create-namespace -b $BUS -r $REGION -f -m $MODE -a 4096 | sed "/$DEVTYPE/!d;s/[\", ]//g;s/$DEVTYPE://g")
echo $DEVICE
}
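#
# Example (illustrative): for an fsdax namespace the 'ndctl create-namespace'
# JSON output contains a line like "blockdev":"pmem3", which the sed pipeline
# above reduces to the bare device name 'pmem3'.
#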
#
# ndctl_nfit_test_get_device_node -- create a namespace and get name of the pmem device
# of the nfit_test module on a remote node
#
# Input argument:
# 1) mode of the namespace (devdax or fsdax)
#
function ndctl_nfit_test_get_device_node() {
MODE=$2
DEVTYPE=""
[ "$MODE" == "devdax" ] && DEVTYPE="chardev"
[ "$MODE" == "fsdax" ] && DEVTYPE="blockdev"
[ "$DEVTYPE" == "" ] && echo "ERROR: wrong namespace mode: $MODE" >&2 && exit 1
BUS="nfit_test.0"
REGION=$(expect_normal_exit run_on_node $1 ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
DEVICE=$(expect_normal_exit run_on_node $1 sudo ndctl create-namespace -b $BUS -r $REGION -f -m $MODE -a 4096 | sed "/$DEVTYPE/!d;s/[\", ]//g;s/$DEVTYPE://g")
echo $DEVICE
}
#
# ndctl_nfit_test_get_dax_device -- create a namespace and get name of the dax device
# of the nfit_test module
#
function ndctl_nfit_test_get_dax_device() {
# XXX needed by libndctl (it should be removed when it is not needed)
sudo chmod o+rw /dev/ndctl*
DEVICE=$(ndctl_nfit_test_get_device devdax)
sudo chmod o+rw /dev/$DEVICE
echo $DEVICE
}
#
# ndctl_nfit_test_get_dax_device_node -- create a namespace and get name of
# the pmem dax device of the nfit_test
# module on a remote node
#
function ndctl_nfit_test_get_dax_device_node() {
DEVICE=$(ndctl_nfit_test_get_device_node $1 devdax)
echo $DEVICE
}
#
# ndctl_nfit_test_get_block_device -- create a namespace and get name of the pmem block device
# of the nfit_test module
#
function ndctl_nfit_test_get_block_device() {
DEVICE=$(ndctl_nfit_test_get_device fsdax)
echo $DEVICE
}
#
# ndctl_nfit_test_get_block_device_node -- create a namespace and get name of
# the pmem block device of the nfit_test
# module on a remote node
#
function ndctl_nfit_test_get_block_device_node() {
DEVICE=$(ndctl_nfit_test_get_device_node $1 fsdax)
echo $DEVICE
}
#
# ndctl_nfit_test_grant_access -- grant accesses required by libndctl
#
# XXX needed by libndctl (it should be removed when these extra access rights are not needed)
#
# Input argument:
# 1) a name of pmem device
#
function ndctl_nfit_test_grant_access() {
BUS="nfit_test.0"
REGION=$(ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
expect_normal_exit "\
sudo chmod o+rw /dev/nmem* && \
sudo chmod o+r /sys/bus/nd/devices/ndbus*/$REGION/*/resource && \
sudo chmod o+r /sys/bus/nd/devices/ndbus*/$REGION/resource"
}
#
# ndctl_nfit_test_grant_access_node -- grant accesses required by libndctl on a node
#
# XXX needed by libndctl (it should be removed when these extra access rights are not needed)
#
# Input arguments:
# 1) node number
# 2) name of pmem device
#
function ndctl_nfit_test_grant_access_node() {
BUS="nfit_test.0"
REGION=$(expect_normal_exit run_on_node $1 ndctl list -b $BUS -t pmem -Ri | sed "/dev/!d;s/[\", ]//g;s/dev://g" | tail -1)
expect_normal_exit run_on_node $1 "\
sudo chmod o+rw /dev/nmem* && \
sudo chmod o+r /sys/bus/nd/devices/ndbus*/$REGION/*/resource && \
sudo chmod o+r /sys/bus/nd/devices/ndbus*/$REGION/resource"
}
#
# ndctl_requires_extra_access -- checks whether ndctl will require extra
# file permissions for bad-block iteration
#
# Input argument:
# 1) Mode of the namespace
#
function ndctl_requires_extra_access()
{
# Tests require additional permissions for badblock iteration if they
	# are run on device dax or with an ndctl version prior to v63.
if [ "$1" != "fsdax" ] || ! is_ndctl_enabled $PMEMPOOL$EXESUFFIX ; then
return 0
fi
return 1
}
#
# ndctl_get_namespace_of_device -- get namespace of the pmem device
#
# Input argument:
# 1) a name of pmem device
#
function ndctl_get_namespace_of_device() {
local DEVICE=$1
NAMESPACE=$(ndctl list | grep -e "$DEVICE" -e namespace | grep -B1 -e "$DEVICE" | head -n1 | cut -d'"' -f4)
MODE=$(ndctl list -n "$NAMESPACE" | grep mode | cut -d'"' -f4)
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ] && ndctl_requires_extra_access $MODE; then
ndctl_nfit_test_grant_access $DEVICE
fi
echo "$NAMESPACE"
}
#
# ndctl_get_namespace_of_device_node -- get namespace of the pmem device on a remote node
#
# Input arguments:
# 1) node number
# 2) name of pmem device
#
function ndctl_get_namespace_of_device_node() {
local DEVICE=$2
NAMESPACE=$(expect_normal_exit run_on_node $1 ndctl list | grep -e "$DEVICE" -e namespace | grep -B1 -e "$DEVICE" | head -n1 | cut -d'"' -f4)
MODE=$(expect_normal_exit run_on_node $1 ndctl list -n "$NAMESPACE" | grep mode | cut -d'"' -f4)
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ] && ndctl_requires_extra_access $MODE; then
ndctl_nfit_test_grant_access_node $1 $DEVICE
fi
echo $NAMESPACE
}
#
# ndctl_inject_error -- inject error (bad blocks) to the namespace
#
# Input arguments:
# 1) namespace
# 2) the first bad block
# 3) number of bad blocks
#
function ndctl_inject_error() {
local namespace=$1
local block=$2
local count=$3
echo "# sudo ndctl inject-error --block=$block --count=$count $namespace" >> $PREP_LOG_FILE
expect_normal_exit "sudo ndctl inject-error --block=$block --count=$count $namespace" &>> $PREP_LOG_FILE
echo "# sudo ndctl start-scrub" >> $PREP_LOG_FILE
expect_normal_exit "sudo ndctl start-scrub" &>> $PREP_LOG_FILE
echo "# sudo ndctl wait-scrub" >> $PREP_LOG_FILE
expect_normal_exit "sudo ndctl wait-scrub" &>> $PREP_LOG_FILE
echo "(done: ndctl wait-scrub)" >> $PREP_LOG_FILE
}
#
# ndctl_inject_error_node -- inject error (bad blocks) to the namespace on
# a given remote node
#
# Input arguments:
# 1) node
# 2) namespace
# 3) the first bad block
# 4) number of bad blocks
#
function ndctl_inject_error_node() {
local node=$1
local namespace=$2
local block=$3
local count=$4
echo "# sudo ndctl inject-error --block=$block --count=$count $namespace" >> $PREP_LOG_FILE
expect_normal_exit run_on_node $node "sudo ndctl inject-error --block=$block --count=$count $namespace" &>> $PREP_LOG_FILE
echo "# sudo ndctl start-scrub" >> $PREP_LOG_FILE
expect_normal_exit run_on_node $node "sudo ndctl start-scrub" &>> $PREP_LOG_FILE
echo "# sudo ndctl wait-scrub" >> $PREP_LOG_FILE
expect_normal_exit run_on_node $node "sudo ndctl wait-scrub" &>> $PREP_LOG_FILE
echo "(done: ndctl wait-scrub)" >> $PREP_LOG_FILE
}
#
# ndctl_uninject_error -- clear bad block error present in the namespace
#
# Input arguments:
# 1) full device name (error clearing process requires writing to device)
# 2) namespace
# 3) the first bad block
# 4) number of bad blocks
#
function ndctl_uninject_error() {
# explicit uninjection is not required on nfit_test since any error
# injections made during the tests are eventually cleaned up in _fini
# function by reloading the whole namespace
if [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
local fulldev=$1
local namespace=$2
local block=$3
local count=$4
expect_normal_exit "sudo ndctl inject-error --uninject --block=$block --count=$count $namespace >> $PREP_LOG_FILE 2>&1"
if [ "$DEVTYPE" == "block_device" ]; then
expect_normal_exit "sudo dd if=/dev/zero of=$fulldev bs=512 seek=$block count=$count \
oflag=direct >> $PREP_LOG_FILE 2>&1"
elif [ "$DEVTYPE" == "dax_device" ]; then
expect_normal_exit "$DAXIO$EXESUFFIX -i /dev/zero -o $fulldev -s $block -l $count >> $PREP_LOG_FILE 2>&1"
fi
fi
}
#
# ndctl_uninject_error_node -- clear bad block error present in the
# namespace on a given remote node
#
# Input arguments:
# 1) node
# 2) full device name (error clearing process requires writing to device)
# 3) namespace
# 4) the first bad block
# 5) number of bad blocks
#
function ndctl_uninject_error_node() {
# explicit uninjection is not required on nfit_test since any error
# injections made during the tests are eventually cleaned up in _fini
# function by reloading the whole namespace
if [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
local node=$1
local fulldev=$2
local namespace=$3
local block=$4
local count=$5
expect_normal_exit run_on_node $node "sudo ndctl inject-error --uninject --block=$block --count=$count \
$namespace >> $PREP_LOG_FILE 2>&1"
if [ "$DEVTYPE" == "block_device" ]; then
expect_normal_exit run_on_node $node "sudo dd if=/dev/zero of=$fulldev bs=512 seek=$block count=$count \
oflag=direct >> $PREP_LOG_FILE 2>&1"
elif [ "$DEVTYPE" == "dax_device" ]; then
expect_normal_exit run_on_node $node "$DAXIO$EXESUFFIX -i /dev/zero -o $fulldev -s $block -l $count \
>> $PREP_LOG_FILE 2>&1"
fi
fi
}
#
# print_bad_blocks -- print all bad blocks (count, offset and length)
# in the given namespace or "No bad blocks found"
# if there are no bad blocks
#
# Input arguments:
# 1) namespace
#
function print_bad_blocks {
# XXX sudo should be removed when it is not needed
sudo ndctl list -M -n $1 | \
grep -e "badblock_count" -e "offset" -e "length" >> $LOG \
|| echo "No bad blocks found" >> $LOG
}
#
# expect_bad_blocks -- verify if there are required bad blocks
# in the given namespace and fail if they are not there
#
# Input arguments:
# 1) namespace
#
function expect_bad_blocks {
# XXX sudo should be removed when it is not needed
sudo ndctl list -M -n $1 | grep -e "badblock_count" -e "offset" -e "length" >> $LOG && true
if [ $? -ne 0 ]; then
# XXX sudo should be removed when it is not needed
sudo ndctl list -M &>> $PREP_LOG_FILE && true
msg "====================================================================="
msg "Error occurred, the preparation log ($PREP_LOG_FILE) is listed below:"
msg ""
cat $PREP_LOG_FILE
msg "====================================================================="
msg ""
fatal "Error: ndctl failed to inject or retain bad blocks"
fi
}
#
# expect_bad_blocks_node -- verify if there are required bad blocks
# in the given namespace on the given node
# and fail if they are not there
#
# Input arguments:
# 1) node number
# 2) namespace
#
function expect_bad_blocks_node {
# XXX sudo should be removed when it is not needed
expect_normal_exit run_on_node $1 sudo ndctl list -M -n $2 | \
grep -e "badblock_count" -e "offset" -e "length" >> $LOG \
|| fatal "Error: ndctl failed to inject or retain bad blocks (node $1)"
}
| 20,737 | 28.838849 | 159 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/RUNTESTS.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
"""Main script for unit tests execution"""
import sys
import os
from os import path
sys.path.insert(1, path.abspath(path.join(path.dirname(__file__), 'unittest')))
# flake8 issues silenced:
# E402 - import statements not at the top of the file because of adding
# directory to path
import importlib.util as importutil # noqa E402
import subprocess as sp # noqa E402
import futils # noqa E402
from basetest import get_testcases # noqa E402
from configurator import Configurator # noqa E402
from ctx_filter import CtxFilter # noqa E402
class TestRunner:
def __init__(self, config, testcases):
self.testcases = testcases
self.config = config
self._check_admin()
self.msg = futils.Message(config.unittest_log_level)
if self.config.test_sequence:
# filter test cases from sequence
self.testcases = [t for t in self.testcases
if t.testnum in self.config.test_sequence]
# sort testcases so their sequence matches provided test sequence
self.testcases.sort(key=lambda
tc: config.test_sequence.index(tc.testnum))
if not self.testcases:
sys.exit('No testcases to run found for selected configuration.')
def _check_admin(self):
if not self.config.enable_admin_tests:
return
if sys.platform != 'win32':
"""This check is valid only for linux OSes"""
try:
sp.check_output(['sudo', '-n', 'true'], stderr=sp.STDOUT)
except sp.CalledProcessError:
sys.exit('Enabled "enable_admin_tests" requires '
'the non-interactive sudo (no password required to '
'perform the sudo command).')
"""XXX add a similar check for Windows"""
def run_tests(self):
"""Run selected testcases"""
ret = 0
for tc in self.testcases:
# TODO handle test type inside custom decorator
if tc.test_type not in self.config.test_type:
continue
if not tc.enabled:
continue
cf = CtxFilter(self.config, tc)
            # The 'c' context has to be initialized before the 'for' loop,
            # because cf.get_contexts() can return an empty list ([])
            # and in case of the 'futils.Fail' exception
            # self._test_failed(tc, c, f) would be called
            # with an uninitialized value of the 'c' context.
c = None
try:
for c in cf.get_contexts():
try:
t = tc()
if t.enabled:
self.msg.print('{}: SETUP\t({}/{})'
.format(t, t.test_type, c))
t._execute(c)
else:
continue
except futils.Skip as s:
self.msg.print_verbose('{}: SKIP: {}'.format(t, s))
except futils.Fail as f:
self._test_failed(t, c, f)
ret = 1
else:
self._test_passed(t)
except futils.Skip as s:
self.msg.print_verbose('{}: SKIP: {}'.format(tc, s))
except futils.Fail as f:
self._test_failed(tc, c, f)
ret = 1
return ret
def _test_failed(self, tc, ctx, fail):
self.msg.print('{}: {}FAILED{}\t({}/{})'
.format(tc, futils.Color.RED,
futils.Color.END, tc.test_type, ctx))
self.msg.print(fail)
if not self.config.keep_going:
sys.exit(1)
def _test_passed(self, tc):
"""Print message specific for passed test"""
if self.config.tm:
tm = '\t\t\t[{:06.3F} s]'.format(tc.elapsed)
else:
tm = ''
self.msg.print('{}: {}PASS{} {}'
.format(tc, futils.Color.GREEN, futils.Color.END, tm))
def _import_testfiles():
"""
Traverse through "src/test" directory, find all "TESTS.py" files and
import them as modules. Set imported module name to
file directory path.
"""
for root, _, files in os.walk(futils.ROOTDIR):
for name in files:
if name == 'TESTS.py':
testfile = path.join(root, name)
module_name = path.dirname(testfile)
spec = importutil.spec_from_file_location(module_name,
testfile)
module = importutil.module_from_spec(spec)
spec.loader.exec_module(module)
def main():
_import_testfiles()
config = Configurator().config
testcases = get_testcases()
if config.group:
# filter selected groups
testcases = [t for t in testcases
if path.basename(t.__module__) in config.group]
if hasattr(config, 'list_testcases'):
for t in testcases:
print(t.name)
sys.exit(0)
runner = TestRunner(config, testcases)
sys.exit(runner.run_tests())
if __name__ == '__main__':
main()
| 5,400 | 32.134969 | 79 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/env.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
"""
Interpreter managing test group specific TESTS.py file execution.
It parses test classes from the interpreted file, handles command line
arguments and executes tests using the provided configuration.
"""
import importlib.util as importutil
import os
import sys
from testframework import Configurator, get_testcases
from RUNTESTS import TestRunner
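# Example invocation (illustrative only; the test-group path below is
# hypothetical):
#
#     python3 env.py obj_example/TESTS.py
#
# Any arguments that follow the TESTS.py path are parsed as the test
# configuration.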
def run_testcases():
"""Parse user configuration, run test cases"""
config = Configurator().config
testcases = get_testcases()
runner = TestRunner(config, testcases)
return runner.run_tests()
def main():
# Interpreter receives TESTS.py file as first argument
if len(sys.argv) < 2:
sys.exit('Provide test file to run')
testfile = sys.argv[1]
    # Remove the TESTS.py file from args; the rest of the args are parsed
    # as the test configuration
sys.argv.pop(1)
# import TESTS.py as a module
testfile_dir = os.path.abspath(os.path.dirname(testfile))
spec = importutil.spec_from_file_location(testfile_dir, testfile)
module = importutil.module_from_spec(spec)
spec.loader.exec_module(module)
sys.exit(run_testcases())
if __name__ == '__main__':
main()
| 1,272 | 25.520833 | 76 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_rm_remote/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# libpmempool_rm_remote/config.sh -- test configuration
#
CONF_GLOBAL_FS_TYPE=any
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 285 | 19.428571 | 55 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_map/pmem2_map.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_map.c -- pmem2_map unittests
*/
#include <stdbool.h>
#include "config.h"
#include "pmem2_utils.h"
#include "source.h"
#include "map.h"
#include "out.h"
#include "pmem2.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
/*
* prepare_config_with_addr -- extended version of ut_pmem2_prepare_config -
* fill pmem2_config also with addr and addr_request
*/
static void
prepare_config_with_addr(struct pmem2_config *cfg, struct pmem2_source **src,
struct FHandle **fh, const char *file, size_t length, size_t offset,
int access, void *addr, enum pmem2_address_request_type addr_request)
{
ut_pmem2_prepare_config(cfg, src, fh, FH_FD, file, length, offset,
access);
cfg->addr = addr;
cfg->addr_request = (int)addr_request;
}
#ifdef _WIN32
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))
/*
* prepare_map -- map accordingly to the config
*
* XXX it is assumed pmem2_config contains exact arguments e.g.
* length won't be altered by the file size.
*/
static void
prepare_map(struct pmem2_map **map_ptr,
struct pmem2_config *cfg, struct pmem2_source *src)
{
struct pmem2_map *map = malloc(sizeof(*map));
UT_ASSERTne(map, NULL);
UT_ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
size_t max_size = cfg->length + cfg->offset;
HANDLE mh = CreateFileMapping(src->value.handle,
NULL,
PAGE_READWRITE,
HIDWORD(max_size),
LODWORD(max_size),
NULL);
UT_ASSERTne(mh, NULL);
UT_ASSERTne(GetLastError(), ERROR_ALREADY_EXISTS);
map->addr = MapViewOfFileEx(mh,
FILE_MAP_ALL_ACCESS,
HIDWORD(cfg->offset),
LODWORD(cfg->offset),
cfg->length,
NULL);
UT_ASSERTne(map->addr, NULL);
UT_ASSERTne(CloseHandle(mh), 0);
map->reserved_length = map->content_length = cfg->length;
map->effective_granularity = PMEM2_GRANULARITY_PAGE;
*map_ptr = map;
UT_ASSERTeq(pmem2_register_mapping(map), 0);
}
#else
/*
* prepare_map -- map accordingly to the config
*
* XXX this function currently calls mmap(3) without MAP_SYNC so the only
* mapping granularity is PMEM2_GRANULARITY_PAGE.
*
* XXX it is assumed pmem2_config contains exact mmap(3) arguments e.g.
* length won't be altered by the file size.
*/
static void
prepare_map(struct pmem2_map **map_ptr,
struct pmem2_config *cfg, struct pmem2_source *src)
{
int flags = MAP_SHARED;
int proto = PROT_READ | PROT_WRITE;
off_t offset = (off_t)cfg->offset;
UT_ASSERTeq((size_t)offset, cfg->offset);
struct pmem2_map *map = malloc(sizeof(*map));
UT_ASSERTne(map, NULL);
UT_ASSERTeq(src->type, PMEM2_SOURCE_FD);
map->addr = mmap(NULL, cfg->length, proto, flags,
src->value.fd, offset);
UT_ASSERTne(map->addr, MAP_FAILED);
map->source.value.ftype = PMEM2_FTYPE_REG;
map->reserved_length = map->content_length = cfg->length;
map->effective_granularity = PMEM2_GRANULARITY_PAGE;
*map_ptr = map;
UT_ASSERTeq(pmem2_register_mapping(map), 0);
}
#endif
/*
* unmap_map -- unmap the mapping according to pmem2_map struct
*/
static void
unmap_map(struct pmem2_map *map)
{
#ifdef _WIN32
UT_ASSERTne(UnmapViewOfFile(map->addr), 0);
#else
UT_ASSERTeq(munmap(map->addr, map->reserved_length), 0);
#endif
UT_ASSERTeq(pmem2_unregister_mapping(map), 0);
}
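/*
 * Minimal sketch (illustrative only, kept compiled out) of the map/unmap
 * flow the test cases below repeat: prepare a config and a source for a
 * file, map it, then unmap it. The file path used here is hypothetical.
 */
#if 0
static void
map_unmap_example(void)
{
	struct pmem2_config cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;
	struct FHandle *fh;

	ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, "/mnt/pmem/testfile",
			0, 0, FH_RDWR);

	UT_ASSERTeq(pmem2_map(&cfg, src, &map), 0);

	/* ... access pmem2_map_get_address(map) here ... */

	UT_ASSERTeq(pmem2_unmap(&map), 0);
	PMEM2_SOURCE_DELETE(&src);
	UT_FH_CLOSE(fh);
}
#endif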
/*
* get_align_by_name -- fetch map alignment for an unopened file
*/
static size_t
get_align_by_name(const char *filename)
{
struct pmem2_source *src;
size_t align;
int fd = OPEN(filename, O_RDONLY);
PMEM2_SOURCE_FROM_FD(&src, fd);
PMEM2_SOURCE_ALIGNMENT(src, &align);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return align;
}
/*
 * test_map_rdrw_file - map an O_RDWR file
*/
static int
test_map_rdrw_file(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_rdrw_file <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_RDWR);
struct pmem2_map *map;
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
unmap_map(map);
FREE(map);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
/*
 * test_map_rdonly_file - map an O_RDONLY file
*/
static int
test_map_rdonly_file(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_rdonly_file <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_READ);
struct pmem2_map *map;
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_NO_ACCESS);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
/*
* map_valid_ranges_common -- map valid range and validate its length
* Includes cleanup.
*/
static void
map_valid_ranges_common(const char *file, size_t offset, size_t length,
size_t val_length)
{
struct pmem2_config cfg;
struct pmem2_source *src;
struct pmem2_map *map;
int ret = 0;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, length, offset,
FH_RDWR);
ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(map->content_length, val_length);
unmap_map(map);
FREE(map);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
}
/*
* test_map_valid_ranges - map valid memory ranges
*/
static int
test_map_valid_ranges(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_valid_ranges <file> <size>");
char *file = argv[0];
size_t align = get_align_by_name(file);
size_t size = ATOUL(argv[1]);
size_t size2 = ALIGN_DOWN(size / 2, align);
/* the config WITHOUT provided length allows mapping the whole file */
map_valid_ranges_common(file, 0, 0, size);
/* the config WITH provided length allows mapping the whole file */
map_valid_ranges_common(file, 0, size, size);
/* the config with provided length different than the file length */
map_valid_ranges_common(file, 0, size2, size2);
/* verify the config with provided length and a valid offset */
map_valid_ranges_common(file, align, size2, size2);
return 2;
}
/*
* test_map_invalid_ranges - map invalid memory ranges
*/
static int
test_map_invalid_ranges(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_invalid_ranges <file> <size>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
size_t size = ATOUL(argv[1]);
size_t offset = 0;
struct pmem2_map *map;
int ret = 0;
struct FHandle *fh;
/* the mapping + the offset > the file size */
size_t size2 = ALIGN_DOWN(size / 2, get_align_by_name(file));
offset = size2 + (4 * MEGABYTE);
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size2, offset,
FH_RDWR);
ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_MAP_RANGE);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
/* the mapping size > the file size */
offset = size * 2;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, offset,
FH_RDWR);
ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_MAP_RANGE);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 2;
}
/*
* test_map_invalid_alignment - map using invalid alignment in the offset
*/
static int
test_map_invalid_alignment(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
		UT_FATAL("usage: test_map_invalid_alignment <file> <size>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
size_t size = ATOUL(argv[1]);
size_t length = size / 2;
struct pmem2_map *map;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, length,
KILOBYTE, FH_RDWR);
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_OFFSET_UNALIGNED);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 2;
}
/*
 * test_map_invalid_fd - map using an invalid file descriptor
*/
static int
test_map_invalid_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
		UT_FATAL("usage: test_map_invalid_fd <file> <size>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
size_t size = ATOUL(argv[1]);
size_t length = size / 2;
struct pmem2_map *map;
struct FHandle *fh;
/* the invalid file descriptor */
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, length, 0,
FH_RDWR);
UT_FH_CLOSE(fh);
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE);
PMEM2_SOURCE_DELETE(&src);
return 2;
}
/*
 * test_map_unaligned_length - map a file whose length is not page-aligned
*/
static int
test_map_unaligned_length(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_unaligned_length <file> <size>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
size_t length = ATOUL(argv[1]);
struct pmem2_map *map;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, length, 0,
FH_RDWR);
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_LENGTH_UNALIGNED);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 2;
}
/*
* test_unmap_valid - unmap valid pmem2 mapping
*/
static int
test_unmap_valid(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_unmap_valid <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
struct pmem2_config cfg;
struct pmem2_source *src;
struct pmem2_map *map = NULL;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size, 0,
FH_RDWR);
prepare_map(&map, &cfg, src);
/* unmap the valid mapping */
int ret = pmem2_unmap(&map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 2;
}
typedef void (*spoil_func)(struct pmem2_map *map);
/*
* unmap_invalid_common - unmap an invalid pmem2 mapping
*/
static int
unmap_invalid_common(const char *file, size_t size,
spoil_func spoil, int exp_ret)
{
struct pmem2_config cfg;
struct pmem2_source *src;
struct pmem2_map *map = NULL;
struct pmem2_map map_copy;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size, 0,
FH_RDWR);
prepare_map(&map, &cfg, src);
/* backup the map and spoil it */
memcpy(&map_copy, map, sizeof(*map));
spoil(map);
/* unmap the invalid mapping */
int ret = pmem2_unmap(&map);
UT_PMEM2_EXPECT_RETURN(ret, exp_ret);
FREE(map);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
static void
map_spoil_set_zero_length(struct pmem2_map *map)
{
map->reserved_length = 0;
map->content_length = 0;
}
static void
map_spoil_set_unaligned_addr(struct pmem2_map *map)
{
map->addr = (void *)((uintptr_t)map->addr + 1);
map->reserved_length -= 1;
}
static void
map_spoil_by_unmap(struct pmem2_map *map)
{
unmap_map(map);
}
/*
* test_unmap_zero_length - unmap a pmem2 mapping with an invalid length
*/
static int
test_unmap_zero_length(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_unmap_zero_length <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
unmap_invalid_common(file, size, map_spoil_set_zero_length, -EINVAL);
return 2;
}
/*
* test_unmap_unaligned_addr - unmap a pmem2 mapping with an unaligned address
*/
static int
test_unmap_unaligned_addr(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_unmap_unaligned_addr <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
unmap_invalid_common(file, size, map_spoil_set_unaligned_addr, -EINVAL);
return 2;
}
/*
 * test_unmap_unmapped - double unmap a pmem2 mapping
*/
static int
test_unmap_unmapped(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_unmap_unmapped <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
unmap_invalid_common(file, size, map_spoil_by_unmap,
PMEM2_E_MAPPING_NOT_FOUND);
return 2;
}
/*
* test_map_get_address -- check pmem2_map_get_address func
*/
static int
test_map_get_address(const struct test_case *tc, int argc, char *argv[])
{
void *ret_addr;
void *ref_addr = (void *)0x12345;
struct pmem2_map map;
map.addr = ref_addr;
ret_addr = pmem2_map_get_address(&map);
UT_ASSERTeq(ret_addr, ref_addr);
return 0;
}
/*
* test_map_get_size -- check pmem2_map_get_size func
*/
static int
test_map_get_size(const struct test_case *tc, int argc, char *argv[])
{
size_t ret_size;
size_t ref_size = 16384;
struct pmem2_map map;
map.content_length = ref_size;
ret_size = pmem2_map_get_size(&map);
UT_ASSERTeq(ret_size, ref_size);
return 0;
}
/*
* test_get_granularity_simple - simply get the previously stored value
*/
static int
test_get_granularity_simple(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_map map;
map.effective_granularity = PMEM2_GRANULARITY_BYTE;
enum pmem2_granularity ret = pmem2_map_get_store_granularity(&map);
UT_ASSERTeq(ret, PMEM2_GRANULARITY_BYTE);
return 0;
}
/*
 * test_map_larger_than_unaligned_file_size - map a file whose size is not
* aligned
*/
static int
test_map_larger_than_unaligned_file_size(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_larger_than_unaligned_file_size"
" <file> <size>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
size_t length = ATOUL(argv[1]);
struct pmem2_map *map;
struct FHandle *fh;
size_t alignment;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_RDWR);
PMEM2_SOURCE_ALIGNMENT(src, &alignment);
/* validate file length is unaligned */
UT_ASSERTne(length % alignment, 0);
/* align up the required mapping length */
cfg.length = ALIGN_UP(length, alignment);
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
unmap_map(map);
FREE(map);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 2;
}
/*
* test_map_zero_file_size - map using zero file size, do not set length
* in config, expect failure
*/
static int
test_map_zero_file_size(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_zero_file_size <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
if (fd < 0)
UT_FATAL("open: %s", file);
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* mapping length is left unset */
cfg.offset = 0;
cfg.requested_max_granularity = PMEM2_GRANULARITY_PAGE;
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FD(&src, fd);
struct pmem2_map *map;
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_SOURCE_EMPTY);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 2;
}
static void
do_map_and_copy_data(struct pmem2_config *cfg, struct pmem2_source *src,
struct pmem2_map **map, const char *data)
{
int ret = pmem2_map(cfg, src, map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(*map);
void *addr = pmem2_map_get_address(*map);
memcpy_fn(addr, data, strlen(data), 0);
UT_ASSERTeq(memcmp(addr, data, strlen(data)), 0);
}
static const char *word1 = "Persistent or nonpersistent: that is the question.";
static const char *word2 = "Nonpersistent: that is the answer.";
/*
 * test_map_sharing_shared - map a file with the PMEM2_SHARED option and check if
* data was written; the file is not reopened
*/
static int
test_map_sharing_shared(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_sharing_shared <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_RDWR);
struct pmem2_map *map1 = NULL;
do_map_and_copy_data(&cfg, src, &map1, word1);
struct pmem2_map *map2 = NULL;
do_map_and_copy_data(&cfg, src, &map2, word2);
void *addr1 = pmem2_map_get_address(map1);
/* check if changes in shared mapping affect other mapping */
UT_ASSERTeq(memcmp(addr1, word2, strlen(word2)), 0);
UT_ASSERTne(memcmp(addr1, word1, strlen(word1)), 0);
unmap_map(map2);
unmap_map(map1);
FREE(map2);
FREE(map1);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
/*
 * test_map_sharing_private - map a file with the PMEM2_PRIVATE option and
 * check if data wasn't written; the file is not reopened
*/
static int
test_map_sharing_private(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_sharing_private <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_RDWR);
struct pmem2_map *map1 = NULL;
do_map_and_copy_data(&cfg, src, &map1, word1);
struct pmem2_map *map2 = NULL;
pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
do_map_and_copy_data(&cfg, src, &map2, word2);
void *addr1 = pmem2_map_get_address(map1);
/* check if changes in private mapping do not affect other mapping */
UT_ASSERTne(memcmp(addr1, word2, strlen(word2)), 0);
UT_ASSERTeq(memcmp(addr1, word1, strlen(word1)), 0);
unmap_map(map2);
unmap_map(map1);
FREE(map2);
FREE(map1);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
/*
 * test_map_sharing_private_with_reopened_fd - map a file with the PMEM2_PRIVATE
* option and check if data wasn't written; the file is reopened before every
* mapping
*/
static int
test_map_sharing_private_with_reopened_fd(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL(
"usage: test_map_sharing_private_with_reopened_fd <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh1;
ut_pmem2_prepare_config(&cfg, &src, &fh1, FH_FD, file, 0, 0,
FH_RDWR);
struct pmem2_map *map1;
do_map_and_copy_data(&cfg, src, &map1, word1);
UT_FH_CLOSE(fh1);
struct FHandle *fh2;
ut_pmem2_prepare_config(&cfg, &src, &fh2, FH_FD, file, 0, 0,
FH_RDWR);
struct pmem2_map *map2;
pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
do_map_and_copy_data(&cfg, src, &map2, word2);
UT_FH_CLOSE(fh2);
void *addr1 = pmem2_map_get_address(map1);
/* check if changes in private mapping do not affect other mapping */
UT_ASSERTne(memcmp(addr1, word2, strlen(word2)), 0);
UT_ASSERTeq(memcmp(addr1, word1, strlen(word1)), 0);
unmap_map(map2);
unmap_map(map1);
FREE(map2);
FREE(map1);
PMEM2_SOURCE_DELETE(&src);
return 1;
}
/*
* test_map_sharing_private_rdonly_file - map O_RDONLY file with
* PMEM2_PRIVATE sharing
*/
static int
test_map_sharing_private_rdonly_file(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_sharing_private_rdonly_file <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_READ);
pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
struct pmem2_map *map = NULL;
do_map_and_copy_data(&cfg, src, &map, word2);
unmap_map(map);
FREE(map);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
/*
* test_map_sharing_private_devdax - map DAX device with PMEM2_PRIVATE sharing
*/
static int
test_map_sharing_private_devdax(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_map_sharing_private_devdax <file>");
char *file = argv[0];
struct pmem2_config cfg;
struct pmem2_source *src;
struct FHandle *fh;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, 0, 0, FH_RDWR);
pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
struct pmem2_map *map = NULL;
int ret = pmem2_map(&cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_SRC_DEVDAX_PRIVATE);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 1;
}
/*
* test_map_fixed_noreplace_valid - map a file to the desired addr with
* address type request PMEM2_ADDRESS_FIXED_NOREPLACE
*/
static int
test_map_fixed_noreplace_valid(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_fixed_noreplace_valid <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
struct pmem2_config cfg;
struct pmem2_source *src;
struct pmem2_map *map = NULL;
struct FHandle *fh;
void *addr;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size, 0,
FH_RDWR);
int ret = pmem2_map(&cfg, src, &map);
UT_ASSERTeq(ret, 0);
addr = pmem2_map_get_address(map);
/* unmap current mapping */
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
/*
* let's do the same mapping, to the same addr but with
* address type request PMEM2_ADDRESS_FIXED_NOREPLACE
*/
prepare_config_with_addr(&cfg, &src, &fh, file, size, 0, FH_RDWR,
addr, PMEM2_ADDRESS_FIXED_NOREPLACE);
ret = pmem2_map(&cfg, src, &map);
UT_ASSERTeq(ret, 0);
/* check if mapping is in the same addr, which is desired */
UT_ASSERTeq(addr, map->addr);
/* unmap mapping */
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
return 2;
}
/*
 * test_map_fixed_noreplace_full_overlap - map a file and fully overlap
 * another existing mapping with the address type request
* PMEM2_ADDRESS_FIXED_NOREPLACE
*/
static int
test_map_fixed_noreplace_full_overlap(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_fixed_noreplace_full_overlap"
" <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
struct pmem2_source *src;
struct pmem2_config cfg;
struct pmem2_map *map = NULL;
struct pmem2_map *map_overlap = NULL;
struct FHandle *fh;
struct FHandle *fh_overlap;
void *addr;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size, 0,
FH_RDWR);
int ret = pmem2_map(&cfg, src, &map);
UT_ASSERTeq(ret, 0);
addr = pmem2_map_get_address(map);
/*
* let's do the same mapping, to the same addr but with
* the address type request PMEM2_ADDRESS_FIXED_NOREPLACE
*/
prepare_config_with_addr(&cfg, &src, &fh_overlap, file, size, 0,
FH_RDWR, addr, PMEM2_ADDRESS_FIXED_NOREPLACE);
ret = pmem2_map(&cfg, src, &map_overlap);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_MAPPING_EXISTS);
/* unmap first mapping and close fds */
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
UT_FH_CLOSE(fh_overlap);
return 2;
}
/*
* test_map_fixed_noreplace_partial_overlap - map a file in the middle of
* another existing mapping with address type request
* PMEM2_ADDRESS_FIXED_NOREPLACE
*/
static int
test_map_fixed_noreplace_partial_overlap(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_fixed_noreplace_partial_overlap"
" <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
struct pmem2_source *src;
struct pmem2_config cfg;
struct pmem2_map *map = NULL;
struct pmem2_map *map_overlap = NULL;
struct FHandle *fh;
struct FHandle *fh_overlap;
void *addr;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size, 0,
FH_RDWR);
int ret = pmem2_map(&cfg, src, &map);
UT_ASSERTeq(ret, 0);
/*
* Get the address of the current mapping and move it MEGABYTE into
* the mapping. Arbitrarily set the size of the new mapping to
* MEGABYTE.
*/
addr = (char *)pmem2_map_get_address(map) + MEGABYTE;
size_t overlap_size = MEGABYTE;
/* check if new mapping is in the middle of the existing one */
UT_ASSERT(size > MEGABYTE + overlap_size);
/*
* let's do the mapping in the middle of the existing one, but
* with the address type request PMEM2_ADDRESS_FIXED_NOREPLACE
*/
prepare_config_with_addr(&cfg, &src, &fh_overlap, file,
overlap_size, 0, FH_RDWR, addr,
PMEM2_ADDRESS_FIXED_NOREPLACE);
ret = pmem2_map(&cfg, src, &map_overlap);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_MAPPING_EXISTS);
/* unmap first mapping and close fds */
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
UT_FH_CLOSE(fh_overlap);
return 2;
}
/*
* test_map_fixed_noreplace_partial_above_overlap - map a file which
* starts in the middle of another existing mapping and ends above it,
* with address type request PMEM2_ADDRESS_FIXED_NOREPLACE
*/
static int
test_map_fixed_noreplace_partial_above_overlap(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_map_fixed_noreplace_partial_overlap"
" <file> <size>");
char *file = argv[0];
size_t size = ATOUL(argv[1]);
struct pmem2_source *src;
struct pmem2_config cfg;
struct pmem2_map *map = NULL;
struct pmem2_map *map_overlap = NULL;
struct FHandle *fh;
struct FHandle *fh_overlap;
void *addr;
/* let's create a mapping whose size is half of the file size */
size /= 2;
ut_pmem2_prepare_config(&cfg, &src, &fh, FH_FD, file, size, 0,
FH_RDWR);
int ret = pmem2_map(&cfg, src, &map);
UT_ASSERTeq(ret, 0);
/*
* Get the address of the current mapping and move it MEGABYTE into
* the mapping. Set the size of the new mapping to the same size as
* the first mapping.
*/
addr = (char *)pmem2_map_get_address(map) + MEGABYTE;
size_t overlap_size = size;
/*
* check that the new mapping starts in the middle of the existing
* one and ends above it
*/
UT_ASSERT(size < MEGABYTE + overlap_size);
/*
* let's do the mapping in the middle of the existing one, but
* with the address type request PMEM2_ADDRESS_FIXED_NOREPLACE
*/
prepare_config_with_addr(&cfg, &src, &fh_overlap, file,
overlap_size, 0, FH_RDWR, addr,
PMEM2_ADDRESS_FIXED_NOREPLACE);
ret = pmem2_map(&cfg, src, &map_overlap);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_MAPPING_EXISTS);
/* unmap first mapping and close fds */
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(map, NULL);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
UT_FH_CLOSE(fh_overlap);
return 2;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_map_rdrw_file),
TEST_CASE(test_map_rdonly_file),
TEST_CASE(test_map_valid_ranges),
TEST_CASE(test_map_invalid_ranges),
TEST_CASE(test_map_invalid_alignment),
TEST_CASE(test_map_invalid_fd),
TEST_CASE(test_map_unaligned_length),
TEST_CASE(test_unmap_valid),
TEST_CASE(test_unmap_zero_length),
TEST_CASE(test_unmap_unaligned_addr),
TEST_CASE(test_unmap_unmapped),
TEST_CASE(test_map_get_address),
TEST_CASE(test_map_get_size),
TEST_CASE(test_get_granularity_simple),
TEST_CASE(test_map_larger_than_unaligned_file_size),
TEST_CASE(test_map_zero_file_size),
TEST_CASE(test_map_sharing_shared),
TEST_CASE(test_map_sharing_private),
TEST_CASE(test_map_sharing_private_with_reopened_fd),
TEST_CASE(test_map_sharing_private_rdonly_file),
TEST_CASE(test_map_sharing_private_devdax),
TEST_CASE(test_map_fixed_noreplace_valid),
TEST_CASE(test_map_fixed_noreplace_full_overlap),
TEST_CASE(test_map_fixed_noreplace_partial_overlap),
TEST_CASE(test_map_fixed_noreplace_partial_above_overlap),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_map");
util_init();
out_init("pmem2_map", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
| 27,649 | 23.4907 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_map/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import os
import testframework as t
class PMEM2_MAP(t.Test):
test_type = t.Short
filesize = 16 * t.MiB
with_size = True
def run(self, ctx):
filepath = ctx.create_holey_file(self.filesize, 'testfile',)
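# some test cases additionally take the file size as an argument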
if self.with_size:
filesize = os.stat(filepath).st_size
ctx.exec('pmem2_map', self.test_case, filepath, filesize)
else:
ctx.exec('pmem2_map', self.test_case, filepath)
class PMEM2_MAP_NO_FILE(t.Test):
test_type = t.Short
def run(self, ctx):
ctx.exec('pmem2_map', self.test_case)
@t.windows_exclude
@t.require_devdax(t.DevDax('devdax1'))
class PMEM2_MAP_DEVDAX(t.Test):
test_type = t.Short
with_size = True
def run(self, ctx):
dd = ctx.devdaxes.devdax1
if self.with_size:
ctx.exec('pmem2_map', self.test_case, dd.path, dd.size)
else:
ctx.exec('pmem2_map', self.test_case, dd.path)
class TEST0(PMEM2_MAP):
"""map a O_RDWR file"""
test_case = "test_map_rdrw_file"
with_size = False
class TEST1(PMEM2_MAP_DEVDAX):
"""DevDax map a O_RDWR file"""
test_case = "test_map_rdrw_file"
with_size = False
class TEST2(PMEM2_MAP):
"""map a O_RDONLY file"""
test_case = "test_map_rdonly_file"
with_size = False
class TEST3(PMEM2_MAP_DEVDAX):
"""DevDax map a O_RDONLY file"""
test_case = "test_map_rdonly_file"
with_size = False
class TEST4(PMEM2_MAP):
"""map valid memory ranges"""
test_case = "test_map_valid_ranges"
class TEST5(PMEM2_MAP_DEVDAX):
"""DevDax map valid memory ranges"""
test_case = "test_map_valid_ranges"
class TEST6(PMEM2_MAP):
"""map invalid memory ranges"""
test_case = "test_map_invalid_ranges"
class TEST7(PMEM2_MAP_DEVDAX):
"""DevDax map invalid memory ranges"""
test_case = "test_map_invalid_ranges"
class TEST8(PMEM2_MAP):
"""map using invalid alignment in the offset"""
test_case = "test_map_invalid_alignment"
class TEST9(PMEM2_MAP_DEVDAX):
"""DevDax map using invalid alignment in the offset"""
test_case = "test_map_invalid_alignment"
class TEST10(PMEM2_MAP):
"""map using a invalid file descriptor"""
test_case = "test_map_invalid_fd"
class TEST11(PMEM2_MAP):
"""unmap valid pmem2 mapping"""
test_case = "test_unmap_valid"
class TEST12(PMEM2_MAP_DEVDAX):
"""DevDax unmap valid pmem2 mapping"""
test_case = "test_unmap_valid"
# UnmapViewOfFile does not use length
@t.windows_exclude
class TEST13(PMEM2_MAP):
"""unmap a pmem2 mapping with an invalid length"""
test_case = "test_unmap_zero_length"
class TEST14(PMEM2_MAP_DEVDAX):
"""DevDax unmap a pmem2 mapping with an invalid length"""
test_case = "test_unmap_zero_length"
# UnmapViewOfFile does not care about the address alignment
@t.windows_exclude
class TEST15(PMEM2_MAP):
"""unmap a pmem2 mapping with an unaligned address"""
test_case = "test_unmap_unaligned_addr"
class TEST16(PMEM2_MAP_DEVDAX):
"""DevDax unmap a pmem2 mapping with an unaligned address"""
test_case = "test_unmap_unaligned_addr"
# munmap does not fail if the mapping does not exist
@t.windows_only
class TEST17(PMEM2_MAP):
"""double unmap a pmem2 mapping"""
test_case = "test_unmap_unmapped"
class TEST18(PMEM2_MAP_NO_FILE):
"""test for pmem2_map_get_address"""
test_case = "test_map_get_address"
class TEST19(PMEM2_MAP_NO_FILE):
"""test for pmem2_map_get_size"""
test_case = "test_map_get_size"
class TEST20(PMEM2_MAP_NO_FILE):
"""simply get the previously stored value of granularity"""
test_case = "test_get_granularity_simple"
class TEST21(PMEM2_MAP):
"""map a file of length which is not page-aligned"""
test_case = "test_map_unaligned_length"
filesize = 3 * t.KiB
class TEST22(PMEM2_MAP):
"""map a file which size is not aligned"""
test_case = "test_map_larger_than_unaligned_file_size"
filesize = 16 * t.MiB - 1
class TEST23(PMEM2_MAP):
"""
map a file with zero size, do not provide length
to pmem2_map config
"""
test_case = "test_map_zero_file_size"
filesize = 0
class TEST24(PMEM2_MAP):
"""
map a file with PMEM2_SHARED sharing, changes in the mapping are visible
in another mapping
"""
test_case = "test_map_sharing_shared"
with_size = False
class TEST25(PMEM2_MAP):
"""
map a file with PMEM2_PRIVATE sharing, changes in the mapping are not
visible in another mapping
"""
test_case = "test_map_sharing_private"
with_size = False
class TEST26(PMEM2_MAP):
"""
map a file with PMEM2_PRIVATE sharing, changes in the mapping are not
visible in another mapping, fd is reopened before each mapping
"""
test_case = "test_map_sharing_private_with_reopened_fd"
with_size = False
class TEST27(PMEM2_MAP):
"""
map O_RDONLY file with PMEM2_PRIVATE sharing
"""
test_case = "test_map_sharing_private_rdonly_file"
with_size = False
class TEST28(PMEM2_MAP_DEVDAX):
"""DevDax file with PMEM2_PRIVATE sharing"""
test_case = "test_map_sharing_private_devdax"
with_size = False
class TEST29(PMEM2_MAP):
"""
map a file to the desired addr with request type
PMEM2_ADDRESS_FIXED_NOREPLACE
"""
test_case = "test_map_fixed_noreplace_valid"
class TEST30(PMEM2_MAP):
"""
map a file so that it fully overlaps another existing mapping with the
request type PMEM2_ADDRESS_FIXED_NOREPLACE
"""
test_case = "test_map_fixed_noreplace_full_overlap"
class TEST31(PMEM2_MAP):
"""
map a file in the middle of another existing mapping with the request type
PMEM2_ADDRESS_FIXED_NOREPLACE
"""
test_case = "test_map_fixed_noreplace_partial_overlap"
class TEST32(PMEM2_MAP):
"""
map a file which starts in the middle of another existing mapping and
ends above it, with request type PMEM2_ADDRESS_FIXED_NOREPLACE
"""
test_case = "test_map_fixed_noreplace_partial_above_overlap"
| 6,101 | 23.118577 | 77 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_lane/obj_lane.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_lane.c -- unit test for lanes
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "tx.h"
#include "unittest.h"
#include "pmemcommon.h"
#define MAX_MOCK_LANES 5
#define MOCK_LAYOUT (void *)(0xAAA)
static void *base_ptr;
struct mock_pop {
PMEMobjpool p;
struct lane_layout l[MAX_MOCK_LANES];
};
/*
* mock_flush -- mock flush for lanes
*/
static int
mock_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
return 0;
}
/*
* mock_persist -- mock persist for lanes
*/
static int
mock_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
return 0;
}
/*
* mock_memset -- mock memset for lanes
*/
static void *
mock_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
memset(ptr, c, sz);
return ptr;
}
/*
* mock_drain -- mock drain for lanes
*/
static void
mock_drain(void *ctx)
{
}
static void
test_lane_boot_cleanup_ok(void)
{
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = MAX_MOCK_LANES;
base_ptr = &pop->p;
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
pop->p.p_ops.base = pop;
pop->p.p_ops.flush = mock_flush;
pop->p.p_ops.memset = mock_memset;
pop->p.p_ops.drain = mock_drain;
pop->p.p_ops.persist = mock_persist;
lane_init_data(&pop->p);
lane_info_boot();
UT_ASSERTeq(lane_boot(&pop->p), 0);
for (int i = 0; i < MAX_MOCK_LANES; ++i) {
struct lane *lane = &pop->p.lanes_desc.lane[i];
UT_ASSERTeq(lane->layout, &pop->l[i]);
}
lane_cleanup(&pop->p);
UT_ASSERTeq(pop->p.lanes_desc.lane, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane_locks, NULL);
FREE(pop);
}
static ut_jmp_buf_t Jmp;
static void
signal_handler(int sig)
{
ut_siglongjmp(Jmp);
}
static void
test_lane_hold_release(void)
{
struct ulog *mock_ulog = ZALLOC(SIZEOF_ULOG(1024));
struct pmem_ops p_ops;
struct operation_context *ctx = operation_new(mock_ulog, 1024,
NULL, NULL, &p_ops, LOG_TYPE_REDO);
struct lane mock_lane = {
.layout = MOCK_LAYOUT,
.internal = ctx,
.external = ctx,
.undo = ctx,
};
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = 1;
pop->p.lanes_desc.runtime_nlanes = 1;
pop->p.lanes_desc.lane = &mock_lane;
pop->p.lanes_desc.next_lane_idx = 0;
pop->p.lanes_desc.lane_locks = CALLOC(OBJ_NLANES, sizeof(uint64_t));
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
pop->p.uuid_lo = 123456;
base_ptr = &pop->p;
struct lane *lane;
lane_hold(&pop->p, &lane);
UT_ASSERTeq(lane->layout, MOCK_LAYOUT);
UT_ASSERTeq(lane->undo, ctx);
lane_hold(&pop->p, &lane);
UT_ASSERTeq(lane->layout, MOCK_LAYOUT);
UT_ASSERTeq(lane->undo, ctx);
lane_release(&pop->p);
lane_release(&pop->p);
struct sigaction v, old;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGABRT, &v, &old);
if (!ut_sigsetjmp(Jmp)) {
lane_release(&pop->p); /* only two sections were held */
UT_ERR("we should not get here");
}
SIGACTION(SIGABRT, &old, NULL);
FREE(pop->p.lanes_desc.lane_locks);
FREE(pop);
operation_delete(ctx);
FREE(mock_ulog);
}
static void
test_lane_sizes(void)
{
UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) != LANE_TOTAL_SIZE);
}
enum thread_work_type {
LANE_INFO_DESTROY,
LANE_CLEANUP
};
struct thread_data {
enum thread_work_type work;
};
/*
* test_separate_thread -- child thread entry point for multithreaded
* scenarios
*/
static void *
test_separate_thread(void *arg)
{
UT_ASSERTne(arg, NULL);
struct thread_data *data = arg;
switch (data->work) {
case LANE_INFO_DESTROY:
lane_info_destroy();
break;
case LANE_CLEANUP:
UT_ASSERTne(base_ptr, NULL);
lane_cleanup(base_ptr);
break;
default:
UT_FATAL("Unimplemented thread work type: %d", data->work);
}
return NULL;
}
/*
* test_lane_info_destroy_in_separate_thread -- lane info boot from one thread
* and lane info destroy from another
*/
static void
test_lane_info_destroy_in_separate_thread(void)
{
lane_info_boot();
struct thread_data data;
data.work = LANE_INFO_DESTROY;
os_thread_t thread;
THREAD_CREATE(&thread, NULL, test_separate_thread, &data);
THREAD_JOIN(&thread, NULL);
lane_info_destroy();
}
/*
* test_lane_cleanup_in_separate_thread -- lane boot from one thread and lane
* cleanup from another
*/
static void
test_lane_cleanup_in_separate_thread(void)
{
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = MAX_MOCK_LANES;
pop->p.p_ops.base = pop;
pop->p.p_ops.flush = mock_flush;
pop->p.p_ops.memset = mock_memset;
pop->p.p_ops.drain = mock_drain;
pop->p.p_ops.persist = mock_persist;
base_ptr = &pop->p;
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
lane_init_data(&pop->p);
lane_info_boot();
UT_ASSERTeq(lane_boot(&pop->p), 0);
for (int i = 0; i < MAX_MOCK_LANES; ++i) {
struct lane *lane = &pop->p.lanes_desc.lane[i];
UT_ASSERTeq(lane->layout, &pop->l[i]);
}
struct thread_data data;
data.work = LANE_CLEANUP;
os_thread_t thread;
THREAD_CREATE(&thread, NULL, test_separate_thread, &data);
THREAD_JOIN(&thread, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane_locks, NULL);
FREE(pop);
}
static void
test_fault_injection()
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "lane_boot");
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.p_ops.base = pop;
int ret = lane_boot(&pop->p);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
static void
usage(const char *app)
{
UT_FATAL("usage: %s [scenario: s/m]", app);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_lane");
obj_init();
if (argc != 2)
usage(argv[0]);
switch (argv[1][0]) {
case 's':
/* single thread scenarios */
test_lane_boot_cleanup_ok();
test_lane_hold_release();
test_lane_sizes();
break;
case 'm':
/* multithreaded scenarios */
test_lane_info_destroy_in_separate_thread();
test_lane_cleanup_in_separate_thread();
break;
case 'f':
/* fault injection */
test_fault_injection();
break;
default:
usage(argv[0]);
}
obj_fini();
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically,
* we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 6,418 | 18.334337 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_common.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_common.h -- common declarations for rpmem_obc test
*/
#include "unittest.h"
#include "out.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define BUFF_SIZE 8192
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
static const struct rpmem_pool_attr POOL_ATTR = POOL_ATTR_INIT;
struct server {
int fd_in;
int fd_out;
};
void set_rpmem_cmd(const char *fmt, ...);
struct server *srv_init(void);
void srv_fini(struct server *s);
void srv_recv(struct server *s, void *buff, size_t len);
void srv_send(struct server *s, const void *buff, size_t len);
void srv_wait_disconnect(struct server *s);
void client_connect_wait(struct rpmem_obc *rpc, char *target);
/*
* Since, from the client's perspective, the server may close the
* connection at any moment, execute the test in a loop so that the
* connection gets closed at a possibly different moment each time.
*/
#define ECONNRESET_LOOP 10
void server_econnreset(struct server *s, const void *msg, size_t len);
TEST_CASE_DECLARE(client_enotconn);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(client_monitor);
TEST_CASE_DECLARE(server_monitor);
TEST_CASE_DECLARE(server_wait);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_create);
TEST_CASE_DECLARE(server_create_econnreset);
TEST_CASE_DECLARE(server_create_eproto);
TEST_CASE_DECLARE(server_create_error);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(server_open_econnreset);
TEST_CASE_DECLARE(server_open_eproto);
TEST_CASE_DECLARE(server_open_error);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(server_close_econnreset);
TEST_CASE_DECLARE(server_close_eproto);
TEST_CASE_DECLARE(server_close_error);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server_set_attr);
TEST_CASE_DECLARE(server_set_attr_econnreset);
TEST_CASE_DECLARE(server_set_attr_eproto);
TEST_CASE_DECLARE(server_set_attr_error);
| 2,951 | 26.082569 | 71 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_close.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_close.c -- test cases for rpmem_obj_close function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_close_resp CLOSE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE_RESP,
.size = sizeof(struct rpmem_msg_close_resp),
.status = 0,
},
};
/*
* check_close_msg -- check close message
*/
static void
check_close_msg(struct rpmem_msg_close *msg)
{
size_t msg_size = sizeof(struct rpmem_msg_close);
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CLOSE);
UT_ASSERTeq(msg->hdr.size, msg_size);
}
/*
* server_close_handle -- handle a close request message
*/
static void
server_close_handle(struct server *s, const struct rpmem_msg_close_resp *resp)
{
struct rpmem_msg_close msg;
srv_recv(s, &msg, sizeof(msg));
rpmem_ntoh_msg_close(&msg);
check_close_msg(&msg);
srv_send(s, resp, sizeof(*resp));
}
/*
* client_close_errno -- perform close request operation and expect
* specified errno
*/
static void
client_close_errno(char *target, int ex_errno)
{
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_close(rpc, 0);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_close_eproto function.
*/
#define CLOSE_EPROTO_COUNT 5
/*
* server_close_eproto -- send invalid close request responses to a client
*/
int
server_close_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, CLOSE_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* client_close_error -- check if valid errno is set if error status returned
*/
static void
client_close_error(char *target)
{
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_close_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_close -- test case for close request operation - client side
*/
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_close_econnreset %d", i % 2);
client_close_errno(target, ECONNRESET);
}
for (int i = 0; i < CLOSE_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_close_eproto %d", i);
client_close_errno(target, EPROTO);
}
client_close_error(target);
set_rpmem_cmd("server_close");
client_close_errno(target, 0);
return 1;
}
/*
* server_close_error -- return error status in close response message
*/
int
server_close_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
resp.hdr.status = e;
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_close_econnreset -- test case for closing connection - server side
*/
int
server_close_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
rpmem_hton_msg_close_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_close -- test case for close request operation - server side
*/
int
server_close(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 0;
}
| 4,725 | 18.289796 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_set_attr.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* rpmem_obc_test_set_attr.c -- test cases for rpmem_set_attr function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_set_attr_resp SET_ATTR_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_SET_ATTR_RESP,
.size = sizeof(struct rpmem_msg_set_attr_resp),
.status = 0,
}
};
/*
* check_set_attr_msg -- check set attributes message
*/
static void
check_set_attr_msg(struct rpmem_msg_set_attr *msg)
{
size_t msg_size = sizeof(struct rpmem_msg_set_attr);
struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_SET_ATTR);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
* server_set_attr_handle -- handle a set attributes request message
*/
static void
server_set_attr_handle(struct server *s,
const struct rpmem_msg_set_attr_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_set_attr);
struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_set_attr(msg);
check_set_attr_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_set_attr_eproto function.
*/
#define SET_ATTR_EPROTO_COUNT 5
/*
* server_set_attr_eproto -- send invalid set attributes request responses to
* a client
*/
int
server_set_attr_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, SET_ATTR_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_set_attr_error -- return error status in set attributes response
* message
*/
int
server_set_attr_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
resp.hdr.status = e;
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_set_attr_econnreset -- test case for closing connection - server side
*/
int
server_set_attr_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
rpmem_hton_msg_set_attr_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_set_attr -- test case for rpmem_obc_set_attr - server side
* side
*/
int
server_set_attr(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
* client_set_attr_init -- initialize communication - client side
*/
static struct rpmem_obc *
client_set_attr_init(char *target)
{
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
return rpc;
}
/*
* client_set_attr_fini -- finalize communication - client side
*/
static void
client_set_attr_fini(struct rpmem_obc *rpc)
{
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* client_set_attr_errno -- perform set attributes request operation and expect
* specified errno.
*/
static void
client_set_attr_errno(char *target, int ex_errno)
{
struct rpmem_obc *rpc = client_set_attr_init(target);
const struct rpmem_pool_attr pool_attr_alt = POOL_ATTR_ALT;
int ret = rpmem_obc_set_attr(rpc, &pool_attr_alt);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
}
client_set_attr_fini(rpc);
}
/*
* client_set_attr_error -- check if valid errno is set if error status
* returned
*/
static void
client_set_attr_error(char *target)
{
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_set_attr_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = client_set_attr_init(target);
const struct rpmem_pool_attr pool_attr_alt = POOL_ATTR_ALT;
ret = rpmem_obc_set_attr(rpc, &pool_attr_alt);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
client_set_attr_fini(rpc);
}
}
/*
* client_set_attr -- test case for set attributes request operation - client
* side
*/
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_set_attr_econnreset %d", i % 2);
client_set_attr_errno(target, ECONNRESET);
}
for (int i = 0; i < SET_ATTR_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_set_attr_eproto %d", i);
client_set_attr_errno(target, EPROTO);
}
client_set_attr_error(target);
set_rpmem_cmd("server_set_attr");
client_set_attr_errno(target, 0);
return 1;
}
| 5,684 | 19.672727 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_create.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_create.c -- test cases for rpmem_obc_create function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_create_resp CREATE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CREATE_RESP,
.size = sizeof(struct rpmem_msg_create_resp),
.status = 0,
},
.ibc = {
.port = PORT,
.rkey = RKEY,
.raddr = RADDR,
.persist_method = RPMEM_PM_GPSPM,
.nlanes = NLANES_RESP,
},
};
/*
* check_create_msg -- check create message
*/
static void
check_create_msg(struct rpmem_msg_create *msg)
{
size_t pool_desc_size = strlen(POOL_DESC) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CREATE);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
UT_ASSERTeq(msg->c.provider, PROVIDER);
UT_ASSERTeq(msg->c.nlanes, NLANES);
UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
* server_create_handle -- handle a create request message
*/
static void
server_create_handle(struct server *s, const struct rpmem_msg_create_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_create) +
strlen(POOL_DESC) + 1;
struct rpmem_msg_create *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_create(msg);
check_create_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_create_eproto function.
*/
#define CREATE_EPROTO_COUNT 8
/*
* server_create_eproto -- send invalid create request responses to a client
*/
int
server_create_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, CREATE_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
case 5:
resp.ibc.port = 0;
break;
case 6:
resp.ibc.port = UINT16_MAX + 1;
break;
case 7:
resp.ibc.persist_method = MAX_RPMEM_PM;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_create_error -- return an error status in create response message
*/
int
server_create_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
resp.hdr.status = e;
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_create_econnreset -- test case for closing connection - server side
*/
int
server_create_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
rpmem_hton_msg_create_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_create -- test case for rpmem_obc_create function - server side
*/
int
server_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 0)
UT_FATAL("usage: %s", tc->name);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
* client_create_errno -- perform create request operation and expect
* specified errno. If ex_errno is zero expect certain values in res struct.
*/
static void
client_create_errno(char *target, int ex_errno)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr res;
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(res.port, CREATE_RESP.ibc.port);
UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey);
UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr);
UT_ASSERTeq(res.persist_method,
CREATE_RESP.ibc.persist_method);
UT_ASSERTeq(res.nlanes,
CREATE_RESP.ibc.nlanes);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* client_create_error -- check if valid errno is set if error status returned
*/
static void
client_create_error(char *target)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr res;
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_create_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_create -- test case for create request operation - client side
*/
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_create_econnreset %d", i % 2);
client_create_errno(target, ECONNRESET);
}
for (int i = 0; i < CREATE_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_create_eproto %d", i);
client_create_errno(target, EPROTO);
}
client_create_error(target);
set_rpmem_cmd("server_create");
client_create_errno(target, 0);
return 1;
}
| 6,642 | 20.498382 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_common.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_obc_test_common.c -- common definitions for rpmem_obc tests
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include "rpmem_obc_test_common.h"
#include "os.h"
#define CMD_BUFF_SIZE 4096
static const char *rpmem_cmd;
/*
* set_rpmem_cmd -- set RPMEM_CMD variable
*/
void
set_rpmem_cmd(const char *fmt, ...)
{
static char cmd_buff[CMD_BUFF_SIZE];
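/* cache the original RPMEM_CMD value on the first call */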
if (!rpmem_cmd) {
char *cmd = os_getenv(RPMEM_CMD_ENV);
UT_ASSERTne(cmd, NULL);
rpmem_cmd = STRDUP(cmd);
}
ssize_t ret;
size_t cnt = 0;
va_list ap;
va_start(ap, fmt);
ret = SNPRINTF(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt,
"%s ", rpmem_cmd);
UT_ASSERT(ret > 0);
cnt += (size_t)ret;
ret = vsnprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt, fmt, ap);
UT_ASSERT(ret > 0);
cnt += (size_t)ret;
va_end(ap);
ret = os_setenv(RPMEM_CMD_ENV, cmd_buff, 1);
UT_ASSERTeq(ret, 0);
/*
* Rpmem keeps an internal copy of the RPMEM_CMD variable and it is
* assumed that RPMEM_CMD will not change its value during execution. To
* refresh the internal copy it must be destroyed and a new instance must
* be initialized manually.
*/
rpmem_util_cmds_fini();
rpmem_util_cmds_init();
}
struct server *
srv_init(void)
{
struct server *s = MALLOC(sizeof(*s));
s->fd_in = STDIN_FILENO;
s->fd_out = STDOUT_FILENO;
uint32_t status = 0;
srv_send(s, &status, sizeof(status));
return s;
}
/*
* srv_fini -- close the server
*/
void
srv_fini(struct server *s)
{
FREE(s);
}
/*
* srv_recv -- read a message from the client
*/
void
srv_recv(struct server *s, void *buff, size_t len)
{
size_t rd = 0;
uint8_t *cbuf = buff;
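/* read() may return less data than requested - loop until the whole message arrives */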
while (rd < len) {
ssize_t ret = read(s->fd_in, &cbuf[rd], len - rd);
UT_ASSERT(ret > 0);
rd += (size_t)ret;
}
}
/*
* srv_send -- send a message to the client
*/
void
srv_send(struct server *s, const void *buff, size_t len)
{
size_t wr = 0;
const uint8_t *cbuf = buff;
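/* write() may accept less data than requested - loop until the whole message is sent */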
while (wr < len) {
ssize_t ret = write(s->fd_out, &cbuf[wr], len - wr);
UT_ASSERT(ret > 0);
wr += (size_t)ret;
}
}
/*
* client_connect_wait -- wait until client connects to the server
*/
void
client_connect_wait(struct rpmem_obc *rpc, char *target)
{
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
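/* keep retrying until the server accepts the connection */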
while (rpmem_obc_connect(rpc, info))
;
rpmem_target_free(info);
}
/*
* server_econnreset -- disconnect from the client while performing an
* operation
*/
void
server_econnreset(struct server *s, const void *msg, size_t len)
{
for (int i = 0; i < ECONNRESET_LOOP; i++) {
srv_send(s, msg, len);
}
}
| 2,623 | 17.478873 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_obc_test.c -- unit test for rpmem_obc module
*/
#include "rpmem_obc_test_common.h"
#include "pmemcommon.h"
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(client_enotconn),
TEST_CASE(client_connect),
TEST_CASE(client_create),
TEST_CASE(server_create),
TEST_CASE(server_create_econnreset),
TEST_CASE(server_create_eproto),
TEST_CASE(server_create_error),
TEST_CASE(client_open),
TEST_CASE(server_open),
TEST_CASE(server_open_econnreset),
TEST_CASE(server_open_eproto),
TEST_CASE(server_open_error),
TEST_CASE(client_close),
TEST_CASE(server_close),
TEST_CASE(server_close_econnreset),
TEST_CASE(server_close_eproto),
TEST_CASE(server_close_error),
TEST_CASE(client_monitor),
TEST_CASE(server_monitor),
TEST_CASE(client_set_attr),
TEST_CASE(server_set_attr),
TEST_CASE(server_set_attr_econnreset),
TEST_CASE(server_set_attr_eproto),
TEST_CASE(server_set_attr_error),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_obc");
common_init("rpmem_obc",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmem_util_cmds_init();
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
rpmem_util_cmds_fini();
common_fini();
DONE(NULL);
}
| 1,388 | 20.369231 | 59 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_open.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_open.c -- test cases for rpmem_obj_open function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_open_resp OPEN_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_OPEN_RESP,
.size = sizeof(struct rpmem_msg_open_resp),
.status = 0,
},
.ibc = {
.port = PORT,
.rkey = RKEY,
.raddr = RADDR,
.persist_method = RPMEM_PM_GPSPM,
.nlanes = NLANES_RESP,
},
.pool_attr = POOL_ATTR_INIT,
};
/*
* check_open_msg -- check open message
*/
static void
check_open_msg(struct rpmem_msg_open *msg)
{
size_t pool_desc_size = strlen(POOL_DESC) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_OPEN);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
UT_ASSERTeq(msg->c.provider, PROVIDER);
UT_ASSERTeq(msg->c.nlanes, NLANES);
UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
}
/*
* server_open_handle -- handle an open request message
*/
static void
server_open_handle(struct server *s, const struct rpmem_msg_open_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_open) +
strlen(POOL_DESC) + 1;
struct rpmem_msg_open *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_open(msg);
check_open_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_open_eproto function.
*/
#define OPEN_EPROTO_COUNT 8
/*
* server_open_eproto -- send invalid open request responses to a client
*/
int
server_open_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, OPEN_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
case 5:
resp.ibc.port = 0;
break;
case 6:
resp.ibc.port = UINT16_MAX + 1;
break;
case 7:
resp.ibc.persist_method = MAX_RPMEM_PM;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_open_error -- return error status in open response message
*/
int
server_open_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
resp.hdr.status = e;
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_open_econnreset -- test case for closing connection - server side
*/
int
server_open_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
rpmem_hton_msg_open_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_open -- test case for open request message - server side
*/
int
server_open(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
* client_open_errno -- perform open request operation and expect
* specified errno.
* If ex_errno is zero expect certain values in res struct.
*/
static void
client_open_errno(char *target, int ex_errno)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(res.port, OPEN_RESP.ibc.port);
UT_ASSERTeq(res.rkey, OPEN_RESP.ibc.rkey);
UT_ASSERTeq(res.raddr, OPEN_RESP.ibc.raddr);
UT_ASSERTeq(res.persist_method,
OPEN_RESP.ibc.persist_method);
UT_ASSERTeq(res.nlanes,
OPEN_RESP.ibc.nlanes);
UT_ASSERTeq(memcmp(pool_attr.signature,
OPEN_RESP.pool_attr.signature,
RPMEM_POOL_HDR_SIG_LEN), 0);
UT_ASSERTeq(pool_attr.major, OPEN_RESP.pool_attr.major);
UT_ASSERTeq(pool_attr.compat_features,
OPEN_RESP.pool_attr.compat_features);
UT_ASSERTeq(pool_attr.incompat_features,
OPEN_RESP.pool_attr.incompat_features);
UT_ASSERTeq(pool_attr.ro_compat_features,
OPEN_RESP.pool_attr.ro_compat_features);
UT_ASSERTeq(memcmp(pool_attr.poolset_uuid,
OPEN_RESP.pool_attr.poolset_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.uuid,
OPEN_RESP.pool_attr.uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.next_uuid,
OPEN_RESP.pool_attr.next_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.prev_uuid,
OPEN_RESP.pool_attr.prev_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.user_flags,
OPEN_RESP.pool_attr.user_flags,
RPMEM_POOL_USER_FLAGS_LEN), 0);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* client_open_error -- check if valid errno is set if error status returned
*/
static void
client_open_error(char *target)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_open_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_open -- test case for open request message - client side
*/
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_open_econnreset %d", i % 2);
client_open_errno(target, ECONNRESET);
}
for (int i = 0; i < OPEN_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_open_eproto %d", i);
client_open_errno(target, EPROTO);
}
client_open_error(target);
set_rpmem_cmd("server_open");
client_open_errno(target, 0);
return 1;
}
| 7,427 | 21.306306 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_misc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_misc.c -- miscellaneous test cases for rpmem_obc module
*/
#include <netdb.h>
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_close_resp CLOSE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE_RESP,
.size = sizeof(struct rpmem_msg_close_resp),
.status = 0,
},
};
/*
* client_enotconn -- check if ENOTCONN error is returned after
* calling rpmem_obc API without connecting to the server.
*/
int
client_enotconn(const struct test_case *tc, int argc, char *argv[])
{
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
rpmem_obc_fini(rpc);
return 0;
}
/*
* client_connect -- try to connect to the server at specified address and port
*/
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]...", tc->name);
for (int i = 0; i < argc; i++) {
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_target_info *info;
info = rpmem_target_parse(argv[i]);
UT_ASSERTne(info, NULL);
int ret = rpmem_obc_connect(rpc, info);
if (ret) {
UT_OUT("not connected: %s: %s", argv[i],
out_get_errormsg());
} else {
UT_OUT(" connected: %s", argv[i]);
rpmem_obc_disconnect(rpc);
}
rpmem_target_free(info);
rpmem_obc_fini(rpc);
}
return argc;
}
/*
* server_monitor -- test case for rpmem_obc_monitor function - server side
*/
int
server_monitor(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_close close;
struct rpmem_msg_close_resp resp = CLOSE_RESP;
rpmem_hton_msg_close_resp(&resp);
srv_recv(s, &close, sizeof(close));
srv_send(s, &resp, sizeof(resp));
srv_fini(s);
return 0;
}
/*
* client_monitor -- test case for rpmem_obc_monitor function - client side
*/
int
client_monitor(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
set_rpmem_cmd("server_monitor");
{
/*
* Connect to target node, check connection state before
* and after disconnecting.
*/
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
int ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTne(ret, 1);
rpmem_target_free(info);
rpmem_obc_fini(rpc);
}
{
/*
* Connect to target node and expect that server will
* disconnect.
*/
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
int ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 0);
UT_ASSERTne(ret, 1);
rpmem_obc_disconnect(rpc);
rpmem_target_free(info);
rpmem_obc_fini(rpc);
}
return 1;
}
| 3,975 | 19.708333 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/setup.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2019, Intel Corporation
#
# src/test/rpmem_obc/setup.sh -- common setup for rpmem_obc tests
#
set -e
require_nodes 2
require_node_log_files 1 $RPMEM_LOG_FILE
RPMEM_CMD="\"cd ${NODE_TEST_DIR[0]} && UNITTEST_FORCE_QUIET=1 \
LD_LIBRARY_PATH=${NODE_LD_LIBRARY_PATH[0]}:$REMOTE_LD_LIBRARY_PATH \
./rpmem_obc$EXESUFFIX\""
export_vars_node 1 RPMEM_CMD
| 428 | 22.833333 | 69 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_out_of_memory/obj_out_of_memory.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_out_of_memory.c -- allocate objects until OOM
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "out_of_memory"
struct cargs {
size_t size;
};
static int
test_constructor(PMEMobjpool *pop, void *addr, void *args)
{
struct cargs *a = args;
pmemobj_memset_persist(pop, addr, rand() % 256, a->size / 2);
return 0;
}
static void
test_alloc(PMEMobjpool *pop, size_t size)
{
unsigned long cnt = 0;
while (1) {
struct cargs args = { size };
if (pmemobj_alloc(pop, NULL, size, 0,
test_constructor, &args) != 0)
break;
cnt++;
}
UT_OUT("size: %zu allocs: %lu", size, cnt);
}
static void
test_free(PMEMobjpool *pop)
{
PMEMoid oid;
PMEMoid next;
POBJ_FOREACH_SAFE(pop, oid, next)
pmemobj_free(&oid);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_out_of_memory");
if (argc < 3)
UT_FATAL("usage: %s size filename ...", argv[0]);
size_t size = ATOUL(argv[1]);
for (int i = 2; i < argc; i++) {
const char *path = argv[i];
PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
test_alloc(pop, size);
pmemobj_close(pop);
UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1);
/*
* To prevent subsequent opens from receiving exactly the same
* volatile memory addresses a dummy malloc has to be made.
* This can expose issues in which traces of previous volatile
* state are left over in the persistent pool.
*/
void *heap_touch = MALLOC(1);
UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL);
test_free(pop);
pmemobj_close(pop);
FREE(heap_touch);
}
DONE(NULL);
}
| 1,755 | 17.484211 | 64 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_db/rpmemd_db_test.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_db_test.c -- unit test for pool set database
*
* usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2>
*/
#include "file.h"
#include "unittest.h"
#include "librpmem.h"
#include "rpmemd_db.h"
#include "rpmemd_log.h"
#include "util_pmem.h"
#include "set.h"
#include "out.h"
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#define POOL_MODE 0644
#define FAILED_FUNC(func_name) \
UT_ERR("!%s(): %s() failed", __func__, func_name);
#define FAILED_FUNC_PARAM(func_name, param) \
UT_ERR("!%s(): %s(%s) failed", __func__, func_name, param);
#define NPOOLS_DUAL 2
#define POOL_ATTR_CREATE 0
#define POOL_ATTR_OPEN 1
#define POOL_ATTR_SET_ATTR 2
#define POOL_STATE_INITIAL 0
#define POOL_STATE_CREATED 1
#define POOL_STATE_OPENED 2
#define POOL_STATE_CLOSED POOL_STATE_CREATED
#define POOL_STATE_REMOVED POOL_STATE_INITIAL
/*
* fill_rand -- fill a buffer with random values
*/
static void
fill_rand(void *addr, size_t len)
{
unsigned char *buff = addr;
srand(time(NULL));
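/* fill the buffer with pseudo-random lowercase letters */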
for (unsigned i = 0; i < len; i++)
buff[i] = (rand() % ('z' - 'a')) + 'a';
}
/*
* test_init -- test rpmemd_db_init() and rpmemd_db_fini()
*/
static int
test_init(const char *root_dir)
{
struct rpmemd_db *db;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
rpmemd_db_fini(db);
return 0;
}
/*
* test_check_dir -- test rpmemd_db_check_dir()
*/
static int
test_check_dir(const char *root_dir)
{
struct rpmemd_db *db;
int ret;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
ret = rpmemd_db_check_dir(db);
if (ret) {
FAILED_FUNC("rpmemd_db_check_dir");
}
rpmemd_db_fini(db);
return ret;
}
/*
* test_create -- test rpmemd_db_pool_create()
*/
static int
test_create(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr;
memset(&attr, 0, sizeof(attr));
attr.incompat_features = 2;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto fini;
}
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_create_dual -- dual test for rpmemd_db_pool_create()
*/
static int
test_create_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr1;
memset(&attr1, 0, sizeof(attr1));
attr1.incompat_features = 2;
struct rpmemd_db_pool *prp1, *prp2;
struct rpmemd_db *db;
int ret = -1;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
/* test dual create */
prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
goto err_create_1;
}
prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
goto err_create_2;
}
rpmemd_db_pool_close(db, prp2);
rpmemd_db_pool_close(db, prp1);
ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
goto err_remove_2;
}
ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
}
goto fini;
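/* error paths: release only the resources that were successfully acquired */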
err_create_2:
rpmemd_db_pool_close(db, prp1);
err_remove_2:
rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* compare_attr -- compare pool's attributes
*/
static void
compare_attr(struct rpmem_pool_attr *a1, struct rpmem_pool_attr *a2)
{
char *msg;
if (a1->major != a2->major) {
msg = "major";
goto err_mismatch;
}
if (a1->compat_features != a2->compat_features) {
msg = "compat_features";
goto err_mismatch;
}
if (a1->incompat_features != a2->incompat_features) {
msg = "incompat_features";
goto err_mismatch;
}
if (a1->ro_compat_features != a2->ro_compat_features) {
msg = "ro_compat_features";
goto err_mismatch;
}
if (memcmp(a1->signature, a2->signature, RPMEM_POOL_HDR_SIG_LEN)) {
msg = "signature";
goto err_mismatch;
}
if (memcmp(a1->poolset_uuid, a2->poolset_uuid,
RPMEM_POOL_HDR_UUID_LEN)) {
msg = "poolset_uuid";
goto err_mismatch;
}
if (memcmp(a1->uuid, a2->uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "uuid";
goto err_mismatch;
}
if (memcmp(a1->next_uuid, a2->next_uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "next_uuid";
goto err_mismatch;
}
if (memcmp(a1->prev_uuid, a2->prev_uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "prev_uuid";
goto err_mismatch;
}
return;
err_mismatch:
errno = EINVAL;
UT_FATAL("%s(): pool attributes mismatch (%s)", __func__, msg);
}
/*
* test_open -- test rpmemd_db_pool_open()
*/
static int
test_open(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr1, attr2;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr1, sizeof(attr1));
attr1.major = 1;
attr1.incompat_features = 2;
attr1.compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr1);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto fini;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr2);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto fini;
}
rpmemd_db_pool_close(db, prp);
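/* attributes read back on open must match the ones passed at create time */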
compare_attr(&attr1, &attr2);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_open_dual -- dual test for rpmemd_db_pool_open()
*/
static int
test_open_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr1a, attr2a, attr1b, attr2b;
struct rpmemd_db_pool *prp1, *prp2;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr1a, sizeof(attr1a));
fill_rand(&attr1b, sizeof(attr1b));
attr1a.major = 1;
attr1a.incompat_features = 2;
attr1a.compat_features = 0;
attr1b.major = 1;
attr1b.incompat_features = 2;
attr1b.compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1a);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
goto err_create_1;
}
rpmemd_db_pool_close(db, prp1);
prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1b);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
goto err_create_2;
}
rpmemd_db_pool_close(db, prp2);
/* test dual open */
prp1 = rpmemd_db_pool_open(db, pool_desc_1, 0, &attr2a);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_1);
goto err_open_1;
}
prp2 = rpmemd_db_pool_open(db, pool_desc_2, 0, &attr2b);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_2);
goto err_open_2;
}
rpmemd_db_pool_close(db, prp1);
rpmemd_db_pool_close(db, prp2);
compare_attr(&attr1a, &attr2a);
compare_attr(&attr1b, &attr2b);
ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
goto err_remove_2;
}
ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
}
goto fini;
err_open_2:
rpmemd_db_pool_close(db, prp1);
err_open_1:
rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
err_create_2:
err_remove_2:
rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_set_attr -- test rpmemd_db_pool_set_attr()
*/
static int
test_set_attr(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr[3];
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr[POOL_ATTR_CREATE], sizeof(attr[POOL_ATTR_CREATE]));
fill_rand(&attr[POOL_ATTR_SET_ATTR], sizeof(attr[POOL_ATTR_SET_ATTR]));
attr[POOL_ATTR_CREATE].major = 1;
attr[POOL_ATTR_CREATE].incompat_features = 2;
attr[POOL_ATTR_CREATE].compat_features = 0;
attr[POOL_ATTR_SET_ATTR].major = 1;
attr[POOL_ATTR_SET_ATTR].incompat_features = 2;
attr[POOL_ATTR_SET_ATTR].compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr[POOL_ATTR_CREATE]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto err_create;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto err_open;
}
compare_attr(&attr[POOL_ATTR_CREATE], &attr[POOL_ATTR_OPEN]);
ret = rpmemd_db_pool_set_attr(prp, &attr[POOL_ATTR_SET_ATTR]);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_set_attr");
goto err_set_attr;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto err_open;
}
compare_attr(&attr[POOL_ATTR_SET_ATTR], &attr[POOL_ATTR_OPEN]);
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
goto fini;
err_set_attr:
rpmemd_db_pool_close(db, prp);
err_open:
rpmemd_db_pool_remove(db, pool_desc, 0, 0);
err_create:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_set_attr_dual -- dual test for rpmemd_db_pool_set_attr()
*/
static int
test_set_attr_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr[NPOOLS_DUAL][3];
struct rpmemd_db_pool *prp[NPOOLS_DUAL];
const char *pool_desc[NPOOLS_DUAL] = {pool_desc_1, pool_desc_2};
unsigned pool_state[NPOOLS_DUAL] = {POOL_STATE_INITIAL};
struct rpmemd_db *db;
int ret = -1;
/* initialize rpmem database */
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
/*
* generate random pool attributes for create and set
* attributes operations
*/
fill_rand(&attr[p][POOL_ATTR_CREATE],
sizeof(attr[p][POOL_ATTR_CREATE]));
fill_rand(&attr[p][POOL_ATTR_SET_ATTR],
sizeof(attr[p][POOL_ATTR_SET_ATTR]));
attr[p][POOL_ATTR_CREATE].major = 1;
attr[p][POOL_ATTR_CREATE].incompat_features = 2;
attr[p][POOL_ATTR_CREATE].compat_features = 0;
attr[p][POOL_ATTR_SET_ATTR].major = 1;
attr[p][POOL_ATTR_SET_ATTR].incompat_features = 2;
attr[p][POOL_ATTR_SET_ATTR].compat_features = 0;
/* create pool */
prp[p] = rpmemd_db_pool_create(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_CREATE]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create",
pool_desc[p]);
goto err;
}
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CREATED;
}
/* open pools and check pool attributes */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_OPEN]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
goto err;
}
pool_state[p] = POOL_STATE_OPENED;
compare_attr(&attr[p][POOL_ATTR_CREATE],
&attr[p][POOL_ATTR_OPEN]);
}
/* set attributes and close pools */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
ret = rpmemd_db_pool_set_attr(prp[p],
&attr[p][POOL_ATTR_SET_ATTR]);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_set_attr",
pool_desc[p]);
goto err;
}
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CLOSED;
}
/* open pools and check attributes */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_OPEN]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
goto err;
}
pool_state[p] = POOL_STATE_OPENED;
compare_attr(&attr[p][POOL_ATTR_SET_ATTR],
&attr[p][POOL_ATTR_OPEN]);
}
err:
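/* common cleanup (also reached on success): close opened pools, remove created ones */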
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
if (pool_state[p] == POOL_STATE_OPENED) {
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CLOSED;
}
if (pool_state[p] == POOL_STATE_CREATED) {
ret = rpmemd_db_pool_remove(db, pool_desc[p], 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove",
pool_desc[p]);
}
pool_state[p] = POOL_STATE_REMOVED;
}
}
rpmemd_db_fini(db);
return ret;
}
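/*
 * exists_cb / noexists_cb -- util_poolset_foreach_part() callbacks reporting
 * whether a pool part file does (or does not) exist
 */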
static int
exists_cb(struct part_file *pf, void *arg)
{
return util_file_exists(pf->part->path);
}
static int
noexists_cb(struct part_file *pf, void *arg)
{
int exists = util_file_exists(pf->part->path);
if (exists < 0)
return -1;
else
return !exists;
}
/*
* test_remove -- test for rpmemd_db_pool_remove()
*/
static void
test_remove(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret;
char path[PATH_MAX];
SNPRINTF(path, PATH_MAX, "%s/%s", root_dir, pool_desc);
fill_rand(&attr, sizeof(attr));
strncpy((char *)attr.poolset_uuid, "TEST", sizeof(attr.poolset_uuid));
attr.incompat_features = 2;
attr.compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
UT_ASSERTne(db, NULL);
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
UT_ASSERTne(prp, NULL);
rpmemd_db_pool_close(db, prp);
ret = util_poolset_foreach_part(path, exists_cb, NULL);
UT_ASSERTeq(ret, 1);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
UT_ASSERTeq(ret, 0);
ret = util_poolset_foreach_part(path, noexists_cb, NULL);
UT_ASSERTeq(ret, 1);
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
UT_ASSERTne(prp, NULL);
rpmemd_db_pool_close(db, prp);
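/* remove again with the last argument set -- the pool set file itself is expected to disappear */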
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 1);
UT_ASSERTeq(ret, 0);
ret = util_file_exists(path);
UT_ASSERTne(ret, 1);
rpmemd_db_fini(db);
}
int
main(int argc, char *argv[])
{
char *pool_desc[2], *log_file;
char root_dir[PATH_MAX];
START(argc, argv, "rpmemd_db");
util_init();
out_init("rpmemd_db", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0);
if (argc != 5)
UT_FATAL("usage: %s <log-file> <root_dir> <pool_desc_1>"
" <pool_desc_2>", argv[0]);
log_file = argv[1];
if (realpath(argv[2], root_dir) == NULL)
UT_FATAL("!realpath(%s)", argv[1]);
pool_desc[0] = argv[3];
pool_desc[1] = argv[4];
if (rpmemd_log_init("rpmemd error: ", log_file, 0))
FAILED_FUNC("rpmemd_log_init");
test_init(root_dir);
test_check_dir(root_dir);
test_create(root_dir, pool_desc[0]);
test_create_dual(root_dir, pool_desc[0], pool_desc[1]);
test_open(root_dir, pool_desc[0]);
test_open_dual(root_dir, pool_desc[0], pool_desc[1]);
test_set_attr(root_dir, pool_desc[0]);
test_set_attr_dual(root_dir, pool_desc[0], pool_desc[1]);
test_remove(root_dir, pool_desc[0]);
rpmemd_log_close();
out_fini();
DONE(NULL);
}
| 15,339 | 22.636364 | 72 | c |
| null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/pmem2_granularity.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_granularity.c -- test for granularity functionality
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "source.h"
#include "pmem2_granularity.h"
#include "unittest.h"
#include "ut_pmem2_config.h"
#include "ut_pmem2_utils.h"
#include "out.h"
size_t Is_nfit = 1;
size_t Pc_type = 7;
size_t Pc_capabilities;
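/* globals consumed by the mocked platform-capability checks (see set_eadr()) */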
/*
* parse_args -- parse args from the input
*/
static int
parse_args(const struct test_case *tc, int argc, char *argv[],
char **file)
{
if (argc < 1)
UT_FATAL("usage: %s <file>", tc->name);
*file = argv[0];
return 1;
}
/*
* set_eadr -- set variable required for mocked functions
*/
static void
set_eadr()
{
int is_eadr = atoi(os_getenv("IS_EADR"));
if (is_eadr)
Pc_capabilities = 3;
else
Pc_capabilities = 2;
}
/*
* test_ctx -- essential parameters used by test
*/
struct test_ctx {
int fd;
enum pmem2_granularity requested_granularity;
enum pmem2_granularity expected_granularity;
};
/*
* init_test -- initialize basic parameters for test
*/
static void
init_test(char *file, struct test_ctx *ctx,
enum pmem2_granularity granularity)
{
set_eadr();
ctx->fd = OPEN(file, O_RDWR);
ctx->requested_granularity = granularity;
int is_eadr = atoi(os_getenv("IS_EADR"));
int is_pmem = atoi(os_getenv("IS_PMEM"));
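/* expected result: eADR => byte, pmem without eADR => cache line, otherwise => page */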
if (is_eadr) {
if (is_pmem)
ctx->expected_granularity = PMEM2_GRANULARITY_BYTE;
else
UT_FATAL("invalid configuration IS_EADR && !IS_PMEM");
} else if (is_pmem) {
ctx->expected_granularity = PMEM2_GRANULARITY_CACHE_LINE;
} else {
ctx->expected_granularity = PMEM2_GRANULARITY_PAGE;
}
}
/*
* init_cfg -- initialize basic pmem2 config
*/
static void
init_cfg(struct pmem2_config *cfg,
struct pmem2_source **src, struct test_ctx *ctx)
{
pmem2_config_init(cfg);
int ret = pmem2_source_from_fd(src, ctx->fd);
UT_PMEM2_EXPECT_RETURN(ret, 0);
}
/*
* cleanup -- cleanup the environment after test
*/
static void
cleanup(struct pmem2_source *src, struct test_ctx *ctx)
{
#ifdef _WIN32
CloseHandle(src->value.handle);
#else
CLOSE(ctx->fd);
#endif
}
/*
* map_with_available_granularity -- map the range with valid granularity,
* includes cleanup
*/
static void
map_with_available_granularity(struct pmem2_config *cfg,
struct pmem2_source *src, struct test_ctx *ctx)
{
cfg->requested_max_granularity = ctx->requested_granularity;
struct pmem2_map *map;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(map, NULL);
UT_ASSERTeq(ctx->expected_granularity,
pmem2_map_get_store_granularity(map));
/* cleanup after the test */
pmem2_unmap(&map);
}
/*
* map_with_unavailable_granularity -- map the range with invalid
* granularity (unsuccessful)
*/
static void
map_with_unavailable_granularity(struct pmem2_config *cfg,
struct pmem2_source *src, struct test_ctx *ctx)
{
cfg->requested_max_granularity = ctx->requested_granularity;
struct pmem2_map *map;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
UT_ERR("%s", pmem2_errormsg());
UT_ASSERTeq(map, NULL);
}
typedef void(*map_func)(struct pmem2_config *cfg,
struct pmem2_source *src, struct test_ctx *ctx);
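/* map_do is either map_with_available_granularity or map_with_unavailable_granularity */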
/*
* granularity_template -- template for testing granularity in pmem2
*/
static int
granularity_template(const struct test_case *tc, int argc, char *argv[],
map_func map_do, enum pmem2_granularity granularity)
{
char *file = NULL;
int ret = parse_args(tc, argc, argv, &file);
struct test_ctx ctx = { 0 };
init_test(file, &ctx, granularity);
struct pmem2_config cfg;
struct pmem2_source *src;
init_cfg(&cfg, &src, &ctx);
map_do(&cfg, src, &ctx);
cleanup(src, &ctx);
pmem2_source_delete(&src);
return ret;
}
/*
* test_granularity_req_byte_avail_byte -- require byte granularity,
* when byte granularity is available
*/
static int
test_granularity_req_byte_avail_byte(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_BYTE);
}
/*
* test_granularity_req_byte_avail_cl -- require byte granularity,
* when cache line granularity is available
*/
static int
test_granularity_req_byte_avail_cl(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_unavailable_granularity, PMEM2_GRANULARITY_BYTE);
}
/*
* test_granularity_req_byte_avail_page -- require byte granularity,
* when page granularity is available
*/
static int
test_granularity_req_byte_avail_page(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_unavailable_granularity, PMEM2_GRANULARITY_BYTE);
}
/*
* test_granularity_req_cl_avail_byte -- require cache line granularity,
* when byte granularity is available
*/
static int
test_granularity_req_cl_avail_byte(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_CACHE_LINE);
}
/*
* test_granularity_req_cl_avail_cl -- require cache line granularity,
* when cache line granularity is available
*/
static int
test_granularity_req_cl_avail_cl(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_CACHE_LINE);
}
/*
* test_granularity_req_cl_avail_page -- require cache line granularity,
* when page granularity is available
*/
static int
test_granularity_req_cl_avail_page(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_unavailable_granularity, PMEM2_GRANULARITY_CACHE_LINE);
}
/*
* test_granularity_req_page_avail_byte -- require page granularity,
* when byte granularity is available
*/
static int
test_granularity_req_page_avail_byte(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}
/*
* test_granularity_req_page_avail_cl -- require page granularity,
* when cache line granularity is available
*/
static int
test_granularity_req_page_avail_cl(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}
/*
* test_granularity_req_page_avail_page -- require page granularity,
* when page granularity is available
*/
static int
test_granularity_req_page_avail_page(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_granularity_req_byte_avail_byte),
TEST_CASE(test_granularity_req_byte_avail_cl),
TEST_CASE(test_granularity_req_byte_avail_page),
TEST_CASE(test_granularity_req_cl_avail_byte),
TEST_CASE(test_granularity_req_cl_avail_cl),
TEST_CASE(test_granularity_req_cl_avail_page),
TEST_CASE(test_granularity_req_page_avail_byte),
TEST_CASE(test_granularity_req_page_avail_cl),
TEST_CASE(test_granularity_req_page_avail_page),
};
#define NTESTS ARRAY_SIZE(test_cases)
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_granularity");
out_init("pmem2_granularity", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
| 7,665 | 23.106918 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import os
from enum import Enum
import testframework as t
from testframework import granularity as g
class Granularity(Enum):
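# store granularity level emulated for a test case (via IS_EADR/IS_PMEM and the mocked sysfs path)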
BYTE = 1
CACHE_LINE = 2
PAGE = 3
@g.require_granularity(g.ANY)
class PMEM2_GRANULARITY(t.BaseTest):
test_type = t.Short
available_granularity = None
def run(self, ctx):
filepath = ctx.create_holey_file(16 * t.MiB, 'testfile1')
bus_dev_path_without_eADR = os.path.join(
self.cwd,
"linux_eadr_paths/eadr_not_available/sys/bus/nd/devices/")
bus_dev_path_with_eADR = os.path.join(
self.cwd,
"linux_eadr_paths/eadr_available/sys/bus/nd/devices/")
# The test framework may set this variable to emulate a certain type of
# granularity.
# This test mocks all granularity checks, but they are skipped if
# granularity is forced, so this test requires unforced granularity.
ctx.env['PMEM2_FORCE_GRANULARITY'] = '0'
if self.available_granularity == Granularity.BYTE:
ctx.env['IS_EADR'] = '1'
ctx.env['IS_PMEM'] = '1'
ctx.env['BUS_DEVICE_PATH'] = bus_dev_path_with_eADR
elif self.available_granularity == Granularity.CACHE_LINE:
ctx.env['IS_EADR'] = '0'
ctx.env['IS_PMEM'] = '1'
ctx.env['BUS_DEVICE_PATH'] = bus_dev_path_without_eADR
elif self.available_granularity == Granularity.PAGE:
ctx.env['IS_EADR'] = '0'
ctx.env['IS_PMEM'] = '0'
ctx.env['BUS_DEVICE_PATH'] = bus_dev_path_without_eADR
ctx.exec('pmem2_granularity', self.test_case, filepath)
class TEST0(PMEM2_GRANULARITY):
"""pass byte granularity, available byte granularity"""
test_case = "test_granularity_req_byte_avail_byte"
available_granularity = Granularity.BYTE
@t.windows_only
class TEST1(PMEM2_GRANULARITY):
"""pass byte granularity, available cache line granularity"""
test_case = "test_granularity_req_byte_avail_cl"
available_granularity = Granularity.CACHE_LINE
@t.linux_only
class TEST2(PMEM2_GRANULARITY):
"""pass byte granularity, available cache line granularity"""
test_case = "test_granularity_req_byte_avail_cl"
available_granularity = Granularity.CACHE_LINE
@t.windows_only
class TEST3(PMEM2_GRANULARITY):
"""pass byte granularity, available page granularity"""
test_case = "test_granularity_req_byte_avail_page"
available_granularity = Granularity.PAGE
@t.linux_only
class TEST4(PMEM2_GRANULARITY):
"""pass byte granularity, available page granularity"""
test_case = "test_granularity_req_byte_avail_page"
available_granularity = Granularity.PAGE
@t.freebsd_only
class TEST5(PMEM2_GRANULARITY):
"""pass byte granularity, available page granularity"""
test_case = "test_granularity_req_byte_avail_page"
available_granularity = Granularity.PAGE
class TEST6(PMEM2_GRANULARITY):
"""pass cache line granularity, available byte granularity"""
test_case = "test_granularity_req_cl_avail_byte"
available_granularity = Granularity.BYTE
class TEST7(PMEM2_GRANULARITY):
"""pass cache line granularity, available cache line granularity"""
test_case = "test_granularity_req_cl_avail_cl"
available_granularity = Granularity.CACHE_LINE
@t.windows_only
class TEST8(PMEM2_GRANULARITY):
"""pass cache line granularity, available page granularity"""
test_case = "test_granularity_req_cl_avail_page"
available_granularity = Granularity.PAGE
@t.linux_only
class TEST9(PMEM2_GRANULARITY):
"""pass cache line granularity, available page granularity"""
test_case = "test_granularity_req_cl_avail_page"
available_granularity = Granularity.PAGE
@t.freebsd_only
class TEST10(PMEM2_GRANULARITY):
"""pass cache line granularity, available page granularity"""
test_case = "test_granularity_req_cl_avail_page"
available_granularity = Granularity.PAGE
class TEST11(PMEM2_GRANULARITY):
"""pass page granularity, available byte granularity"""
test_case = "test_granularity_req_page_avail_byte"
available_granularity = Granularity.BYTE
class TEST12(PMEM2_GRANULARITY):
"""pass page granularity, available cache line granularity"""
test_case = "test_granularity_req_page_avail_cl"
available_granularity = Granularity.CACHE_LINE
class TEST13(PMEM2_GRANULARITY):
"""pass page granularity, available page granularity"""
test_case = "test_granularity_req_page_avail_page"
available_granularity = Granularity.PAGE
| 4,642 | 30.585034 | 76 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/mocks_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* mocks_posix.c -- mocked functions used by the pmem2_granularity test (mainly those called from auto_flush_linux.c)
*/
#include <fts.h>
#include "map.h"
#include "../common/mmap.h"
#include "fs.h"
#include "unittest.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
/*
* mmap - mock mmap
*/
FUNC_MOCK(mmap, void *, void *addr, size_t len, int prot,
int flags, int fd, __off_t offset)
FUNC_MOCK_RUN_DEFAULT {
char *str_map_sync = os_getenv("IS_PMEM");
const int ms = MAP_SYNC | MAP_SHARED_VALIDATE;
int map_sync_try = ((flags & ms) == ms) ? 1 : 0;
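/*
 * With IS_PMEM set to 1, MAP_SYNC|MAP_SHARED_VALIDATE requests are downgraded
 * to plain MAP_SHARED so they succeed on the regular test file; without it,
 * such requests fail with EINVAL, as they would on a non-DAX file system.
 */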
if (str_map_sync && atoi(str_map_sync) == 1) {
if (map_sync_try) {
flags &= ~ms;
flags |= MAP_SHARED;
return _FUNC_REAL(mmap)(addr, len, prot, flags,
fd, offset);
}
} else if (map_sync_try) {
errno = EINVAL;
return MAP_FAILED;
}
return _FUNC_REAL(mmap)(addr, len, prot, flags, fd, offset);
}
FUNC_MOCK_END
/*
* open -- open mock
*/
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
va_list ap;
va_start(ap, flags);
int mode = va_arg(ap, int);
va_end(ap);
char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
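/*
 * Redirect accesses under /sys/bus/nd/devices (except "region" entries) to
 * the fake sysfs tree given by the BUS_DEVICE_PATH environment variable.
 */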
if (!is_bus_device_path ||
(is_bus_device_path && strstr(path, "region")))
return _FUNC_REAL(open)(path, flags, mode);
const char *mock_path = os_getenv("BUS_DEVICE_PATH");
return _FUNC_REAL(open)(mock_path, flags, mode);
}
FUNC_MOCK_END
struct fs {
FTS *ft;
struct fs_entry entry;
};
/*
* fs_new -- creates fs traversal instance
*/
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
if (!is_bus_device_path ||
(is_bus_device_path && strstr(path, "region")))
return _FUNC_REAL(fs_new)(path);
const char *mock_path = os_getenv("BUS_DEVICE_PATH");
return _FUNC_REAL(fs_new)(mock_path);
}
FUNC_MOCK_END
/*
* os_stat -- os_stat mock to handle sysfs path
*/
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
if (!is_bus_device_path ||
(is_bus_device_path && strstr(path, "region")))
return _FUNC_REAL(os_stat)(path, buf);
const char *mock_path = os_getenv("BUS_DEVICE_PATH");
return _FUNC_REAL(os_stat)(mock_path, buf);
}
FUNC_MOCK_END
| 2,302 | 23.5 | 63 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.