code (stringlengths 12-2.05k) | label_name (stringclasses, 5 values) | label (int64, 0-4) |
---|---|---|
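The header above describes three columns: a code snippet, a label_name drawn from 5 string classes, and an integer label in the range 0-4; three of those classes (Variant = 0, Base = 1, Class = 2) appear in the rows below. A minimal sketch of inspecting rows with this schema, assuming the table has been exported to a CSV file (the file name and the CSV format are assumptions, not part of this dump):

# Minimal sketch; "code_label_samples.csv" is a hypothetical export of this table.
import pandas as pd

df = pd.read_csv("code_label_samples.csv")        # columns: code, label_name, label
print(df["label_name"].value_counts())            # how many samples per label class
print(df.groupby("label_name")["label"].first())  # label_name -> integer label (0-4)
print(df["code"].str.len().describe())            # code lengths, roughly 12 to 2.05k characters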
do_prefetch_tables (const void *gcmM, size_t gcmM_size)
{
prefetch_table(gcmM, gcmM_size);
prefetch_table(gcmR, sizeof(gcmR));
}
| Class | 2 |
newkeys_from_blob(struct sshbuf *m, struct ssh *ssh, int mode)
{
struct sshbuf *b = NULL;
struct sshcomp *comp;
struct sshenc *enc;
struct sshmac *mac;
struct newkeys *newkey = NULL;
size_t keylen, ivlen, maclen;
int r;
if ((newkey = calloc(1, sizeof(*newkey))) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = sshbuf_froms(m, &b)) != 0)
goto out;
#ifdef DEBUG_PK
sshbuf_dump(b, stderr);
#endif
enc = &newkey->enc;
mac = &newkey->mac;
comp = &newkey->comp;
if ((r = sshbuf_get_cstring(b, &enc->name, NULL)) != 0 ||
(r = sshbuf_get(b, &enc->cipher, sizeof(enc->cipher))) != 0 ||
(r = sshbuf_get_u32(b, (u_int *)&enc->enabled)) != 0 ||
(r = sshbuf_get_u32(b, &enc->block_size)) != 0 ||
(r = sshbuf_get_string(b, &enc->key, &keylen)) != 0 ||
(r = sshbuf_get_string(b, &enc->iv, &ivlen)) != 0)
goto out;
if (cipher_authlen(enc->cipher) == 0) {
if ((r = sshbuf_get_cstring(b, &mac->name, NULL)) != 0)
goto out;
if ((r = mac_setup(mac, mac->name)) != 0)
goto out;
if ((r = sshbuf_get_u32(b, (u_int *)&mac->enabled)) != 0 ||
(r = sshbuf_get_string(b, &mac->key, &maclen)) != 0)
goto out;
if (maclen > mac->key_len) {
r = SSH_ERR_INVALID_FORMAT;
goto out;
}
mac->key_len = maclen;
}
if ((r = sshbuf_get_u32(b, &comp->type)) != 0 ||
(r = sshbuf_get_u32(b, (u_int *)&comp->enabled)) != 0 ||
(r = sshbuf_get_cstring(b, &comp->name, NULL)) != 0)
goto out;
if (enc->name == NULL ||
cipher_by_name(enc->name) != enc->cipher) {
r = SSH_ERR_INVALID_FORMAT;
goto out;
}
if (sshbuf_len(b) != 0) {
r = SSH_ERR_INVALID_FORMAT;
goto out;
}
enc->key_len = keylen;
enc->iv_len = ivlen;
ssh->kex->newkeys[mode] = newkey;
newkey = NULL;
r = 0;
out:
free(newkey);
sshbuf_free(b);
return r;
}
| Class | 2 |
_pickle_UnpicklerMemoProxy_copy_impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=e12af7e9bc1e4c77 input=97769247ce032c1d]*/
{
Py_ssize_t i;
PyObject *new_memo = PyDict_New();
if (new_memo == NULL)
return NULL;
for (i = 0; i < self->unpickler->memo_size; i++) {
int status;
PyObject *key, *value;
value = self->unpickler->memo[i];
if (value == NULL)
continue;
key = PyLong_FromSsize_t(i);
if (key == NULL)
goto error;
status = PyDict_SetItem(new_memo, key, value);
Py_DECREF(key);
if (status < 0)
goto error;
}
return new_memo;
error:
Py_DECREF(new_memo);
return NULL;
}
| Base | 1 |
static inline LineContribType * _gdContributionsAlloc(unsigned int line_length, unsigned int windows_size)
{
unsigned int u = 0;
LineContribType *res;
int overflow_error = 0;
res = (LineContribType *) gdMalloc(sizeof(LineContribType));
if (!res) {
return NULL;
}
res->WindowSize = windows_size;
res->LineLength = line_length;
if (overflow2(line_length, sizeof(ContributionType))) {
gdFree(res);
return NULL;
}
res->ContribRow = (ContributionType *) gdMalloc(line_length * sizeof(ContributionType));
if (res->ContribRow == NULL) {
gdFree(res);
return NULL;
}
for (u = 0 ; u < line_length ; u++) {
if (overflow2(windows_size, sizeof(double))) {
overflow_error = 1;
} else {
res->ContribRow[u].Weights = (double *) gdMalloc(windows_size * sizeof(double));
}
if (overflow_error == 1 || res->ContribRow[u].Weights == NULL) {
unsigned int i;
u--;
for (i=0;i<=u;i++) {
gdFree(res->ContribRow[i].Weights);
}
gdFree(res->ContribRow);
gdFree(res);
return NULL;
}
}
return res;
}
| Base | 1 |
void snd_msndmidi_input_read(void *mpuv)
{
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
spin_lock_irqsave(&mpu->input_lock, flags);
while (readw(mpu->dev->MIDQ + JQS_wTail) !=
readw(mpu->dev->MIDQ + JQS_wHead)) {
u16 wTmp, val;
val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
&mpu->mode))
snd_rawmidi_receive(mpu->substream_input,
(unsigned char *)&val, 1);
wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
writew(0, mpu->dev->MIDQ + JQS_wHead);
else
writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
}
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
| Base | 1 |
void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey,
uint32_t enc_len, char *_hashX,
char *_hashY, char *signature) {
LOG_DEBUG(__FUNCTION__);
INIT_ERROR_STATE
CHECK_STATE(encryptedPrivateKey);
CHECK_STATE(_hashX);
CHECK_STATE(_hashY);
CHECK_STATE(signature);
SAFE_CHAR_BUF(key, BUF_LEN);
SAFE_CHAR_BUF(sig, BUF_LEN);
int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN);
CHECK_STATUS("AES decrypt failed")
if (!enclave_sign(key, _hashX, _hashY, sig)) {
strncpy(errString, "Enclave failed to create bls signature", BUF_LEN);
LOG_ERROR(errString);
*errStatus = -1;
goto clean;
}
strncpy(signature, sig, BUF_LEN);
if (strnlen(signature, BUF_LEN) < 10) {
strncpy(errString, "Signature too short", BUF_LEN);
LOG_ERROR(errString);
*errStatus = -1;
goto clean;
}
SET_SUCCESS
LOG_DEBUG("SGX call completed");
clean:
;
LOG_DEBUG("SGX call completed");
}
| Base | 1 |
static void ext4_clamp_want_extra_isize(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
sbi->s_want_extra_isize == 0) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (ext4_has_feature_extra_isize(sb)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
/* Check if enough inode space is available */
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
ext4_msg(sb, KERN_INFO,
"required extra inode space not available");
}
}
| Variant | 0 |
static void nalm_dump(FILE * trace, char *data, u32 data_size)
{
GF_BitStream *bs;
Bool rle, large_size;
u32 entry_count;
if (!data) {
fprintf(trace, "<NALUMap rle=\"\" large_size=\"\">\n");
fprintf(trace, "<NALUMapEntry NALU_startNumber=\"\" groupID=\"\"/>\n");
fprintf(trace, "</NALUMap>\n");
return;
}
bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
gf_bs_read_int(bs, 6);
large_size = gf_bs_read_int(bs, 1);
rle = gf_bs_read_int(bs, 1);
entry_count = gf_bs_read_int(bs, large_size ? 16 : 8);
fprintf(trace, "<NALUMap rle=\"%d\" large_size=\"%d\">\n", rle, large_size);
while (entry_count) {
u32 ID;
fprintf(trace, "<NALUMapEntry ");
if (rle) {
u32 start_num = gf_bs_read_int(bs, large_size ? 16 : 8);
fprintf(trace, "NALU_startNumber=\"%d\" ", start_num);
}
ID = gf_bs_read_u16(bs);
fprintf(trace, "groupID=\"%d\"/>\n", ID);
entry_count--;
}
gf_bs_del(bs);
fprintf(trace, "</NALUMap>\n");
return;
}
| Base | 1 |
table_regex_match(const char *string, const char *pattern)
{
regex_t preg;
int cflags = REG_EXTENDED|REG_NOSUB;
if (strncmp(pattern, "(?i)", 4) == 0) {
cflags |= REG_ICASE;
pattern += 4;
}
if (regcomp(&preg, pattern, cflags) != 0)
return (0);
if (regexec(&preg, string, 0, NULL, 0) != 0)
return (0);
return (1);
}
| Variant | 0 |
int wait_for_key_construction(struct key *key, bool intr)
{
int ret;
ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
smp_rmb();
return key->reject_error;
}
return key_validate(key);
}
| Class | 2 |
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len)
{
size_t cipher_len;
size_t i;
unsigned char iv[16] = { 0 };
unsigned char plaintext[4096] = { 0 };
epass2003_exdata *exdata = NULL;
if (!card->drv_data)
return SC_ERROR_INVALID_ARGUMENTS;
exdata = (epass2003_exdata *)card->drv_data;
/* no cipher */
if (in[0] == 0x99)
return 0;
/* parse cipher length */
if (0x01 == in[2] && 0x82 != in[1]) {
cipher_len = in[1];
i = 3;
}
else if (0x01 == in[3] && 0x81 == in[1]) {
cipher_len = in[2];
i = 4;
}
else if (0x01 == in[4] && 0x82 == in[1]) {
cipher_len = in[2] * 0x100;
cipher_len += in[3];
i = 5;
}
else {
return -1;
}
if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext)
return -1;
/* decrypt */
if (KEY_TYPE_AES == exdata->smtype)
aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
else
des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
/* unpadding */
while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
cipher_len--;
if (2 == cipher_len)
return -1;
memcpy(out, plaintext, cipher_len - 2);
*out_len = cipher_len - 2;
return 0;
}
| Variant | 0 |
ber_parse_header(STREAM s, int tagval, int *length)
{
int tag, len;
if (tagval > 0xff)
{
in_uint16_be(s, tag);
}
else
{
in_uint8(s, tag);
}
if (tag != tagval)
{
logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag);
return False;
}
in_uint8(s, len);
if (len & 0x80)
{
len &= ~0x80;
*length = 0;
while (len--)
next_be(s, *length);
}
else
*length = len;
return s_check(s);
}
| Base | 1 |
int ecall_answer(struct ecall *ecall, enum icall_call_type call_type,
bool audio_cbr)
{
int err = 0;
if (!ecall)
return EINVAL;
#ifdef ECALL_CBR_ALWAYS_ON
audio_cbr = true;
#endif
info("ecall(%p): answer on pending econn %p call_type=%d\n", ecall, ecall->econn, call_type);
if (!ecall->econn) {
warning("ecall: answer: econn does not exist!\n");
return ENOENT;
}
if (ECONN_PENDING_INCOMING != econn_current_state(ecall->econn)) {
info("ecall(%p): answer: invalid state (%s)\n", ecall,
econn_state_name(econn_current_state(ecall->econn)));
return EPROTO;
}
if (!ecall->flow) {
warning("ecall: answer: no mediaflow\n");
return EPROTO;
}
ecall->call_type = call_type;
IFLOW_CALL(ecall->flow, set_call_type, call_type);
ecall->audio_cbr = audio_cbr;
IFLOW_CALL(ecall->flow, set_audio_cbr, audio_cbr);
#if 0
if (ecall->props_local) {
const char *vstate_string =
call_type == ICALL_CALL_TYPE_VIDEO ? "true" : "false";
int err2 = econn_props_update(ecall->props_local, "videosend", vstate_string);
if (err2) {
warning("ecall(%p): econn_props_update(videosend)",
" failed (%m)\n", ecall, err2);
/* Non fatal, carry on */
}
}
#endif
err = generate_or_gather_answer(ecall, ecall->econn);
if (err) {
warning("ecall: answer: failed to gather_or_answer\n");
goto out;
}
ecall->answered = true;
ecall->audio_setup_time = -1;
ecall->call_estab_time = -1;
ecall->ts_answered = tmr_jiffies();
out:
return err;
}
| Base | 1 |
R_API RConfigNode* r_config_set(RConfig *cfg, const char *name, const char *value) {
RConfigNode *node = NULL;
char *ov = NULL;
ut64 oi;
if (!cfg || STRNULL (name)) {
return NULL;
}
node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
eprintf ("(error: '%s' config key is read only)\n", name);
return node;
}
oi = node->i_value;
if (node->value) {
ov = strdup (node->value);
if (!ov) {
goto beach;
}
} else {
free (node->value);
node->value = strdup ("");
}
if (node->flags & CN_BOOL) {
bool b = is_true (value);
node->i_value = (ut64) b? 1: 0;
char *value = strdup (r_str_bool (b));
if (value) {
free (node->value);
node->value = value;
}
} else {
if (!value) {
free (node->value);
node->value = strdup ("");
node->i_value = 0;
} else {
if (node->value == value) {
goto beach;
}
free (node->value);
node->value = strdup (value);
if (IS_DIGIT (*value)) {
if (strchr (value, '/')) {
node->i_value = r_num_get (cfg->num, value);
} else {
node->i_value = r_num_math (cfg->num, value);
}
} else {
node->i_value = 0;
}
node->flags |= CN_INT;
}
}
} else { // Create a new RConfigNode
oi = UT64_MAX;
if (!cfg->lock) {
node = r_config_node_new (name, value);
if (node) {
if (value && is_bool (value)) {
node->flags |= CN_BOOL;
node->i_value = is_true (value)? 1: 0;
}
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("r_config_set: unable to create a new RConfigNode\n");
}
} else {
eprintf ("r_config_set: variable '%s' not found\n", name);
}
}
if (node && node->setter) {
int ret = node->setter (cfg->user, node);
if (ret == false) {
if (oi != UT64_MAX) {
node->i_value = oi;
}
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
| Variant | 0 |
X509_NAME_oneline_ex(X509_NAME * a,
char *buf,
unsigned int *size,
unsigned long flag)
{
BIO *out = NULL;
out = BIO_new(BIO_s_mem ());
if (X509_NAME_print_ex(out, a, 0, flag) > 0) {
if (buf != NULL && (*size) > (unsigned int) BIO_number_written(out)) {
memset(buf, 0, *size);
BIO_read(out, buf, (int) BIO_number_written(out));
}
else {
*size = BIO_number_written(out);
}
}
BIO_free(out);
return (buf);
}
| Class | 2 |
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
msg->msg_namelen = 0;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
| Class | 2 |
static bool cgm_bind_dir(const char *root, const char *dirname)
{
nih_local char *cgpath = NULL;
/* /sys should have been mounted by now */
cgpath = NIH_MUST( nih_strdup(NULL, root) );
NIH_MUST( nih_strcat(&cgpath, NULL, "/sys/fs/cgroup") );
if (!dir_exists(cgpath)) {
ERROR("%s does not exist", cgpath);
return false;
}
/* mount a tmpfs there so we can create subdirs */
if (mount("cgroup", cgpath, "tmpfs", 0, "size=10000,mode=755")) {
SYSERROR("Failed to mount tmpfs at %s", cgpath);
return false;
}
NIH_MUST( nih_strcat(&cgpath, NULL, "/cgmanager") );
if (mkdir(cgpath, 0755) < 0) {
SYSERROR("Failed to create %s", cgpath);
return false;
}
if (mount(dirname, cgpath, "none", MS_BIND, 0)) {
SYSERROR("Failed to bind mount %s to %s", dirname, cgpath);
return false;
}
return true;
}
| Base | 1 |
static void async_polkit_query_free(AsyncPolkitQuery *q) {
if (!q)
return;
sd_bus_slot_unref(q->slot);
if (q->registry && q->request)
hashmap_remove(q->registry, q->request);
sd_bus_message_unref(q->request);
sd_bus_message_unref(q->reply);
free(q->action);
strv_free(q->details);
free(q);
}
| Variant | 0 |
static int fill_autodev(const struct lxc_rootfs *rootfs)
{
int ret;
char path[MAXPATHLEN];
int i;
mode_t cmask;
INFO("Creating initial consoles under container /dev");
ret = snprintf(path, MAXPATHLEN, "%s/dev", rootfs->path ? rootfs->mount : "");
if (ret < 0 || ret >= MAXPATHLEN) {
ERROR("Error calculating container /dev location");
return -1;
}
if (!dir_exists(path)) // ignore, just don't try to fill in
return 0;
INFO("Populating container /dev");
cmask = umask(S_IXUSR | S_IXGRP | S_IXOTH);
for (i = 0; i < sizeof(lxc_devs) / sizeof(lxc_devs[0]); i++) {
const struct lxc_devs *d = &lxc_devs[i];
ret = snprintf(path, MAXPATHLEN, "%s/dev/%s", rootfs->path ? rootfs->mount : "", d->name);
if (ret < 0 || ret >= MAXPATHLEN)
return -1;
ret = mknod(path, d->mode, makedev(d->maj, d->min));
if (ret && errno != EEXIST) {
char hostpath[MAXPATHLEN];
FILE *pathfile;
// Unprivileged containers cannot create devices, so
// bind mount the device from the host
ret = snprintf(hostpath, MAXPATHLEN, "/dev/%s", d->name);
if (ret < 0 || ret >= MAXPATHLEN)
return -1;
pathfile = fopen(path, "wb");
if (!pathfile) {
SYSERROR("Failed to create device mount target '%s'", path);
return -1;
}
fclose(pathfile);
if (mount(hostpath, path, 0, MS_BIND, NULL) != 0) {
SYSERROR("Failed bind mounting device %s from host into container",
d->name);
return -1;
}
}
}
umask(cmask);
INFO("Populated container /dev");
return 0;
}
| Base | 1 |
u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3)
{
u32 val=0, code;
s32 nb_lead = -1;
u32 bits = 0;
for (code=0; !code; nb_lead++) {
if (nb_lead>=32) {
//gf_bs_read_int keeps returning 0 on EOS, so if no more bits are available the rbsp was truncated; otherwise the code is broken in the rbsp
//we only test once nb_lead>=32 to avoid testing at each bit read
if (!gf_bs_available(bs)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n"));
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead));
}
return 0;
}
code = gf_bs_read_int(bs, 1);
bits++;
}
if (nb_lead) {
val = gf_bs_read_int(bs, nb_lead);
val += (1 << nb_lead) - 1;
bits += nb_lead;
}
if (fname) {
gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3);
}
return val;
}
| Base | 1 |
horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc)
{
tmsize_t stride = PredictorState(tif)->stride;
unsigned char* cp = (unsigned char*) cp0;
assert((cc%stride)==0);
if (cc > stride) {
/*
* Pipeline the most common cases.
*/
if (stride == 3) {
unsigned int cr = cp[0];
unsigned int cg = cp[1];
unsigned int cb = cp[2];
cc -= 3;
cp += 3;
while (cc>0) {
cp[0] = (unsigned char) ((cr += cp[0]) & 0xff);
cp[1] = (unsigned char) ((cg += cp[1]) & 0xff);
cp[2] = (unsigned char) ((cb += cp[2]) & 0xff);
cc -= 3;
cp += 3;
}
} else if (stride == 4) {
unsigned int cr = cp[0];
unsigned int cg = cp[1];
unsigned int cb = cp[2];
unsigned int ca = cp[3];
cc -= 4;
cp += 4;
while (cc>0) {
cp[0] = (unsigned char) ((cr += cp[0]) & 0xff);
cp[1] = (unsigned char) ((cg += cp[1]) & 0xff);
cp[2] = (unsigned char) ((cb += cp[2]) & 0xff);
cp[3] = (unsigned char) ((ca += cp[3]) & 0xff);
cc -= 4;
cp += 4;
}
} else {
cc -= stride;
do {
REPEAT4(stride, cp[stride] =
(unsigned char) ((cp[stride] + *cp) & 0xff); cp++)
cc -= stride;
} while (cc>0);
}
}
}
| Class | 2 |
spnego_gss_inquire_context(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
gss_name_t *src_name,
gss_name_t *targ_name,
OM_uint32 *lifetime_rec,
gss_OID *mech_type,
OM_uint32 *ctx_flags,
int *locally_initiated,
int *opened)
{
OM_uint32 ret = GSS_S_COMPLETE;
ret = gss_inquire_context(minor_status,
context_handle,
src_name,
targ_name,
lifetime_rec,
mech_type,
ctx_flags,
locally_initiated,
opened);
return (ret);
}
| Base | 1 |
int ip_options_get_from_user(struct net *net, struct ip_options **optp,
unsigned char __user *data, int optlen)
{
struct ip_options *opt = ip_options_get_alloc(optlen);
if (!opt)
return -ENOMEM;
if (optlen && copy_from_user(opt->__data, data, optlen)) {
kfree(opt);
return -EFAULT;
}
return ip_options_get_finish(net, optp, opt, optlen);
}
| Class | 2 |
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len)
{
u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
struct bpf_prog *prog_adj;
/* Since our patchlet doesn't expand the image, we're done. */
if (insn_delta == 0) {
memcpy(prog->insnsi + off, patch, sizeof(*patch));
return prog;
}
insn_adj_cnt = prog->len + insn_delta;
/* Several new instructions need to be inserted. Make room
* for them. Likely, there's no need for a new allocation as
* last page could have large enough tailroom.
*/
prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
GFP_USER);
if (!prog_adj)
return NULL;
prog_adj->len = insn_adj_cnt;
/* Patching happens in 3 steps:
*
* 1) Move over tail of insnsi from next instruction onwards,
* so we can patch the single target insn with one or more
* new ones (patching is always from 1 to n insns, n > 0).
* 2) Inject new instructions at the target location.
* 3) Adjust branch offsets if necessary.
*/
insn_rest = insn_adj_cnt - off - len;
memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
sizeof(*patch) * insn_rest);
memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
bpf_adj_branches(prog_adj, off, insn_delta);
return prog_adj;
}
| Base | 1 |
snmp_engine_get(snmp_header_t *header, snmp_varbind_t *varbinds, uint32_t varbinds_length)
{
snmp_mib_resource_t *resource;
uint32_t i;
for(i = 0; i < varbinds_length; i++) {
resource = snmp_mib_find(varbinds[i].oid);
if(!resource) {
switch(header->version) {
case SNMP_VERSION_1:
header->error_status_non_repeaters.error_status = SNMP_STATUS_NO_SUCH_NAME;
/*
* Varbinds are 1 indexed
*/
header->error_index_max_repetitions.error_index = i + 1;
break;
case SNMP_VERSION_2C:
(&varbinds[i])->value_type = SNMP_DATA_TYPE_NO_SUCH_INSTANCE;
break;
default:
header->error_status_non_repeaters.error_status = SNMP_STATUS_NO_SUCH_NAME;
header->error_index_max_repetitions.error_index = 0;
}
} else {
resource->handler(&varbinds[i], resource->oid);
}
}
return 0;
}
| Base | 1 |
MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms)
{
MemoryRegion *ram = g_new(MemoryRegion, 1);
memory_region_init_ram(uc, ram, size, perms);
if (ram->addr == -1) {
// out of memory
return NULL;
}
memory_region_add_subregion(uc->system_memory, begin, ram);
if (uc->cpu) {
tlb_flush(uc->cpu);
}
return ram;
}
| Base | 1 |
int gnutls_x509_ext_import_proxy(const gnutls_datum_t * ext, int *pathlen,
char **policyLanguage, char **policy,
size_t * sizeof_policy)
{
ASN1_TYPE c2 = ASN1_TYPE_EMPTY;
int result;
gnutls_datum_t value = { NULL, 0 };
if ((result = asn1_create_element
(_gnutls_get_pkix(), "PKIX1.ProxyCertInfo",
&c2)) != ASN1_SUCCESS) {
gnutls_assert();
return _gnutls_asn2err(result);
}
result = _asn1_strict_der_decode(&c2, ext->data, ext->size, NULL);
if (result != ASN1_SUCCESS) {
gnutls_assert();
result = _gnutls_asn2err(result);
goto cleanup;
}
if (pathlen) {
result = _gnutls_x509_read_uint(c2, "pCPathLenConstraint",
(unsigned int *)
pathlen);
if (result == GNUTLS_E_ASN1_ELEMENT_NOT_FOUND)
*pathlen = -1;
else if (result != GNUTLS_E_SUCCESS) {
gnutls_assert();
result = _gnutls_asn2err(result);
goto cleanup;
}
}
result = _gnutls_x509_read_value(c2, "proxyPolicy.policyLanguage",
&value);
if (result < 0) {
gnutls_assert();
goto cleanup;
}
if (policyLanguage) {
*policyLanguage = (char *)value.data;
} else {
gnutls_free(value.data);
value.data = NULL;
}
result = _gnutls_x509_read_value(c2, "proxyPolicy.policy", &value);
if (result == GNUTLS_E_ASN1_ELEMENT_NOT_FOUND) {
if (policy)
*policy = NULL;
if (sizeof_policy)
*sizeof_policy = 0;
} else if (result < 0) {
gnutls_assert();
goto cleanup;
} else {
if (policy) {
*policy = (char *)value.data;
value.data = NULL;
}
if (sizeof_policy)
*sizeof_policy = value.size;
}
result = 0;
cleanup:
gnutls_free(value.data);
asn1_delete_structure(&c2);
return result;
}
| Variant | 0 |
njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval)
{
njs_int_t ret;
njs_value_t ctor;
njs_native_frame_t *frame;
njs_promise_capability_t *capability;
frame = vm->top_frame;
frame->retval = retval;
njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]);
capability = njs_promise_new_capability(vm, &ctor);
if (njs_slow_path(capability == NULL)) {
return NJS_ERROR;
}
frame->function->context = capability;
ret = njs_function_lambda_call(vm);
if (ret == NJS_OK) {
ret = njs_function_call(vm, njs_function(&capability->resolve),
&njs_value_undefined, retval, 1, &vm->retval);
} else if (ret == NJS_AGAIN) {
ret = NJS_OK;
} else if (ret == NJS_ERROR) {
if (njs_is_memory_error(vm, &vm->retval)) {
return NJS_ERROR;
}
ret = njs_function_call(vm, njs_function(&capability->reject),
&njs_value_undefined, &vm->retval, 1,
&vm->retval);
}
*retval = capability->promise;
return ret;
}
| Variant | 0 |
tok_new(void)
{
struct tok_state *tok = (struct tok_state *)PyMem_MALLOC(
sizeof(struct tok_state));
if (tok == NULL)
return NULL;
tok->buf = tok->cur = tok->end = tok->inp = tok->start = NULL;
tok->done = E_OK;
tok->fp = NULL;
tok->input = NULL;
tok->tabsize = TABSIZE;
tok->indent = 0;
tok->indstack[0] = 0;
tok->atbol = 1;
tok->pendin = 0;
tok->prompt = tok->nextprompt = NULL;
tok->lineno = 0;
tok->level = 0;
tok->altwarning = 1;
tok->alterror = 1;
tok->alttabsize = 1;
tok->altindstack[0] = 0;
tok->decoding_state = STATE_INIT;
tok->decoding_erred = 0;
tok->read_coding_spec = 0;
tok->enc = NULL;
tok->encoding = NULL;
tok->cont_line = 0;
#ifndef PGEN
tok->filename = NULL;
tok->decoding_readline = NULL;
tok->decoding_buffer = NULL;
#endif
tok->async_def = 0;
tok->async_def_indent = 0;
tok->async_def_nl = 0;
return tok;
}
| Base | 1 |
PJ_DEF(pj_status_t) pjmedia_rtcp_fb_parse_nack(
const void *buf,
pj_size_t length,
unsigned *nack_cnt,
pjmedia_rtcp_fb_nack nack[])
{
pjmedia_rtcp_common *hdr = (pjmedia_rtcp_common*) buf;
pj_uint8_t *p;
unsigned cnt, i;
PJ_ASSERT_RETURN(buf && nack_cnt && nack, PJ_EINVAL);
PJ_ASSERT_RETURN(length >= sizeof(pjmedia_rtcp_common), PJ_ETOOSMALL);
/* Generic NACK uses pt==RTCP_RTPFB and FMT==1 */
if (hdr->pt != RTCP_RTPFB || hdr->count != 1)
return PJ_ENOTFOUND;
cnt = pj_ntohs((pj_uint16_t)hdr->length);
if (cnt > 2) cnt -= 2; else cnt = 0;
if (length < (cnt+3)*4)
return PJ_ETOOSMALL;
*nack_cnt = PJ_MIN(*nack_cnt, cnt);
p = (pj_uint8_t*)hdr + sizeof(*hdr);
for (i = 0; i < *nack_cnt; ++i) {
pj_uint16_t val;
pj_memcpy(&val, p, 2);
nack[i].pid = pj_ntohs(val);
pj_memcpy(&val, p+2, 2);
nack[i].blp = pj_ntohs(val);
p += 4;
}
return PJ_SUCCESS;
}
| Base | 1 |
static void server_real_connect(SERVER_REC *server, IPADDR *ip,
const char *unix_socket)
{
GIOChannel *handle;
IPADDR *own_ip = NULL;
const char *errmsg;
char *errmsg2;
char ipaddr[MAX_IP_LEN];
int port;
g_return_if_fail(ip != NULL || unix_socket != NULL);
signal_emit("server connecting", 2, server, ip);
if (server->connrec->no_connect)
return;
if (ip != NULL) {
own_ip = ip == NULL ? NULL :
(IPADDR_IS_V6(ip) ? server->connrec->own_ip6 :
server->connrec->own_ip4);
port = server->connrec->proxy != NULL ?
server->connrec->proxy_port : server->connrec->port;
handle = server->connrec->use_ssl ?
net_connect_ip_ssl(ip, port, own_ip, server->connrec->ssl_cert, server->connrec->ssl_pkey,
server->connrec->ssl_cafile, server->connrec->ssl_capath, server->connrec->ssl_verify) :
net_connect_ip(ip, port, own_ip);
} else {
handle = net_connect_unix(unix_socket);
}
if (handle == NULL) {
/* failed */
errmsg = g_strerror(errno);
errmsg2 = NULL;
if (errno == EADDRNOTAVAIL) {
if (own_ip != NULL) {
/* show the IP which is causing the error */
net_ip2host(own_ip, ipaddr);
errmsg2 = g_strconcat(errmsg, ": ", ipaddr, NULL);
}
server->no_reconnect = TRUE;
}
if (server->connrec->use_ssl && errno == ENOSYS)
server->no_reconnect = TRUE;
server->connection_lost = TRUE;
server_connect_failed(server, errmsg2 ? errmsg2 : errmsg);
g_free(errmsg2);
} else {
server->handle = net_sendbuffer_create(handle, 0);
#ifdef HAVE_OPENSSL
if (server->connrec->use_ssl)
server_connect_callback_init_ssl(server, handle);
else
#endif
server->connect_tag =
g_input_add(handle, G_INPUT_WRITE | G_INPUT_READ,
(GInputFunction)
server_connect_callback_init,
server);
}
}
| Class | 2 |
static inline LineContribType *_gdContributionsCalc(unsigned int line_size, unsigned int src_size, double scale_d, const interpolation_method pFilter)
{
double width_d;
double scale_f_d = 1.0;
const double filter_width_d = DEFAULT_BOX_RADIUS;
int windows_size;
unsigned int u;
LineContribType *res;
if (scale_d < 1.0) {
width_d = filter_width_d / scale_d;
scale_f_d = scale_d;
} else {
width_d= filter_width_d;
}
windows_size = 2 * (int)ceil(width_d) + 1;
res = _gdContributionsAlloc(line_size, windows_size);
for (u = 0; u < line_size; u++) {
const double dCenter = (double)u / scale_d;
/* get the significant edge points affecting the pixel */
register int iLeft = MAX(0, (int)floor (dCenter - width_d));
int iRight = MIN((int)ceil(dCenter + width_d), (int)src_size - 1);
double dTotalWeight = 0.0;
int iSrc;
res->ContribRow[u].Left = iLeft;
res->ContribRow[u].Right = iRight;
/* Cut edge points to fit in filter window in case of spill-off */
if (iRight - iLeft + 1 > windows_size) {
if (iLeft < ((int)src_size - 1 / 2)) {
iLeft++;
} else {
iRight--;
}
}
for (iSrc = iLeft; iSrc <= iRight; iSrc++) {
dTotalWeight += (res->ContribRow[u].Weights[iSrc-iLeft] = scale_f_d * (*pFilter)(scale_f_d * (dCenter - (double)iSrc)));
}
if (dTotalWeight < 0.0) {
_gdContributionsFree(res);
return NULL;
}
if (dTotalWeight > 0.0) {
for (iSrc = iLeft; iSrc <= iRight; iSrc++) {
res->ContribRow[u].Weights[iSrc-iLeft] /= dTotalWeight;
}
}
}
return res;
}
| Base | 1 |
static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
{
uint8* bufp = buf;
int32 bytes_read = 0;
uint32 strip, nstrips = TIFFNumberOfStrips(in);
uint32 stripsize = TIFFStripSize(in);
uint32 rows = 0;
uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
tsize_t scanline_size = TIFFScanlineSize(in);
if (scanline_size == 0) {
TIFFError("", "TIFF scanline size is zero!");
return 0;
}
for (strip = 0; strip < nstrips; strip++) {
bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
rows = bytes_read / scanline_size;
if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
(int)strip + 1, (unsigned long) bytes_read,
(unsigned long)stripsize);
if (bytes_read < 0 && !ignore) {
TIFFError("", "Error reading strip %lu after %lu rows",
(unsigned long) strip, (unsigned long)rows);
return 0;
}
bufp += bytes_read;
}
return 1;
} /* end readContigStripsIntoBuffer */
| Class | 2 |
ikev1_sub_print(netdissect_options *ndo,
u_char np, const struct isakmp_gen *ext, const u_char *ep,
uint32_t phase, uint32_t doi, uint32_t proto, int depth)
{
const u_char *cp;
int i;
struct isakmp_gen e;
cp = (const u_char *)ext;
while (np) {
ND_TCHECK(*ext);
UNALIGNED_MEMCPY(&e, ext, sizeof(e));
ND_TCHECK2(*ext, ntohs(e.len));
depth++;
ND_PRINT((ndo,"\n"));
for (i = 0; i < depth; i++)
ND_PRINT((ndo," "));
ND_PRINT((ndo,"("));
cp = ike_sub0_print(ndo, np, ext, ep, phase, doi, proto, depth);
ND_PRINT((ndo,")"));
depth--;
if (cp == NULL) {
/* Zero-length subitem */
return NULL;
}
np = e.np;
ext = (const struct isakmp_gen *)cp;
}
return cp;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(np)));
return NULL;
}
| Base | 1 |
static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len)
{
struct ceph_x_authorizer *au = (void *)a;
struct ceph_x_ticket_handler *th;
int ret = 0;
struct ceph_x_authorize_reply reply;
void *p = au->reply_buf;
void *end = p + sizeof(au->reply_buf);
th = get_ticket_handler(ac, au->service);
if (IS_ERR(th))
return PTR_ERR(th);
ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
if (ret < 0)
return ret;
if (ret != sizeof(reply))
return -EPERM;
if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one))
ret = -EPERM;
else
ret = 0;
dout("verify_authorizer_reply nonce %llx got %llx ret %d\n",
au->nonce, le64_to_cpu(reply.nonce_plus_one), ret);
return ret;
}
| Class | 2 |
void user_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_instantiated(key))
seq_printf(m, ": %u", key->datalen);
}
| Class | 2 |
num_stmts(const node *n)
{
int i, l;
node *ch;
switch (TYPE(n)) {
case single_input:
if (TYPE(CHILD(n, 0)) == NEWLINE)
return 0;
else
return num_stmts(CHILD(n, 0));
case file_input:
l = 0;
for (i = 0; i < NCH(n); i++) {
ch = CHILD(n, i);
if (TYPE(ch) == stmt)
l += num_stmts(ch);
}
return l;
case stmt:
return num_stmts(CHILD(n, 0));
case compound_stmt:
return 1;
case simple_stmt:
return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */
case suite:
/* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */
if (NCH(n) == 1)
return num_stmts(CHILD(n, 0));
else {
i = 2;
l = 0;
if (TYPE(CHILD(n, 1)) == TYPE_COMMENT)
i += 2;
for (; i < (NCH(n) - 1); i++)
l += num_stmts(CHILD(n, i));
return l;
}
default: {
char buf[128];
sprintf(buf, "Non-statement found: %d %d",
TYPE(n), NCH(n));
Py_FatalError(buf);
}
}
assert(0);
return 0;
}
| Base | 1 |
bit_write_MC (Bit_Chain *dat, BITCODE_MC val)
{
int i, j;
int negative = 0;
unsigned char byte[5];
BITCODE_UMC mask = 0x0000007f;
BITCODE_UMC value = (BITCODE_UMC)val;
if (val < 0)
{
negative = 1;
value = (BITCODE_UMC)-val;
}
for (i = 4, j = 0; i >= 0; i--, j += 7)
{
byte[i] = (unsigned char)((value & mask) >> j);
byte[i] |= 0x80;
mask = mask << 7;
}
for (i = 0; i < 4; i++)
if (byte[i] & 0x7f)
break;
if (byte[i] & 0x40)
i--;
byte[i] &= 0x7f;
if (negative)
byte[i] |= 0x40;
for (j = 4; j >= i; j--)
bit_write_RC (dat, byte[j]);
}
| Base | 1 |
static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen)
{
muscle_private_t* priv = MUSCLE_DATA(card);
mscfs_t *fs = priv->fs;
int x;
int count = 0;
mscfs_check_cache(priv->fs);
for(x = 0; x < fs->cache.size; x++) {
u8* oid= fs->cache.array[x].objectId.id;
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"FILE: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
if(0 == memcmp(fs->currentPath, oid, 2)) {
buf[0] = oid[2];
buf[1] = oid[3];
if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */
buf += 2;
count+=2;
}
}
return count;
}
| Variant | 0 |
static int spl_filesystem_file_call(spl_filesystem_object *intern, zend_function *func_ptr, int pass_num_args, zval *return_value, zval *arg2 TSRMLS_DC) /* {{{ */
{
zend_fcall_info fci;
zend_fcall_info_cache fcic;
zval z_fname;
zval * zresource_ptr = &intern->u.file.zresource, *retval;
int result;
int num_args = pass_num_args + (arg2 ? 2 : 1);
zval ***params = (zval***)safe_emalloc(num_args, sizeof(zval**), 0);
params[0] = &zresource_ptr;
if (arg2) {
params[1] = &arg2;
}
zend_get_parameters_array_ex(pass_num_args, params+(arg2 ? 2 : 1));
ZVAL_STRING(&z_fname, func_ptr->common.function_name, 0);
fci.size = sizeof(fci);
fci.function_table = EG(function_table);
fci.object_ptr = NULL;
fci.function_name = &z_fname;
fci.retval_ptr_ptr = &retval;
fci.param_count = num_args;
fci.params = params;
fci.no_separation = 1;
fci.symbol_table = NULL;
fcic.initialized = 1;
fcic.function_handler = func_ptr;
fcic.calling_scope = NULL;
fcic.called_scope = NULL;
fcic.object_ptr = NULL;
result = zend_call_function(&fci, &fcic TSRMLS_CC);
if (result == FAILURE) {
RETVAL_FALSE;
} else {
ZVAL_ZVAL(return_value, retval, 1, 1);
}
efree(params);
return result;
} /* }}} */
| Base | 1 |
horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2)
{
int32 r1, g1, b1, a1, r2, g2, b2, a2, mask;
float fltsize = Fltsize;
#define CLAMP(v) ( (v<(float)0.) ? 0 \
: (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \
: (v>(float)24.2) ? 2047 \
: LogK1*log(v*LogK2) + 0.5 )
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
a2 = wp[3] = (uint16) CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
ip += n - 1; /* point to last one */
wp += n - 1; /* point to last one */
n -= stride;
while (n > 0) {
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]);
wp[stride] -= wp[0];
wp[stride] &= mask;
wp--; ip--)
n -= stride;
}
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--)
}
}
}
| Class | 2 |
void luaD_call (lua_State *L, StkId func, int nresults) {
lua_CFunction f;
retry:
switch (ttypetag(s2v(func))) {
case LUA_VCCL: /* C closure */
f = clCvalue(s2v(func))->f;
goto Cfunc;
case LUA_VLCF: /* light C function */
f = fvalue(s2v(func));
Cfunc: {
int n; /* number of returns */
CallInfo *ci = next_ci(L);
checkstackp(L, LUA_MINSTACK, func); /* ensure minimum stack size */
ci->nresults = nresults;
ci->callstatus = CIST_C;
ci->top = L->top + LUA_MINSTACK;
ci->func = func;
L->ci = ci;
lua_assert(ci->top <= L->stack_last);
if (L->hookmask & LUA_MASKCALL) {
int narg = cast_int(L->top - func) - 1;
luaD_hook(L, LUA_HOOKCALL, -1, 1, narg);
}
lua_unlock(L);
n = (*f)(L); /* do the actual call */
lua_lock(L);
api_checknelems(L, n);
luaD_poscall(L, ci, n);
break;
}
case LUA_VLCL: { /* Lua function */
CallInfo *ci = next_ci(L);
Proto *p = clLvalue(s2v(func))->p;
int narg = cast_int(L->top - func) - 1; /* number of real arguments */
int nfixparams = p->numparams;
int fsize = p->maxstacksize; /* frame size */
checkstackp(L, fsize, func);
ci->nresults = nresults;
ci->u.l.savedpc = p->code; /* starting point */
ci->callstatus = 0;
ci->top = func + 1 + fsize;
ci->func = func;
L->ci = ci;
for (; narg < nfixparams; narg++)
setnilvalue(s2v(L->top++)); /* complete missing arguments */
lua_assert(ci->top <= L->stack_last);
luaV_execute(L, ci); /* run the function */
break;
}
default: { /* not a function */
checkstackp(L, 1, func); /* space for metamethod */
luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */
goto retry; /* try again with metamethod */
}
}
}
| Base | 1 |
static inline int unicode_cp_is_allowed(unsigned uni_cp, int document_type)
{
/* XML 1.0 HTML 4.01 HTML 5
* 0x09..0x0A 0x09..0x0A 0x09..0x0A
* 0x0D 0x0D 0x0C..0x0D
* 0x0020..0xD7FF 0x20..0x7E 0x20..0x7E
* 0x00A0..0xD7FF 0x00A0..0xD7FF
* 0xE000..0xFFFD 0xE000..0x10FFFF 0xE000..0xFDCF
* 0x010000..0x10FFFF 0xFDF0..0x10FFFF (*)
*
* (*) exclude code points where ((code & 0xFFFF) >= 0xFFFE)
*
* References:
* XML 1.0: <http://www.w3.org/TR/REC-xml/#charsets>
* HTML 4.01: <http://www.w3.org/TR/1999/PR-html40-19990824/sgml/sgmldecl.html>
* HTML 5: <http://dev.w3.org/html5/spec/Overview.html#preprocessing-the-input-stream>
*
* Not sure this is the relevant part for HTML 5, though. I opted to
* disallow the characters that would result in a parse error during
* preprocessing of the input stream. See also section 8.1.3.
*
* It's unclear if XHTML 1.0 allows C1 characters. I'll opt to apply to
* XHTML 1.0 the same rules as for XML 1.0.
* See <http://cmsmcq.com/2007/C1.xml>.
*/
switch (document_type) {
case ENT_HTML_DOC_HTML401:
return (uni_cp >= 0x20 && uni_cp <= 0x7E) ||
(uni_cp == 0x0A || uni_cp == 0x09 || uni_cp == 0x0D) ||
(uni_cp >= 0xA0 && uni_cp <= 0xD7FF) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF);
case ENT_HTML_DOC_HTML5:
return (uni_cp >= 0x20 && uni_cp <= 0x7E) ||
(uni_cp >= 0x09 && uni_cp <= 0x0D && uni_cp != 0x0B) || /* form feed U+0C allowed */
(uni_cp >= 0xA0 && uni_cp <= 0xD7FF) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF &&
((uni_cp & 0xFFFF) < 0xFFFE) && /* last two of each plane (nonchars) disallowed */
(uni_cp < 0xFDD0 || uni_cp > 0xFDEF)); /* U+FDD0-U+FDEF (nonchars) disallowed */
case ENT_HTML_DOC_XHTML:
case ENT_HTML_DOC_XML1:
return (uni_cp >= 0x20 && uni_cp <= 0xD7FF) ||
(uni_cp == 0x0A || uni_cp == 0x09 || uni_cp == 0x0D) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF && uni_cp != 0xFFFE && uni_cp != 0xFFFF);
default:
return 1;
}
}
| Base | 1 |
horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc)
{
TIFFPredictorState* sp = PredictorState(tif);
tmsize_t stride = sp->stride;
uint16 *wp = (uint16*) cp0;
tmsize_t wc = cc/2;
assert((cc%(2*stride))==0);
if (wc > stride) {
wc -= stride;
wp += wc - 1;
do {
REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned int)wp[0]) & 0xffff); wp--)
wc -= stride;
} while (wc > 0);
}
}
| Class | 2 |
int AES_encrypt_DH(char *message, uint8_t *encr_message, uint64_t encrLen) {
if (!message) {
LOG_ERROR("Null message in AES_encrypt_DH");
return -1;
}
if (!encr_message) {
LOG_ERROR("Null encr message in AES_encrypt_DH");
return -2;
}
uint64_t len = strlen(message) + 1;
if (len + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE > encrLen ) {
LOG_ERROR("Output buffer too small");
return -3;
}
sgx_read_rand(encr_message + SGX_AESGCM_MAC_SIZE, SGX_AESGCM_IV_SIZE);
sgx_status_t status = sgx_rijndael128GCM_encrypt(&AES_DH_key, (uint8_t*)message, strlen(message),
encr_message + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE,
encr_message + SGX_AESGCM_MAC_SIZE, SGX_AESGCM_IV_SIZE,
NULL, 0,
(sgx_aes_gcm_128bit_tag_t *) encr_message);
return status;
}
| Base | 1 |
static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
/* if the deadline is ahead of our clock, nothing to do */
if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
return;
if (cfs_rq->runtime_remaining < 0)
return;
/*
* If the local deadline has passed we have to consider the
* possibility that our sched_clock is 'fast' and the global deadline
* has not truly expired.
*
* Fortunately we can determine whether this is the case by checking
* whether the global deadline(cfs_b->expires_seq) has advanced.
*/
if (cfs_rq->expires_seq == cfs_b->expires_seq) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
} else {
/* global deadline is ahead, expiration has passed */
cfs_rq->runtime_remaining = 0;
}
}
| Class | 2 |
ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq)
{
/* funcdef: 'def' NAME parameters ['->' test] ':' suite */
return ast_for_funcdef_impl(c, n, decorator_seq,
0 /* is_async */);
}
| Base | 1 |
static inline int pmd_large(pmd_t pte)
{
return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
| Class | 2 |
void imap_quote_string(char *dest, size_t dlen, const char *src, bool quote_backtick)
{
const char *quote = "`\"\\";
if (!quote_backtick)
quote++;
char *pt = dest;
const char *s = src;
*pt++ = '"';
/* save room for trailing quote-char */
dlen -= 2;
for (; *s && dlen; s++)
{
if (strchr(quote, *s))
{
dlen -= 2;
if (dlen == 0)
break;
*pt++ = '\\';
*pt++ = *s;
}
else
{
*pt++ = *s;
dlen--;
}
}
*pt++ = '"';
*pt = '\0';
}
| Base | 1 |
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct sock *sk;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
scid, flags, result);
sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
if (!sk)
return 0;
switch (result) {
case L2CAP_CONF_SUCCESS:
break;
case L2CAP_CONF_UNACCEPT:
if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
char req[128];
/* It does not make sense to adjust L2CAP parameters
* that are currently defined in the spec. We simply
* resend config request that we sent earlier. It is
* stupid, but it helps qualification testing which
* expects at least some response from us. */
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
goto done;
}
default:
sk->sk_state = BT_DISCONN;
sk->sk_err = ECONNRESET;
l2cap_sock_set_timer(sk, HZ * 5);
{
struct l2cap_disconn_req req;
req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
}
goto done;
}
if (flags & 0x01)
goto done;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
}
done:
bh_unlock_sock(sk);
return 0;
}
| Base | 1 |
void fmtutil_macbitmap_read_pixmap_only_fields(deark *c, dbuf *f, struct fmtutil_macbitmap_info *bi,
i64 pos)
{
i64 pixmap_version;
i64 pack_size;
i64 plane_bytes;
i64 n;
de_dbg(c, "additional PixMap header fields, at %d", (int)pos);
de_dbg_indent(c, 1);
pixmap_version = dbuf_getu16be(f, pos+0);
de_dbg(c, "pixmap version: %d", (int)pixmap_version);
bi->packing_type = dbuf_getu16be(f, pos+2);
de_dbg(c, "packing type: %d", (int)bi->packing_type);
pack_size = dbuf_getu32be(f, pos+4);
de_dbg(c, "pixel data length: %d", (int)pack_size);
bi->hdpi = pict_read_fixed(f, pos+8);
bi->vdpi = pict_read_fixed(f, pos+12);
de_dbg(c, "dpi: %.2f"DE_CHAR_TIMES"%.2f", bi->hdpi, bi->vdpi);
bi->pixeltype = dbuf_getu16be(f, pos+16);
bi->pixelsize = dbuf_getu16be(f, pos+18);
bi->cmpcount = dbuf_getu16be(f, pos+20);
bi->cmpsize = dbuf_getu16be(f, pos+22);
de_dbg(c, "pixel type=%d, bits/pixel=%d, components/pixel=%d, bits/comp=%d",
(int)bi->pixeltype, (int)bi->pixelsize, (int)bi->cmpcount, (int)bi->cmpsize);
bi->pdwidth = (bi->rowbytes*8)/bi->pixelsize;
if(bi->pdwidth < bi->npwidth) {
bi->pdwidth = bi->npwidth;
}
plane_bytes = dbuf_getu32be(f, pos+24);
de_dbg(c, "plane bytes: %d", (int)plane_bytes);
bi->pmTable = (u32)dbuf_getu32be(f, pos+28);
de_dbg(c, "pmTable: 0x%08x", (unsigned int)bi->pmTable);
n = dbuf_getu32be(f, pos+32);
de_dbg(c, "pmReserved: 0x%08x", (unsigned int)n);
de_dbg_indent(c, -1);
}
| Base | 1 |
static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied;
int err = 0;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* Now we can treat all alike */
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
if (!ax25_sk(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (msg->msg_namelen != 0) {
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
application know the digi calls further down (because it
did NOT ask to know them). This could get political... **/
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
return err;
}
| Class | 2 |
static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
ctx->used = 0;
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
atomic_set(&ctx->inflight, 0);
af_alg_init_completion(&ctx->completion);
ask->private = ctx;
skcipher_request_set_tfm(&ctx->req, private);
skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
return 0;
}
| Base | 1 |
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
struct kvm_regs *regs = vcpu_gp_regs(vcpu);
int nr_regs = sizeof(*regs) / sizeof(__u32);
__uint128_t tmp;
void *valp = &tmp;
u64 off;
int err = 0;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= nr_regs ||
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (validate_core_offset(reg))
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
err = -EFAULT;
goto out;
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
case PSR_AA32_MODE_FIQ:
case PSR_AA32_MODE_IRQ:
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
break;
default:
err = -EINVAL;
goto out;
}
}
memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
return err;
}
| Class | 2 |
static int pop_sync_mailbox (CONTEXT *ctx, int *index_hint)
{
int i, j, ret = 0;
char buf[LONG_STRING];
POP_DATA *pop_data = (POP_DATA *)ctx->data;
progress_t progress;
#ifdef USE_HCACHE
header_cache_t *hc = NULL;
#endif
pop_data->check_time = 0;
FOREVER
{
if (pop_reconnect (ctx) < 0)
return -1;
mutt_progress_init (&progress, _("Marking messages deleted..."),
MUTT_PROGRESS_MSG, WriteInc, ctx->deleted);
#if USE_HCACHE
hc = pop_hcache_open (pop_data, ctx->path);
#endif
for (i = 0, j = 0, ret = 0; ret == 0 && i < ctx->msgcount; i++)
{
if (ctx->hdrs[i]->deleted && ctx->hdrs[i]->refno != -1)
{
j++;
if (!ctx->quiet)
mutt_progress_update (&progress, j, -1);
snprintf (buf, sizeof (buf), "DELE %d\r\n", ctx->hdrs[i]->refno);
if ((ret = pop_query (pop_data, buf, sizeof (buf))) == 0)
{
mutt_bcache_del (pop_data->bcache, ctx->hdrs[i]->data);
#if USE_HCACHE
mutt_hcache_delete (hc, ctx->hdrs[i]->data, strlen);
#endif
}
}
#if USE_HCACHE
if (ctx->hdrs[i]->changed)
{
mutt_hcache_store (hc, ctx->hdrs[i]->data, ctx->hdrs[i], 0, strlen, MUTT_GENERATE_UIDVALIDITY);
}
#endif
}
#if USE_HCACHE
mutt_hcache_close (hc);
#endif
if (ret == 0)
{
strfcpy (buf, "QUIT\r\n", sizeof (buf));
ret = pop_query (pop_data, buf, sizeof (buf));
}
if (ret == 0)
{
pop_data->clear_cache = 1;
pop_clear_cache (pop_data);
pop_data->status = POP_DISCONNECTED;
return 0;
}
if (ret == -2)
{
mutt_error ("%s", pop_data->err_msg);
mutt_sleep (2);
return -1;
}
}
}
| Class | 2 |
void dm9000WritePhyReg(uint8_t address, uint16_t data)
{
//Write PHY register address
dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address);
//Write register value
dm9000WriteReg(DM9000_REG_EPDRL, LSB(data));
dm9000WriteReg(DM9000_REG_EPDRH, MSB(data));
//Start the write operation
dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRW);
//PHY access is still in progress?
while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0)
{
}
//Wait 5us minimum
usleep(5);
//Clear command register
dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS);
}
| Class | 2 |
checked_xcalloc (size_t num, size_t size)
{
alloc_limit_assert ("checked_xcalloc", (num *size));
return xcalloc (num, size);
}
| Base | 1 |
static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct sk_buff *skb = NULL;
struct sockaddr_pn sa;
int rval = -EOPNOTSUPP;
int copylen;
if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
MSG_CMSG_COMPAT))
goto out_nofree;
if (addr_len)
*addr_len = sizeof(sa);
skb = skb_recv_datagram(sk, flags, noblock, &rval);
if (skb == NULL)
goto out_nofree;
pn_skb_get_src_sockaddr(skb, &sa);
copylen = skb->len;
if (len < copylen) {
msg->msg_flags |= MSG_TRUNC;
copylen = len;
}
rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
if (rval) {
rval = -EFAULT;
goto out;
}
rval = (flags & MSG_TRUNC) ? skb->len : copylen;
if (msg->msg_name != NULL)
memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
out:
skb_free_datagram(sk, skb);
out_nofree:
return rval;
}
| Class | 2 |
jpeg_error_handler(j_common_ptr)
{
return;
}
| Base | 1 |
static int get_exif_tag_dbl_value(struct iw_exif_state *e, unsigned int tag_pos,
double *pv)
{
unsigned int field_type;
unsigned int value_count;
unsigned int value_pos;
unsigned int numer, denom;
field_type = iw_get_ui16_e(&e->d[tag_pos+2],e->endian);
value_count = iw_get_ui32_e(&e->d[tag_pos+4],e->endian);
if(value_count!=1) return 0;
if(field_type!=5) return 0; // 5=Rational (two uint32's)
// A rational is 8 bytes. Since 8>4, it is stored indirectly. First, read
// the location where it is stored.
value_pos = iw_get_ui32_e(&e->d[tag_pos+8],e->endian);
if(value_pos > e->d_len-8) return 0;
// Read the actual value.
numer = iw_get_ui32_e(&e->d[value_pos ],e->endian);
denom = iw_get_ui32_e(&e->d[value_pos+4],e->endian);
if(denom==0) return 0;
*pv = ((double)numer)/denom;
return 1;
}
| Base | 1 |
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
unsigned long sz = huge_page_size(h);
pte_t *pte;
int err = 0;
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
if (pte && walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
if (err)
break;
} while (addr = next, addr != end);
return err;
}
| Class | 2 |
static int install_thread_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
BUG_ON(new->thread_keyring);
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
| Class | 2 |
FunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq *
decorator_list, expr_ty returns, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena)
{
stmt_ty p;
if (!name) {
PyErr_SetString(PyExc_ValueError,
"field name is required for FunctionDef");
return NULL;
}
if (!args) {
PyErr_SetString(PyExc_ValueError,
"field args is required for FunctionDef");
return NULL;
}
p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = FunctionDef_kind;
p->v.FunctionDef.name = name;
p->v.FunctionDef.args = args;
p->v.FunctionDef.body = body;
p->v.FunctionDef.decorator_list = decorator_list;
p->v.FunctionDef.returns = returns;
p->lineno = lineno;
p->col_offset = col_offset;
p->end_lineno = end_lineno;
p->end_col_offset = end_col_offset;
return p;
}
| Base | 1 |
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
OpData* data, const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs, TfLiteTensor* output) {
if (lhs->type == kTfLiteFloat32) {
TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3);
TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4);
TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5);
TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6);
return EvalHybrid<kernel_type>(
context, node, data, lhs_shape, lhs, rhs_shape, rhs, input_quantized,
scaling_factors, accum_scratch, row_sums, input_offsets, output);
} else if (lhs->type == kTfLiteInt8) {
return EvalInt8<kernel_type>(context, data, lhs_shape, lhs, rhs_shape, rhs,
GetTensorShape(output), output);
} else {
TF_LITE_KERNEL_LOG(
context, "Currently only hybrid and int8 quantization is supported.\n");
return kTfLiteError;
}
return kTfLiteOk;
}
| Base | 1 |
pci_set_cfgdata8(struct pci_vdev *dev, int offset, uint8_t val)
{
assert(offset <= PCI_REGMAX);
*(uint8_t *)(dev->cfgdata + offset) = val;
}
| Base | 1 |
static double ipow( double n, int exp )
{
double r;
if ( exp < 0 )
return 1.0 / ipow( n, -exp );
r = 1;
while ( exp > 0 ) {
if ( exp & 1 )
r *= n;
exp >>= 1;
n *= n;
}
return r;
}
| Base | 1 |
snmp_ber_decode_length(unsigned char *buff, uint32_t *buff_len, uint8_t *length)
{
if(*buff_len == 0) {
return NULL;
}
*length = *buff++;
(*buff_len)--;
return buff;
}
| Base | 1 |
pimv2_addr_print(netdissect_options *ndo,
const u_char *bp, enum pimv2_addrtype at, int silent)
{
int af;
int len, hdrlen;
ND_TCHECK(bp[0]);
if (pimv2_addr_len == 0) {
ND_TCHECK(bp[1]);
switch (bp[0]) {
case 1:
af = AF_INET;
len = sizeof(struct in_addr);
break;
case 2:
af = AF_INET6;
len = sizeof(struct in6_addr);
break;
default:
return -1;
}
if (bp[1] != 0)
return -1;
hdrlen = 2;
} else {
switch (pimv2_addr_len) {
case sizeof(struct in_addr):
af = AF_INET;
break;
case sizeof(struct in6_addr):
af = AF_INET6;
break;
default:
return -1;
break;
}
len = pimv2_addr_len;
hdrlen = 0;
}
bp += hdrlen;
switch (at) {
case pimv2_unicast:
ND_TCHECK2(bp[0], len);
if (af == AF_INET) {
if (!silent)
ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp)));
}
else if (af == AF_INET6) {
if (!silent)
ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp)));
}
return hdrlen + len;
case pimv2_group:
case pimv2_source:
ND_TCHECK2(bp[0], len + 2);
if (af == AF_INET) {
if (!silent) {
ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2)));
if (bp[1] != 32)
ND_PRINT((ndo, "/%u", bp[1]));
}
}
else if (af == AF_INET6) {
if (!silent) {
ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2)));
if (bp[1] != 128)
ND_PRINT((ndo, "/%u", bp[1]));
}
}
if (bp[0] && !silent) {
if (at == pimv2_group) {
ND_PRINT((ndo, "(0x%02x)", bp[0]));
} else {
ND_PRINT((ndo, "(%s%s%s",
bp[0] & 0x04 ? "S" : "",
bp[0] & 0x02 ? "W" : "",
bp[0] & 0x01 ? "R" : ""));
if (bp[0] & 0xf8) {
ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8));
}
ND_PRINT((ndo, ")"));
}
}
return hdrlen + 2 + len;
default:
return -1;
}
trunc:
return -1;
}
|
Base
| 1 |
int devmem_is_allowed(unsigned long pagenr)
{
if (pagenr < 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
if (!page_is_ram(pagenr))
return 1;
return 0;
}
|
Class
| 2 |
AsyncWith(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena)
{
stmt_ty p;
p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = AsyncWith_kind;
p->v.AsyncWith.items = items;
p->v.AsyncWith.body = body;
p->lineno = lineno;
p->col_offset = col_offset;
p->end_lineno = end_lineno;
p->end_col_offset = end_col_offset;
return p;
}
|
Base
| 1 |
char* _multi_string_alloc_and_copy( LPCWSTR in )
{
char *chr;
int len = 0;
if ( !in )
{
return in;
}
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
len ++;
}
chr = malloc( len + 2 );
len = 0;
while ( in[ len ] != 0 || in[ len + 1 ] != 0 )
{
chr[ len ] = 0xFF & in[ len ];
len ++;
}
chr[ len ++ ] = '\0';
chr[ len ++ ] = '\0';
return chr;
}
|
Class
| 2 |
pci_get_cfgdata32(struct pci_vdev *dev, int offset)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
return (*(uint32_t *)(dev->cfgdata + offset));
}
|
Base
| 1 |
cfm_network_addr_print(netdissect_options *ndo,
register const u_char *tptr)
{
u_int network_addr_type;
u_int hexdump = FALSE;
/*
* Although AFIs are typically 2 octets wide,
* 802.1ab specifies that this field width
* is only one octet
*/
network_addr_type = *tptr;
ND_PRINT((ndo, "\n\t Network Address Type %s (%u)",
tok2str(af_values, "Unknown", network_addr_type),
network_addr_type));
/*
* Resolve the passed in Address.
*/
switch(network_addr_type) {
case AFNUM_INET:
ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr + 1)));
break;
case AFNUM_INET6:
ND_PRINT((ndo, ", %s", ip6addr_string(ndo, tptr + 1)));
break;
default:
hexdump = TRUE;
break;
}
return hexdump;
}
|
Base
| 1 |
static void command_port_read_callback(struct urb *urb)
{
struct usb_serial_port *command_port = urb->context;
struct whiteheat_command_private *command_info;
int status = urb->status;
unsigned char *data = urb->transfer_buffer;
int result;
command_info = usb_get_serial_port_data(command_port);
if (!command_info) {
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
return;
}
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
if (status != -ENOENT)
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
return;
}
usb_serial_debug_data(&command_port->dev, __func__, urb->actual_length, data);
if (data[0] == WHITEHEAT_CMD_COMPLETE) {
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_CMD_FAILURE) {
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_EVENT) {
/* These are unsolicited reports from the firmware, hence no
waiting command to wakeup */
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
memcpy(command_info->result_buffer, &data[1],
urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else
dev_dbg(&urb->dev->dev, "%s - bad reply from firmware\n", __func__);
/* Continue trying to always read */
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dev_dbg(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
}
|
Class
| 2 |
static int read_private_key(RSA *rsa)
{
int r;
sc_path_t path;
sc_file_t *file;
const sc_acl_entry_t *e;
u8 buf[2048], *p = buf;
size_t bufsize, keysize;
r = select_app_df();
if (r)
return 1;
sc_format_path("I0012", &path);
r = sc_select_file(card, &path, &file);
if (r) {
fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r));
return 2;
}
e = sc_file_get_acl_entry(file, SC_AC_OP_READ);
if (e == NULL || e->method == SC_AC_NEVER)
return 10;
bufsize = file->size;
sc_file_free(file);
r = sc_read_binary(card, 0, buf, bufsize, 0);
if (r < 0) {
fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r));
return 2;
}
bufsize = r;
do {
if (bufsize < 4)
return 3;
keysize = (p[0] << 8) | p[1];
if (keysize == 0)
break;
if (keysize < 3)
return 3;
if (p[2] == opt_key_num)
break;
p += keysize;
bufsize -= keysize;
} while (1);
if (keysize == 0) {
printf("Key number %d not found.\n", opt_key_num);
return 2;
}
return parse_private_key(p, keysize, rsa);
}
|
Class
| 2 |
error_t tja1100Init(NetInterface *interface)
{
uint16_t value;
//Debug message
TRACE_INFO("Initializing TJA1100...\r\n");
//Undefined PHY address?
if(interface->phyAddr >= 32)
{
//Use the default address
interface->phyAddr = TJA1100_PHY_ADDR;
}
//Initialize serial management interface
if(interface->smiDriver != NULL)
{
interface->smiDriver->init();
}
//Initialize external interrupt line driver
if(interface->extIntDriver != NULL)
{
interface->extIntDriver->init();
}
//Reset PHY transceiver
tja1100WritePhyReg(interface, TJA1100_BASIC_CTRL,
TJA1100_BASIC_CTRL_RESET);
//Wait for the reset to complete
while(tja1100ReadPhyReg(interface, TJA1100_BASIC_CTRL) &
TJA1100_BASIC_CTRL_RESET)
{
}
//Dump PHY registers for debugging purpose
tja1100DumpPhyReg(interface);
//Enable configuration register access
value = tja1100ReadPhyReg(interface, TJA1100_EXTENDED_CTRL);
value |= TJA1100_EXTENDED_CTRL_CONFIG_EN;
tja1100WritePhyReg(interface, TJA1100_EXTENDED_CTRL, value);
//Select RMII mode (25MHz XTAL)
value = tja1100ReadPhyReg(interface, TJA1100_CONFIG1);
value &= ~TJA1100_CONFIG1_MII_MODE;
value |= TJA1100_CONFIG1_MII_MODE_RMII_25MHZ;
tja1100WritePhyReg(interface, TJA1100_CONFIG1, value);
//The PHY is configured for autonomous operation
value = tja1100ReadPhyReg(interface, TJA1100_CONFIG1);
value |= TJA1100_CONFIG1_AUTO_OP;
tja1100WritePhyReg(interface, TJA1100_CONFIG1, value);
//Force the TCP/IP stack to poll the link state at startup
interface->phyEvent = TRUE;
//Notify the TCP/IP stack of the event
osSetEvent(&netEvent);
//Successful initialization
return NO_ERROR;
}
|
Class
| 2 |
GF_Err urn_Read(GF_Box *s, GF_BitStream *bs)
{
u32 i, to_read;
char *tmpName;
GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
if (! ptr->size ) return GF_OK;
//here we have to handle that in a clever way
to_read = (u32) ptr->size;
tmpName = (char*)gf_malloc(sizeof(char) * to_read);
if (!tmpName) return GF_OUT_OF_MEM;
//get the data
gf_bs_read_data(bs, tmpName, to_read);
//then get the break
i = 0;
while ( (tmpName[i] != 0) && (i < to_read) ) {
i++;
}
//check the data is consistent
if (i == to_read) {
gf_free(tmpName);
return GF_ISOM_INVALID_FILE;
}
//no NULL char, URL is not specified
if (i == to_read - 1) {
ptr->nameURN = tmpName;
ptr->location = NULL;
return GF_OK;
}
//OK, this has both URN and URL
ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1));
if (!ptr->nameURN) {
gf_free(tmpName);
return GF_OUT_OF_MEM;
}
ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1));
if (!ptr->location) {
gf_free(tmpName);
gf_free(ptr->nameURN);
ptr->nameURN = NULL;
return GF_OUT_OF_MEM;
}
memcpy(ptr->nameURN, tmpName, i + 1);
memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1));
gf_free(tmpName);
return GF_OK;
}
|
Base
| 1 |
int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
key->reject_error = -error;
smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
}
|
Class
| 2 |
ga_concat_shorten_esc(garray_T *gap, char_u *str)
{
char_u *p;
char_u *s;
int c;
int clen;
char_u buf[NUMBUFLEN];
int same_len;
if (str == NULL)
{
ga_concat(gap, (char_u *)"NULL");
return;
}
for (p = str; *p != NUL; ++p)
{
same_len = 1;
s = p;
c = mb_ptr2char_adv(&s);
clen = s - p;
while (*s != NUL && c == mb_ptr2char(s))
{
++same_len;
s += clen;
}
if (same_len > 20)
{
ga_concat(gap, (char_u *)"\\[");
ga_concat_esc(gap, p, clen);
ga_concat(gap, (char_u *)" occurs ");
vim_snprintf((char *)buf, NUMBUFLEN, "%d", same_len);
ga_concat(gap, buf);
ga_concat(gap, (char_u *)" times]");
p = s - 1;
}
else
ga_concat_esc(gap, p, clen);
}
}
|
Variant
| 0 |
horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2)
{
int32 r1, g1, b1, a1, r2, g2, b2, a2, mask;
float fltsize = Fltsize;
#define CLAMP(v) ( (v<(float)0.) ? 0 \
: (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \
: (v>(float)24.2) ? 2047 \
: LogK1*log(v*LogK2) + 0.5 )
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
a2 = wp[3] = (uint16) CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
ip += n - 1; /* point to last one */
wp += n - 1; /* point to last one */
n -= stride;
while (n > 0) {
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]);
wp[stride] -= wp[0];
wp[stride] &= mask;
wp--; ip--)
n -= stride;
}
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--)
}
}
}
|
Class
| 2 |
PHP_FUNCTION(locale_lookup)
{
char* fallback_loc = NULL;
int fallback_loc_len = 0;
const char* loc_range = NULL;
int loc_range_len = 0;
zval* arr = NULL;
HashTable* hash_arr = NULL;
zend_bool boolCanonical = 0;
char* result =NULL;
intl_error_reset( NULL TSRMLS_CC );
if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "as|bs", &arr, &loc_range, &loc_range_len,
&boolCanonical, &fallback_loc, &fallback_loc_len) == FAILURE) {
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_lookup: unable to parse input params", 0 TSRMLS_CC );
RETURN_FALSE;
}
if(loc_range_len == 0) {
loc_range = intl_locale_get_default(TSRMLS_C);
}
hash_arr = HASH_OF(arr);
if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) {
RETURN_EMPTY_STRING();
}
result = lookup_loc_range(loc_range, hash_arr, boolCanonical TSRMLS_CC);
if(result == NULL || result[0] == '\0') {
if( fallback_loc ) {
result = estrndup(fallback_loc, fallback_loc_len);
} else {
RETURN_EMPTY_STRING();
}
}
RETVAL_STRINGL(result, strlen(result), 0);
}
|
Base
| 1 |
R_API int r_core_bin_set_env(RCore *r, RBinFile *binfile) {
RBinObject *binobj = binfile ? binfile->o: NULL;
RBinInfo *info = binobj ? binobj->info: NULL;
if (info) {
int va = info->has_va;
const char * arch = info->arch;
ut16 bits = info->bits;
ut64 baseaddr = r_bin_get_baddr (r->bin);
/* Hack to make baddr work on some corner */
r_config_set_i (r->config, "io.va",
(binobj->info)? binobj->info->has_va: 0);
r_config_set_i (r->config, "bin.baddr", baseaddr);
r_config_set (r->config, "asm.arch", arch);
r_config_set_i (r->config, "asm.bits", bits);
r_config_set (r->config, "anal.arch", arch);
if (info->cpu && *info->cpu) {
r_config_set (r->config, "anal.cpu", info->cpu);
} else {
r_config_set (r->config, "anal.cpu", arch);
}
r_asm_use (r->assembler, arch);
r_core_bin_info (r, R_CORE_BIN_ACC_ALL, R_CORE_BIN_SET, va, NULL, NULL);
r_core_bin_set_cur (r, binfile);
return true;
}
return false;
}
|
Variant
| 0 |
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{
struct kvm_device_ops *ops = NULL;
struct kvm_device *dev;
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
int ret;
if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
return -ENODEV;
ops = kvm_device_ops_table[cd->type];
if (ops == NULL)
return -ENODEV;
if (test)
return 0;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->ops = ops;
dev->kvm = kvm;
mutex_lock(&kvm->lock);
ret = ops->create(dev, cd->type);
if (ret < 0) {
mutex_unlock(&kvm->lock);
kfree(dev);
return ret;
}
list_add(&dev->vm_node, &kvm->devices);
mutex_unlock(&kvm->lock);
if (ops->init)
ops->init(dev);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
mutex_unlock(&kvm->lock);
ops->destroy(dev);
return ret;
}
kvm_get_kvm(kvm);
cd->fd = ret;
return 0;
}
|
Variant
| 0 |
static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
int id)
{
struct ion_handle *handle;
mutex_lock(&client->lock);
handle = idr_find(&client->idr, id);
if (handle)
ion_handle_get(handle);
mutex_unlock(&client->lock);
return handle ? handle : ERR_PTR(-EINVAL);
}
|
Variant
| 0 |
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
type = instr & TYPE_SWPB;
pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}
if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}
return 0;
}
|
Class
| 2 |
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
return -EEXIST;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->process_keyring = keyring;
return 0;
}
|
Class
| 2 |
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
struct ddpehdr *ddp;
int copied = 0;
int offset = 0;
int err = 0;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
lock_sock(sk);
if (!skb)
goto out;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
copied = ntohs(ddp->deh_len_hops) & 1023;
if (sk->sk_type != SOCK_RAW) {
offset = sizeof(*ddp);
copied -= offset;
}
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
if (!err) {
if (sat) {
sat->sat_family = AF_APPLETALK;
sat->sat_port = ddp->deh_sport;
sat->sat_addr.s_node = ddp->deh_snode;
sat->sat_addr.s_net = ddp->deh_snet;
}
msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
out:
release_sock(sk);
return err ? : copied;
}
|
Class
| 2 |
static inline int mount_entry_on_generic(struct mntent *mntent,
const char* path)
{
unsigned long mntflags;
char *mntdata;
int ret;
bool optional = hasmntopt(mntent, "optional") != NULL;
ret = mount_entry_create_dir_file(mntent, path);
if (ret < 0)
return optional ? 0 : -1;
cull_mntent_opt(mntent);
if (parse_mntopts(mntent->mnt_opts, &mntflags, &mntdata) < 0) {
free(mntdata);
return -1;
}
ret = mount_entry(mntent->mnt_fsname, path, mntent->mnt_type,
mntflags, mntdata, optional);
free(mntdata);
return ret;
}
|
Base
| 1 |
int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
size_t sec_attr_len)
{
u8 *tmp;
if (!sc_file_valid(file)) {
return SC_ERROR_INVALID_ARGUMENTS;
}
if (sec_attr == NULL) {
if (file->sec_attr != NULL)
free(file->sec_attr);
file->sec_attr = NULL;
file->sec_attr_len = 0;
return 0;
}
tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
if (!tmp) {
if (file->sec_attr)
free(file->sec_attr);
file->sec_attr = NULL;
file->sec_attr_len = 0;
return SC_ERROR_OUT_OF_MEMORY;
}
file->sec_attr = tmp;
memcpy(file->sec_attr, sec_attr, sec_attr_len);
file->sec_attr_len = sec_attr_len;
return 0;
}
|
Class
| 2 |
static int find_high_bit(unsigned int x)
{
int i;
for(i=31;i>=0;i--) {
if(x&(1<<i)) return i;
}
return 0;
}
|
Pillar
| 3 |
error_t lpc546xxEthUpdateMacAddrFilter(NetInterface *interface)
{
uint_t i;
bool_t acceptMulticast;
//Debug message
TRACE_DEBUG("Updating MAC filter...\r\n");
//Set the MAC address of the station
ENET->MAC_ADDR_LOW = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16);
ENET->MAC_ADDR_HIGH = interface->macAddr.w[2];
//This flag will be set if multicast addresses should be accepted
acceptMulticast = FALSE;
//The MAC address filter contains the list of MAC addresses to accept
//when receiving an Ethernet frame
for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++)
{
//Valid entry?
if(interface->macAddrFilter[i].refCount > 0)
{
//Accept multicast addresses
acceptMulticast = TRUE;
//We are done
break;
}
}
//Enable the reception of multicast frames if necessary
if(acceptMulticast)
{
ENET->MAC_FRAME_FILTER |= ENET_MAC_FRAME_FILTER_PM_MASK;
}
else
{
ENET->MAC_FRAME_FILTER &= ~ENET_MAC_FRAME_FILTER_PM_MASK;
}
//Successful processing
return NO_ERROR;
}
|
Class
| 2 |
snmp_api_set_string(snmp_varbind_t *varbind, uint32_t *oid, char *string)
{
snmp_api_replace_oid(varbind, oid);
varbind->value_type = BER_DATA_TYPE_OCTET_STRING;
varbind->value.string.string = string;
varbind->value.string.length = strlen(string);
}
|
Base
| 1 |
get_user_command_name(int idx, int cmdidx)
{
if (cmdidx == CMD_USER && idx < ucmds.ga_len)
return USER_CMD(idx)->uc_name;
if (cmdidx == CMD_USER_BUF)
{
// In cmdwin, the alternative buffer should be used.
buf_T *buf =
#ifdef FEAT_CMDWIN
is_in_cmdwin() ? prevwin->w_buffer :
#endif
curbuf;
if (idx < buf->b_ucmds.ga_len)
return USER_CMD_GA(&buf->b_ucmds, idx)->uc_name;
}
return NULL;
}
|
Base
| 1 |
ppp_hdlc(netdissect_options *ndo,
const u_char *p, int length)
{
u_char *b, *s, *t, c;
int i, proto;
const void *se;
if (length <= 0)
return;
b = (uint8_t *)malloc(length);
if (b == NULL)
return;
/*
* Unescape all the data into a temporary, private, buffer.
* Do this so that we dont overwrite the original packet
* contents.
*/
for (s = (u_char *)p, t = b, i = length; i > 0; i--) {
c = *s++;
if (c == 0x7d) {
if (i > 1) {
i--;
c = *s++ ^ 0x20;
} else
continue;
}
*t++ = c;
}
se = ndo->ndo_snapend;
ndo->ndo_snapend = t;
length = t - b;
/* now lets guess about the payload codepoint format */
if (length < 1)
goto trunc;
proto = *b; /* start with a one-octet codepoint guess */
switch (proto) {
case PPP_IP:
ip_print(ndo, b + 1, length - 1);
goto cleanup;
case PPP_IPV6:
ip6_print(ndo, b + 1, length - 1);
goto cleanup;
default: /* no luck - try next guess */
break;
}
if (length < 2)
goto trunc;
proto = EXTRACT_16BITS(b); /* next guess - load two octets */
switch (proto) {
case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */
if (length < 4)
goto trunc;
proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */
handle_ppp(ndo, proto, b + 4, length - 4);
break;
default: /* last guess - proto must be a PPP proto-id */
handle_ppp(ndo, proto, b + 2, length - 2);
break;
}
cleanup:
ndo->ndo_snapend = se;
free(b);
return;
trunc:
ndo->ndo_snapend = se;
free(b);
ND_PRINT((ndo, "[|ppp]"));
}
|
Class
| 2 |
static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
struct scatterlist sg[1];
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
port = find_port_by_vtermno(vtermno);
if (!port)
return -EPIPE;
sg_init_one(sg, buf, count);
return __send_to_port(port, sg, 1, count, (void *)buf, false);
}
|
Class
| 2 |
static RList *symbols(RBinFile *bf) {
RList *res = r_list_newf ((RListFree)r_bin_symbol_free);
r_return_val_if_fail (res && bf->o && bf->o->bin_obj, res);
RCoreSymCacheElement *element = bf->o->bin_obj;
size_t i;
HtUU *hash = ht_uu_new0 ();
if (!hash) {
return res;
}
bool found = false;
for (i = 0; i < element->hdr->n_lined_symbols; i++) {
RCoreSymCacheElementSymbol *sym = (RCoreSymCacheElementSymbol *)&element->lined_symbols[i];
if (!sym) {
break;
}
ht_uu_find (hash, sym->paddr, &found);
if (found) {
continue;
}
RBinSymbol *s = bin_symbol_from_symbol (element, sym);
if (s) {
r_list_append (res, s);
ht_uu_insert (hash, sym->paddr, 1);
}
}
if (element->symbols) {
for (i = 0; i < element->hdr->n_symbols; i++) {
RCoreSymCacheElementSymbol *sym = &element->symbols[i];
ht_uu_find (hash, sym->paddr, &found);
if (found) {
continue;
}
RBinSymbol *s = bin_symbol_from_symbol (element, sym);
if (s) {
r_list_append (res, s);
}
}
}
ht_uu_free (hash);
return res;
}
|
Base
| 1 |
CAMLprim value caml_alloc_dummy_float (value size)
{
mlsize_t wosize = Int_val(size) * Double_wosize;
if (wosize == 0) return Atom(0);
return caml_alloc (wosize, 0);
}
|
Class
| 2 |
static bool glfs_check_config(const char *cfgstring, char **reason)
{
char *path;
glfs_t *fs = NULL;
glfs_fd_t *gfd = NULL;
gluster_server *hosts = NULL; /* gluster server defination */
bool result = true;
path = strchr(cfgstring, '/');
if (!path) {
if (asprintf(reason, "No path found") == -1)
*reason = NULL;
result = false;
goto done;
}
path += 1; /* get past '/' */
fs = tcmu_create_glfs_object(path, &hosts);
if (!fs) {
tcmu_err("tcmu_create_glfs_object failed\n");
goto done;
}
gfd = glfs_open(fs, hosts->path, ALLOWED_BSOFLAGS);
if (!gfd) {
if (asprintf(reason, "glfs_open failed: %m") == -1)
*reason = NULL;
result = false;
goto unref;
}
if (glfs_access(fs, hosts->path, R_OK|W_OK) == -1) {
if (asprintf(reason, "glfs_access file not present, or not writable") == -1)
*reason = NULL;
result = false;
goto unref;
}
goto done;
unref:
gluster_cache_refresh(fs, path);
done:
if (gfd)
glfs_close(gfd);
gluster_free_server(&hosts);
return result;
}
|
Class
| 2 |
static int add_ast_fields(void)
{
PyObject *empty_tuple, *d;
if (PyType_Ready(&AST_type) < 0)
return -1;
d = AST_type.tp_dict;
empty_tuple = PyTuple_New(0);
if (!empty_tuple ||
PyDict_SetItemString(d, "_fields", empty_tuple) < 0 ||
PyDict_SetItemString(d, "_attributes", empty_tuple) < 0) {
Py_XDECREF(empty_tuple);
return -1;
}
Py_DECREF(empty_tuple);
return 0;
}
|
Base
| 1 |
sysServices_handler(snmp_varbind_t *varbind, uint32_t *oid)
{
snmp_api_set_time_ticks(varbind, oid, clock_seconds() * 100);
}
|
Base
| 1 |