label (int64, 0-1) | func1 (string, lengths 23-97k) | id (int64, 0-27.3k) |
---|---|---|
0 | BlockDriverState *bdrv_find_node(const char *node_name) { BlockDriverState *bs; assert(node_name); QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { if (!strcmp(node_name, bs->node_name)) { return bs; } } return NULL; } | 10,704 |
0 | static inline void load_seg_vm(int seg, int selector) { selector &= 0xffff; cpu_x86_load_seg_cache(env, seg, selector, (uint8_t *)(selector << 4), 0xffff, 0); } | 10,705 |
0 | static int ehci_state_fetchqtd(EHCIQueue *q, int async) { int again = 0; get_dwords(NLPTR_GET(q->qtdaddr),(uint32_t *) &q->qtd, sizeof(EHCIqtd) >> 2); ehci_trace_qtd(q, NLPTR_GET(q->qtdaddr), &q->qtd); if (q->qtd.token & QTD_TOKEN_ACTIVE) { ehci_set_state(q->ehci, async, EST_EXECUTE); again = 1; } else { ehci_set_state(q->ehci, async, EST_HORIZONTALQH); again = 1; } return again; } | 10,706 |
0 | av_cold void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx) { #if HAVE_SSE2_INLINE if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2) { if (ctx->cid_table->bit_depth == 8) ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2; } #endif /* HAVE_SSE2_INLINE */ } | 10,707 |
0 | static int usb_bt_initfn(USBDevice *dev) { struct USBBtState *s = DO_UPCAST(struct USBBtState, dev, dev); s->dev.speed = USB_SPEED_HIGH; return 0; } | 10,708 |
0 | static void ppc_hash64_set_dsi(CPUState *cs, CPUPPCState *env, uint64_t dar, uint64_t dsisr) { bool vpm; if (msr_dr) { vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); } else { vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0); } if (vpm && !msr_hv) { cs->exception_index = POWERPC_EXCP_HDSI; env->spr[SPR_HDAR] = dar; env->spr[SPR_HDSISR] = dsisr; } else { cs->exception_index = POWERPC_EXCP_DSI; env->spr[SPR_DAR] = dar; env->spr[SPR_DSISR] = dsisr; } env->error_code = 0; } | 10,709 |
0 | static void cpu_ppc_decr_cb(void *opaque) { PowerPCCPU *cpu = opaque; _cpu_ppc_store_decr(cpu, 0x00000000, 0xFFFFFFFF, 1); } | 10,710 |
0 | static int vmdk_open_vmdk3(BlockDriverState *bs, BlockDriverState *file, int flags) { int ret; uint32_t magic; VMDK3Header header; VmdkExtent *extent; ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header)); if (ret < 0) { return ret; } extent = vmdk_add_extent(bs, bs->file, false, le32_to_cpu(header.disk_sectors), le32_to_cpu(header.l1dir_offset) << 9, 0, 1 << 6, 1 << 9, le32_to_cpu(header.granularity)); ret = vmdk_init_tables(bs, extent); if (ret) { /* free extent allocated by vmdk_add_extent */ vmdk_free_last_extent(bs); } return ret; } | 10,711 |
0 | static uint64_t mv88w8618_flashcfg_read(void *opaque, target_phys_addr_t offset, unsigned size) { mv88w8618_flashcfg_state *s = opaque; switch (offset) { case MP_FLASHCFG_CFGR0: return s->cfgr0; default: return 0; } } | 10,712 |
0 | static void qio_channel_socket_dgram_worker_free(gpointer opaque) { struct QIOChannelSocketDGramWorkerData *data = opaque; qapi_free_SocketAddressLegacy(data->localAddr); qapi_free_SocketAddressLegacy(data->remoteAddr); g_free(data); } | 10,713 |
0 | static QString *qstring_from_escaped_str(JSONParserContext *ctxt, QObject *token) { const char *ptr = token_get_value(token); QString *str; int double_quote = 1; if (*ptr == '"') { double_quote = 1; } else { double_quote = 0; } ptr++; str = qstring_new(); while (*ptr && ((double_quote && *ptr != '"') || (!double_quote && *ptr != '\''))) { if (*ptr == '\\') { ptr++; switch (*ptr) { case '"': qstring_append(str, "\""); ptr++; break; case '\'': qstring_append(str, "'"); ptr++; break; case '\\': qstring_append(str, "\\"); ptr++; break; case '/': qstring_append(str, "/"); ptr++; break; case 'b': qstring_append(str, "\b"); ptr++; break; case 'f': qstring_append(str, "\f"); ptr++; break; case 'n': qstring_append(str, "\n"); ptr++; break; case 'r': qstring_append(str, "\r"); ptr++; break; case 't': qstring_append(str, "\t"); ptr++; break; case 'u': { uint16_t unicode_char = 0; char utf8_char[4]; int i = 0; ptr++; for (i = 0; i < 4; i++) { if (qemu_isxdigit(*ptr)) { unicode_char |= hex2decimal(*ptr) << ((3 - i) * 4); } else { parse_error(ctxt, token, "invalid hex escape sequence in string"); goto out; } ptr++; } wchar_to_utf8(unicode_char, utf8_char, sizeof(utf8_char)); qstring_append(str, utf8_char); } break; default: parse_error(ctxt, token, "invalid escape sequence in string"); goto out; } } else { char dummy[2]; dummy[0] = *ptr++; dummy[1] = 0; qstring_append(str, dummy); } } return str; out: QDECREF(str); return NULL; } | 10,714 |
0 | static void init_types(void) { static int inited; int i; if (inited) { return; } for (i = 0; i < MODULE_INIT_MAX; i++) { TAILQ_INIT(&init_type_list[i]); } inited = 1; } | 10,715 |
0 | int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, int n_start, int n_end, int *num, QCowL2Meta *m) { BDRVQcowState *s = bs->opaque; int l2_index, ret; uint64_t l2_offset, *l2_table; int64_t cluster_offset; unsigned int nb_clusters, i = 0; QCowL2Meta *old_alloc; ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index); if (ret < 0) { return ret; } again: nb_clusters = size_to_clusters(s, n_end << 9); nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); cluster_offset = be64_to_cpu(l2_table[l2_index]); /* We keep all QCOW_OFLAG_COPIED clusters */ if (cluster_offset & QCOW_OFLAG_COPIED) { nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], 0, 0); cluster_offset &= ~QCOW_OFLAG_COPIED; m->nb_clusters = 0; goto out; } /* for the moment, multiple compressed clusters are not managed */ if (cluster_offset & QCOW_OFLAG_COMPRESSED) nb_clusters = 1; /* how many available clusters ? */ while (i < nb_clusters) { i += count_contiguous_clusters(nb_clusters - i, s->cluster_size, &l2_table[l2_index], i, 0); if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) { break; } i += count_contiguous_free_clusters(nb_clusters - i, &l2_table[l2_index + i]); if (i >= nb_clusters) { break; } cluster_offset = be64_to_cpu(l2_table[l2_index + i]); if ((cluster_offset & QCOW_OFLAG_COPIED) || (cluster_offset & QCOW_OFLAG_COMPRESSED)) break; } assert(i <= nb_clusters); nb_clusters = i; /* * Check if there already is an AIO write request in flight which allocates * the same cluster. In this case we need to wait until the previous * request has completed and updated the L2 table accordingly. */ QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { uint64_t end_offset = offset + nb_clusters * s->cluster_size; uint64_t old_offset = old_alloc->offset; uint64_t old_end_offset = old_alloc->offset + old_alloc->nb_clusters * s->cluster_size; if (end_offset < old_offset || offset > old_end_offset) { /* No intersection */ } else { if (offset < old_offset) { /* Stop at the start of a running allocation */ nb_clusters = (old_offset - offset) >> s->cluster_bits; } else { nb_clusters = 0; } if (nb_clusters == 0) { /* Wait for the dependency to complete. We need to recheck * the free/allocated clusters when we continue. */ qemu_co_mutex_unlock(&s->lock); qemu_co_queue_wait(&old_alloc->dependent_requests); qemu_co_mutex_lock(&s->lock); goto again; } } } if (!nb_clusters) { abort(); } QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight); /* allocate a new cluster */ cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size); if (cluster_offset < 0) { ret = cluster_offset; goto fail; } /* save info needed for meta data update */ m->offset = offset; m->n_start = n_start; m->nb_clusters = nb_clusters; out: ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); if (ret < 0) { goto fail_put; } m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end); m->cluster_offset = cluster_offset; *num = m->nb_available - n_start; return 0; fail: qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); fail_put: QLIST_REMOVE(m, next_in_flight); return ret; } | 10,716 |
0 | static void test_validate_union_native_list(TestInputVisitorData *data, const void *unused) { UserDefNativeListUnion *tmp = NULL; Visitor *v; Error *err = NULL; v = validate_test_init(data, "{ 'type': 'integer', 'data' : [ 1, 2 ] }"); visit_type_UserDefNativeListUnion(v, &tmp, NULL, &err); g_assert(!err); qapi_free_UserDefNativeListUnion(tmp); } | 10,717 |
0 | static int delta_decode(uint8_t *dst, const uint8_t *src, int src_size, unsigned val, const int8_t *table) { uint8_t *dst0 = dst; while (src_size--) { uint8_t d = *src++; val = av_clip_uint8(val + table[d & 0xF]); *dst++ = val; val = av_clip_uint8(val + table[d >> 4]); *dst++ = val; } return dst-dst0; } | 10,718 |
0 | int css_do_ssch(SubchDev *sch, ORB *orb) { SCSW *s = &sch->curr_status.scsw; PMCW *p = &sch->curr_status.pmcw; int ret; if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { ret = -ENODEV; goto out; } if (s->ctrl & SCSW_STCTL_STATUS_PEND) { ret = -EINPROGRESS; goto out; } if (s->ctrl & (SCSW_FCTL_START_FUNC | SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { ret = -EBUSY; goto out; } /* If monitoring is active, update counter. */ if (channel_subsys.chnmon_active) { css_update_chnmon(sch); } sch->channel_prog = orb->cpa; /* Trigger the start function. */ s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); s->flags &= ~SCSW_FLAGS_MASK_PNO; do_subchannel_work(sch, orb); ret = 0; out: return ret; } | 10,719 |
0 | static void ehci_reset(void *opaque) { EHCIState *s = opaque; int i; USBDevice *devs[NB_PORTS]; trace_usb_ehci_reset(); /* * Do the detach before touching portsc, so that it correctly gets send to * us or to our companion based on PORTSC_POWNER before the reset. */ for(i = 0; i < NB_PORTS; i++) { devs[i] = s->ports[i].dev; if (devs[i]) { usb_attach(&s->ports[i], NULL); } } memset(&s->mmio[OPREGBASE], 0x00, MMIO_SIZE - OPREGBASE); s->usbcmd = NB_MAXINTRATE << USBCMD_ITC_SH; s->usbsts = USBSTS_HALT; s->astate = EST_INACTIVE; s->pstate = EST_INACTIVE; s->isoch_pause = -1; s->attach_poll_counter = 0; for(i = 0; i < NB_PORTS; i++) { if (s->companion_ports[i]) { s->portsc[i] = PORTSC_POWNER | PORTSC_PPOWER; } else { s->portsc[i] = PORTSC_PPOWER; } if (devs[i]) { usb_attach(&s->ports[i], devs[i]); } } ehci_queues_rip_all(s); } | 10,720 |
1 | DISAS_INSN(divl) { TCGv num; TCGv den; TCGv reg; uint16_t ext; ext = read_im16(env, s); if (ext & 0x87f8) { gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED); return; } num = DREG(ext, 12); reg = DREG(ext, 0); tcg_gen_mov_i32(QREG_DIV1, num); SRC_EA(env, den, OS_LONG, 0, NULL); tcg_gen_mov_i32(QREG_DIV2, den); if (ext & 0x0800) { gen_helper_divs(cpu_env, tcg_const_i32(0)); } else { gen_helper_divu(cpu_env, tcg_const_i32(0)); } if ((ext & 7) == ((ext >> 12) & 7)) { /* div */ tcg_gen_mov_i32 (reg, QREG_DIV1); } else { /* rem */ tcg_gen_mov_i32 (reg, QREG_DIV2); } set_cc_op(s, CC_OP_FLAGS); } | 10,724 |
1 | static void gen_dcbi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else TCGv EA, val; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } EA = tcg_temp_new(); gen_set_access_type(ctx, ACCESS_CACHE); gen_addr_reg_index(ctx, EA); val = tcg_temp_new(); /* XXX: specification says this should be treated as a store by the MMU */ gen_qemu_ld8u(ctx, val, EA); gen_qemu_st8(ctx, val, EA); tcg_temp_free(val); tcg_temp_free(EA); #endif } | 10,725 |
1 | static int setup_common(char *argv[], int argv_sz) { memset(cur_ide, 0, sizeof(cur_ide)); return append_arg(0, argv, argv_sz, g_strdup("-nodefaults -display none")); } | 10,726 |
1 | static int encode_init(AVCodecContext *avctx) { HYuvContext *s = avctx->priv_data; int i, j, width, height; s->avctx= avctx; s->flags= avctx->flags; dsputil_init(&s->dsp, avctx); width= s->width= avctx->width; height= s->height= avctx->height; assert(width && height); avctx->extradata= av_mallocz(1024*30); avctx->stats_out= av_mallocz(1024*30); s->version=2; avctx->coded_frame= &s->picture; switch(avctx->pix_fmt){ case PIX_FMT_YUV420P: s->bitstream_bpp= 12; break; case PIX_FMT_YUV422P: s->bitstream_bpp= 16; break; default: av_log(avctx, AV_LOG_ERROR, "format not supported\n"); return -1; } avctx->bits_per_sample= s->bitstream_bpp; s->decorrelate= s->bitstream_bpp >= 24; s->predictor= avctx->prediction_method; s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0; if(avctx->context_model==1){ s->context= avctx->context_model; if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){ av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n"); return -1; } }else s->context= 0; if(avctx->codec->id==CODEC_ID_HUFFYUV){ if(avctx->pix_fmt==PIX_FMT_YUV420P){ av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n"); return -1; } if(avctx->context_model){ av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n"); return -1; } if(s->interlaced != ( height > 288 )) av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n"); }else if(avctx->strict_std_compliance>=0){ av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodeable with future versions!!! Set vstrict=-1 to use it anyway.\n"); return -1; } ((uint8_t*)avctx->extradata)[0]= s->predictor; ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp; ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20; if(s->context) ((uint8_t*)avctx->extradata)[2]|= 0x40; ((uint8_t*)avctx->extradata)[3]= 0; s->avctx->extradata_size= 4; if(avctx->stats_in){ char *p= avctx->stats_in; for(i=0; i<3; i++) for(j=0; j<256; j++) s->stats[i][j]= 1; for(;;){ for(i=0; i<3; i++){ char *next; for(j=0; j<256; j++){ s->stats[i][j]+= strtol(p, &next, 0); if(next==p) return -1; p=next; } } if(p[0]==0 || p[1]==0 || p[2]==0) break; } }else{ for(i=0; i<3; i++) for(j=0; j<256; j++){ int d= FFMIN(j, 256-j); s->stats[i][j]= 100000000/(d+1); } } for(i=0; i<3; i++){ generate_len_table(s->len[i], s->stats[i], 256); if(generate_bits_table(s->bits[i], s->len[i])<0){ return -1; } s->avctx->extradata_size+= store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]); } if(s->context){ for(i=0; i<3; i++){ int pels = width*height / (i?40:10); for(j=0; j<256; j++){ int d= FFMIN(j, 256-j); s->stats[i][j]= pels/(d+1); } } }else{ for(i=0; i<3; i++) for(j=0; j<256; j++) s->stats[i][j]= 0; } // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced); s->picture_number=0; return 0; } | 10,731 |
1 | size_t qemu_file_set_rate_limit(QEMUFile *f, size_t new_rate) { if (f->set_rate_limit) return f->set_rate_limit(f->opaque, new_rate); return 0; } | 10,732 |
1 | static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd, ALSChannelData **cd, int *reverted, unsigned int offset, int c) { ALSChannelData *ch = cd[c]; unsigned int dep = 0; unsigned int channels = ctx->avctx->channels; if (reverted[c]) return 0; reverted[c] = 1; while (dep < channels && !ch[dep].stop_flag) { revert_channel_correlation(ctx, bd, cd, reverted, offset, ch[dep].master_channel); dep++; } if (dep == channels) { av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel correlation.\n"); return AVERROR_INVALIDDATA; } bd->const_block = ctx->const_block + c; bd->shift_lsbs = ctx->shift_lsbs + c; bd->opt_order = ctx->opt_order + c; bd->store_prev_samples = ctx->store_prev_samples + c; bd->use_ltp = ctx->use_ltp + c; bd->ltp_lag = ctx->ltp_lag + c; bd->ltp_gain = ctx->ltp_gain[c]; bd->lpc_cof = ctx->lpc_cof[c]; bd->quant_cof = ctx->quant_cof[c]; bd->raw_samples = ctx->raw_samples[c] + offset; dep = 0; while (!ch[dep].stop_flag) { unsigned int smp; unsigned int begin = 1; unsigned int end = bd->block_length - 1; int64_t y; int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset; if (ch[dep].time_diff_flag) { int t = ch[dep].time_diff_index; if (ch[dep].time_diff_sign) { t = -t; begin -= t; } else { end -= t; } for (smp = begin; smp < end; smp++) { y = (1 << 6) + MUL64(ch[dep].weighting[0], master[smp - 1 ]) + MUL64(ch[dep].weighting[1], master[smp ]) + MUL64(ch[dep].weighting[2], master[smp + 1 ]) + MUL64(ch[dep].weighting[3], master[smp - 1 + t]) + MUL64(ch[dep].weighting[4], master[smp + t]) + MUL64(ch[dep].weighting[5], master[smp + 1 + t]); bd->raw_samples[smp] += y >> 7; } } else { for (smp = begin; smp < end; smp++) { y = (1 << 6) + MUL64(ch[dep].weighting[0], master[smp - 1]) + MUL64(ch[dep].weighting[1], master[smp ]) + MUL64(ch[dep].weighting[2], master[smp + 1]); bd->raw_samples[smp] += y >> 7; } } dep++; } return 0; } | 10,733 |
1 | const char *cpu_parse_cpu_model(const char *typename, const char *cpu_model) { ObjectClass *oc; CPUClass *cc; Error *err = NULL; gchar **model_pieces; const char *cpu_type; model_pieces = g_strsplit(cpu_model, ",", 2); oc = cpu_class_by_name(typename, model_pieces[0]); if (oc == NULL) { g_strfreev(model_pieces); return NULL; } cpu_type = object_class_get_name(oc); cc = CPU_CLASS(oc); cc->parse_features(cpu_type, model_pieces[1], &err); g_strfreev(model_pieces); if (err != NULL) { error_report_err(err); return NULL; } return cpu_type; } | 10,734 |
0 | static void find_peaks(DCAEncContext *c) { int band, ch; for (band = 0; band < 32; band++) for (ch = 0; ch < c->fullband_channels; ch++) { int sample; int32_t m = 0; for (sample = 0; sample < SUBBAND_SAMPLES; sample++) { int32_t s = abs(c->subband[sample][band][ch]); if (m < s) m = s; } c->peak_cb[band][ch] = get_cb(m); } if (c->lfe_channel) { int sample; int32_t m = 0; for (sample = 0; sample < DCA_LFE_SAMPLES; sample++) if (m < abs(c->downsampled_lfe[sample])) m = abs(c->downsampled_lfe[sample]); c->lfe_peak_cb = get_cb(m); } } | 10,737 |
1 | void av_free(void *ptr) { #if CONFIG_MEMALIGN_HACK if (ptr) free((char *)ptr - ((char *)ptr)[-1]); #elif HAVE_ALIGNED_MALLOC _aligned_free(ptr); #else free(ptr); #endif } | 10,740 |
1 | void do_addeo (void) { T2 = T0; T0 += T1 + xer_ca; if (likely(!(T0 < T2 || (xer_ca == 1 && T0 == T2)))) { xer_ca = 0; } else { xer_ca = 1; } if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } } | 10,742 |
1 | static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b) { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(tmp1, a); dead_tmp(a); tcg_gen_extu_i32_i64(tmp2, b); dead_tmp(b); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_temp_free_i64(tmp2); return tmp1; } | 10,743 |
1 | static int tta_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { TTAContext *c = s->priv_data; AVStream *st = s->streams[stream_index]; int index = av_index_search_timestamp(st, timestamp, flags); if (index < 0) return -1; c->currentframe = index; avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET); return 0; } | 10,745 |
1 | static void quantize_and_encode_band_cost_UQUAD_mips(struct AACEncContext *s, PutBitContext *pb, const float *in, float *out, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, const float ROUNDING) { const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; int i; int qc1, qc2, qc3, qc4; uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1]; uint16_t *p_codes = (uint16_t *)ff_aac_spectral_codes[cb-1]; float *p_vec = (float *)ff_aac_codebook_vectors[cb-1]; abs_pow34_v(s->scoefs, in, size); scaled = s->scoefs; for (i = 0; i < size; i += 4) { int curidx, sign, count; int *in_int = (int *)&in[i]; uint8_t v_bits; unsigned int v_codes; int t0, t1, t2, t3, t4; const float *vec; qc1 = scaled[i ] * Q34 + ROUND_STANDARD; qc2 = scaled[i+1] * Q34 + ROUND_STANDARD; qc3 = scaled[i+2] * Q34 + ROUND_STANDARD; qc4 = scaled[i+3] * Q34 + ROUND_STANDARD; __asm__ volatile ( ".set push \n\t" ".set noreorder \n\t" "ori %[t4], $zero, 2 \n\t" "ori %[sign], $zero, 0 \n\t" "slt %[t0], %[t4], %[qc1] \n\t" "slt %[t1], %[t4], %[qc2] \n\t" "slt %[t2], %[t4], %[qc3] \n\t" "slt %[t3], %[t4], %[qc4] \n\t" "movn %[qc1], %[t4], %[t0] \n\t" "movn %[qc2], %[t4], %[t1] \n\t" "movn %[qc3], %[t4], %[t2] \n\t" "movn %[qc4], %[t4], %[t3] \n\t" "lw %[t0], 0(%[in_int]) \n\t" "lw %[t1], 4(%[in_int]) \n\t" "lw %[t2], 8(%[in_int]) \n\t" "lw %[t3], 12(%[in_int]) \n\t" "slt %[t0], %[t0], $zero \n\t" "movn %[sign], %[t0], %[qc1] \n\t" "slt %[t1], %[t1], $zero \n\t" "slt %[t2], %[t2], $zero \n\t" "slt %[t3], %[t3], $zero \n\t" "sll %[t0], %[sign], 1 \n\t" "or %[t0], %[t0], %[t1] \n\t" "movn %[sign], %[t0], %[qc2] \n\t" "slt %[t4], $zero, %[qc1] \n\t" "slt %[t1], $zero, %[qc2] \n\t" "slt %[count], $zero, %[qc3] \n\t" "sll %[t0], %[sign], 1 \n\t" "or %[t0], %[t0], %[t2] \n\t" "movn %[sign], %[t0], %[qc3] \n\t" "slt %[t2], $zero, %[qc4] \n\t" "addu %[count], %[count], %[t4] \n\t" "addu %[count], %[count], %[t1] \n\t" "sll %[t0], %[sign], 1 \n\t" "or %[t0], %[t0], %[t3] \n\t" "movn %[sign], %[t0], %[qc4] \n\t" "addu %[count], %[count], %[t2] \n\t" ".set pop \n\t" : [qc1]"+r"(qc1), [qc2]"+r"(qc2), [qc3]"+r"(qc3), [qc4]"+r"(qc4), [sign]"=&r"(sign), [count]"=&r"(count), [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3), [t4]"=&r"(t4) : [in_int]"r"(in_int) : "memory" ); curidx = qc1; curidx *= 3; curidx += qc2; curidx *= 3; curidx += qc3; curidx *= 3; curidx += qc4; v_codes = (p_codes[curidx] << count) | (sign & ((1 << count) - 1)); v_bits = p_bits[curidx] + count; put_bits(pb, v_bits, v_codes); if (out) { vec = &p_vec[curidx*4]; out[i+0] = copysignf(vec[0] * IQ, in[i+0]); out[i+1] = copysignf(vec[1] * IQ, in[i+1]); out[i+2] = copysignf(vec[2] * IQ, in[i+2]); out[i+3] = copysignf(vec[3] * IQ, in[i+3]); } } } | 10,746 |
1 | void ff_aac_update_ltp(AACEncContext *s, SingleChannelElement *sce) { int i, j, lag; float corr, s0, s1, max_corr = 0.0f; float *samples = &s->planar_samples[s->cur_channel][1024]; float *pred_signal = &sce->ltp_state[0]; int samples_num = 2048; if (s->profile != FF_PROFILE_AAC_LTP) return; /* Calculate lag */ for (i = 0; i < samples_num; i++) { s0 = s1 = 0.0f; for (j = 0; j < samples_num; j++) { if (j + 1024 < i) continue; s0 += samples[j]*pred_signal[j-i+1024]; s1 += pred_signal[j-i+1024]*pred_signal[j-i+1024]; } corr = s1 > 0.0f ? s0/sqrt(s1) : 0.0f; if (corr > max_corr) { max_corr = corr; lag = i; } } lag = av_clip(lag, 0, 2048); /* 11 bits => 2^11 = 2048 */ if (!lag) { sce->ics.ltp.lag = lag; return; } s0 = s1 = 0.0f; for (i = 0; i < lag; i++) { s0 += samples[i]; s1 += pred_signal[i-lag+1024]; } sce->ics.ltp.coef_idx = quant_array_idx(s0/s1, ltp_coef, 8); sce->ics.ltp.coef = ltp_coef[sce->ics.ltp.coef_idx]; /* Predict the new samples */ if (lag < 1024) samples_num = lag + 1024; for (i = 0; i < samples_num; i++) pred_signal[i+1024] = sce->ics.ltp.coef*pred_signal[i-lag+1024]; memset(&pred_signal[samples_num], 0, (2048 - samples_num)*sizeof(float)); sce->ics.ltp.lag = lag; } | 10,747 |
1 | static void z2_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; MemoryRegion *address_space_mem = get_system_memory(); uint32_t sector_len = 0x10000; PXA2xxState *mpu; DriveInfo *dinfo; int be; void *z2_lcd; I2CBus *bus; DeviceState *wm; if (!cpu_model) { cpu_model = "pxa270-c5"; } /* Setup CPU & memory */ mpu = pxa270_init(address_space_mem, z2_binfo.ram_size, cpu_model); #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif dinfo = drive_get(IF_PFLASH, 0, 0); if (!dinfo && !qtest_enabled()) { fprintf(stderr, "Flash image must be given with the " "'pflash' parameter\n"); exit(1); } if (!pflash_cfi01_register(Z2_FLASH_BASE, NULL, "z2.flash0", Z2_FLASH_SIZE, dinfo ? dinfo->bdrv : NULL, sector_len, Z2_FLASH_SIZE / sector_len, 4, 0, 0, 0, 0, be)) { fprintf(stderr, "qemu: Error registering flash memory.\n"); exit(1); } /* setup keypad */ pxa27x_register_keypad(mpu->kp, map, 0x100); /* MMC/SD host */ pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mpu->gpio, Z2_GPIO_SD_DETECT)); type_register_static(&zipit_lcd_info); type_register_static(&aer915_info); z2_lcd = ssi_create_slave(mpu->ssp[1], "zipit-lcd"); bus = pxa2xx_i2c_bus(mpu->i2c[0]); i2c_create_slave(bus, TYPE_AER915, 0x55); wm = i2c_create_slave(bus, "wm8750", 0x1b); mpu->i2s->opaque = wm; mpu->i2s->codec_out = wm8750_dac_dat; mpu->i2s->codec_in = wm8750_adc_dat; wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s); qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS, qemu_allocate_irqs(z2_lcd_cs, z2_lcd, 1)[0]); z2_binfo.kernel_filename = kernel_filename; z2_binfo.kernel_cmdline = kernel_cmdline; z2_binfo.initrd_filename = initrd_filename; z2_binfo.board_id = 0x6dd; arm_load_kernel(mpu->cpu, &z2_binfo); } | 10,748 |
0 | void ff_put_h264_qpel16_mc20_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hz_16w_msa(src - 2, stride, dst, stride, 16); } | 10,749 |
0 | void avformat_free_context(AVFormatContext *s) { int i; AVStream *st; av_opt_free(s); if (s->iformat && s->iformat->priv_class && s->priv_data) av_opt_free(s->priv_data); for(i=0;i<s->nb_streams;i++) { /* free all data in a stream component */ st = s->streams[i]; if (st->parser) { av_parser_close(st->parser); av_free_packet(&st->cur_pkt); } if (st->attached_pic.data) av_free_packet(&st->attached_pic); av_dict_free(&st->metadata); av_free(st->index_entries); av_free(st->codec->extradata); av_free(st->codec->subtitle_header); av_free(st->codec); av_free(st->priv_data); av_free(st->info); av_free(st); } for(i=s->nb_programs-1; i>=0; i--) { av_dict_free(&s->programs[i]->metadata); av_freep(&s->programs[i]->stream_index); av_freep(&s->programs[i]); } av_freep(&s->programs); av_freep(&s->priv_data); while(s->nb_chapters--) { av_dict_free(&s->chapters[s->nb_chapters]->metadata); av_free(s->chapters[s->nb_chapters]); } av_freep(&s->chapters); av_dict_free(&s->metadata); av_freep(&s->streams); av_free(s); } | 10,752 |
0 | static int encode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]) { int x, y, p, i; const int ring_size = s->avctx->context_model ? 3 : 2; int16_t *sample[4][3]; int lbd = s->bits_per_raw_sample <= 8; int bits = s->bits_per_raw_sample > 0 ? s->bits_per_raw_sample : 8; int offset = 1 << bits; s->run_index = 0; memset(s->sample_buffer, 0, ring_size * MAX_PLANES * (w + 6) * sizeof(*s->sample_buffer)); for (y = 0; y < h; y++) { for (i = 0; i < ring_size; i++) for (p = 0; p < MAX_PLANES; p++) sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3; for (x = 0; x < w; x++) { int b, g, r, av_uninit(a); if (lbd) { unsigned v = *((uint32_t*)(src[0] + x*4 + stride[0]*y)); b = v & 0xFF; g = (v >> 8) & 0xFF; r = (v >> 16) & 0xFF; a = v >> 24; } else { b = *((uint16_t*)(src[0] + x*2 + stride[0]*y)); g = *((uint16_t*)(src[1] + x*2 + stride[1]*y)); r = *((uint16_t*)(src[2] + x*2 + stride[2]*y)); } b -= g; r -= g; g += (b + r) >> 2; b += offset; r += offset; sample[0][0][x] = g; sample[1][0][x] = b; sample[2][0][x] = r; sample[3][0][x] = a; } for (p = 0; p < 3 + s->transparency; p++) { int ret; sample[p][0][-1] = sample[p][1][0 ]; sample[p][1][ w] = sample[p][1][w-1]; if (lbd) ret = encode_line(s, w, sample[p], (p + 1) / 2, 9); else ret = encode_line(s, w, sample[p], (p + 1) / 2, bits + 1); if (ret < 0) return ret; } } return 0; } | 10,753 |
0 | av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter) { int i; int usesVFilter, usesHFilter; int unscaled; SwsFilter dummyFilter = { NULL, NULL, NULL, NULL }; int srcW = c->srcW; int srcH = c->srcH; int dstW = c->dstW; int dstH = c->dstH; int dst_stride = FFALIGN(dstW * sizeof(int16_t) + 16, 16); int dst_stride_px = dst_stride >> 1; int flags, cpu_flags; enum PixelFormat srcFormat = c->srcFormat; enum PixelFormat dstFormat = c->dstFormat; cpu_flags = av_get_cpu_flags(); flags = c->flags; emms_c(); if (!rgb15to16) sws_rgb2rgb_init(); unscaled = (srcW == dstW && srcH == dstH); if (!sws_isSupportedInput(srcFormat)) { av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n", sws_format_name(srcFormat)); return AVERROR(EINVAL); } if (!sws_isSupportedOutput(dstFormat)) { av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n", sws_format_name(dstFormat)); return AVERROR(EINVAL); } i = flags & (SWS_POINT | SWS_AREA | SWS_BILINEAR | SWS_FAST_BILINEAR | SWS_BICUBIC | SWS_X | SWS_GAUSS | SWS_LANCZOS | SWS_SINC | SWS_SPLINE | SWS_BICUBLIN); if (!i || (i & (i - 1))) { av_log(c, AV_LOG_ERROR, "Exactly one scaler algorithm must be chosen\n"); return AVERROR(EINVAL); } /* sanity check */ if (srcW < 4 || srcH < 1 || dstW < 8 || dstH < 1) { /* FIXME check if these are enough and try to lower them after * fixing the relevant parts of the code */ av_log(c, AV_LOG_ERROR, "%dx%d -> %dx%d is invalid scaling dimension\n", srcW, srcH, dstW, dstH); return AVERROR(EINVAL); } if (!dstFilter) dstFilter = &dummyFilter; if (!srcFilter) srcFilter = &dummyFilter; c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW; c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH; c->dstFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[dstFormat]); c->srcFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[srcFormat]); c->vRounder = 4 * 0x0001000100010001ULL; usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) || (srcFilter->chrV && srcFilter->chrV->length > 1) || (dstFilter->lumV && dstFilter->lumV->length > 1) || (dstFilter->chrV && dstFilter->chrV->length > 1); usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) || (srcFilter->chrH && srcFilter->chrH->length > 1) || (dstFilter->lumH && dstFilter->lumH->length > 1) || (dstFilter->chrH && dstFilter->chrH->length > 1); getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat); getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat); /* reuse chroma for 2 pixels RGB/BGR unless user wants full * chroma interpolation */ if (flags & SWS_FULL_CHR_H_INT && isAnyRGB(dstFormat) && dstFormat != PIX_FMT_RGBA && dstFormat != PIX_FMT_ARGB && dstFormat != PIX_FMT_BGRA && dstFormat != PIX_FMT_ABGR && dstFormat != PIX_FMT_RGB24 && dstFormat != PIX_FMT_BGR24) { av_log(c, AV_LOG_ERROR, "full chroma interpolation for destination format '%s' not yet implemented\n", sws_format_name(dstFormat)); flags &= ~SWS_FULL_CHR_H_INT; c->flags = flags; } if (isAnyRGB(dstFormat) && !(flags & SWS_FULL_CHR_H_INT)) c->chrDstHSubSample = 1; // drop some chroma lines if the user wants it c->vChrDrop = (flags & SWS_SRC_V_CHR_DROP_MASK) >> SWS_SRC_V_CHR_DROP_SHIFT; c->chrSrcVSubSample += c->vChrDrop; /* drop every other pixel for chroma calculation unless user * wants full chroma */ if (isAnyRGB(srcFormat) && !(flags & SWS_FULL_CHR_H_INP) && srcFormat != PIX_FMT_RGB8 && srcFormat != PIX_FMT_BGR8 && srcFormat != PIX_FMT_RGB4 && srcFormat != PIX_FMT_BGR4 && srcFormat != 
PIX_FMT_RGB4_BYTE && srcFormat != PIX_FMT_BGR4_BYTE && ((dstW >> c->chrDstHSubSample) <= (srcW >> 1) || (flags & SWS_FAST_BILINEAR))) c->chrSrcHSubSample = 1; // Note the -((-x)>>y) is so that we always round toward +inf. c->chrSrcW = -((-srcW) >> c->chrSrcHSubSample); c->chrSrcH = -((-srcH) >> c->chrSrcVSubSample); c->chrDstW = -((-dstW) >> c->chrDstHSubSample); c->chrDstH = -((-dstH) >> c->chrDstVSubSample); /* unscaled special cases */ if (unscaled && !usesHFilter && !usesVFilter && (c->srcRange == c->dstRange || isAnyRGB(dstFormat))) { ff_get_unscaled_swscale(c); if (c->swScale) { if (flags & SWS_PRINT_INFO) av_log(c, AV_LOG_INFO, "using unscaled %s -> %s special converter\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); return 0; } } c->srcBpc = 1 + av_pix_fmt_descriptors[srcFormat].comp[0].depth_minus1; if (c->srcBpc < 8) c->srcBpc = 8; c->dstBpc = 1 + av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1; if (c->dstBpc < 8) c->dstBpc = 8; if (c->dstBpc == 16) dst_stride <<= 1; FF_ALLOC_OR_GOTO(c, c->formatConvBuffer, (FFALIGN(srcW, 16) * 2 * FFALIGN(c->srcBpc, 8) >> 3) + 16, fail); if (HAVE_MMXEXT && HAVE_INLINE_ASM && cpu_flags & AV_CPU_FLAG_MMXEXT && c->srcBpc == 8 && c->dstBpc <= 10) { c->canMMX2BeUsed = (dstW >= srcW && (dstW & 31) == 0 && (srcW & 15) == 0) ? 1 : 0; if (!c->canMMX2BeUsed && dstW >= srcW && (srcW & 15) == 0 && (flags & SWS_FAST_BILINEAR)) { if (flags & SWS_PRINT_INFO) av_log(c, AV_LOG_INFO, "output width is not a multiple of 32 -> no MMX2 scaler\n"); } if (usesHFilter) c->canMMX2BeUsed = 0; } else c->canMMX2BeUsed = 0; c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW; c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH; /* Match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src * to pixel n-2 of dst, but only for the FAST_BILINEAR mode otherwise do * correct scaling. * n-2 is the last chrominance sample available. * This is not perfect, but no one should notice the difference, the more * correct variant would be like the vertical one, but that would require * some special code for the first and last pixel */ if (flags & SWS_FAST_BILINEAR) { if (c->canMMX2BeUsed) { c->lumXInc += 20; c->chrXInc += 20; } // we don't use the x86 asm scaler if MMX is available else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) { c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20; c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20; } } /* precalculate horizontal scaler filter coefficients */ { #if HAVE_MMXEXT_INLINE // can't downscale !!! 
if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR)) { c->lumMmx2FilterCodeSize = initMMX2HScaler(dstW, c->lumXInc, NULL, NULL, NULL, 8); c->chrMmx2FilterCodeSize = initMMX2HScaler(c->chrDstW, c->chrXInc, NULL, NULL, NULL, 4); #ifdef MAP_ANONYMOUS c->lumMmx2FilterCode = mmap(NULL, c->lumMmx2FilterCodeSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); c->chrMmx2FilterCode = mmap(NULL, c->chrMmx2FilterCodeSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #elif HAVE_VIRTUALALLOC c->lumMmx2FilterCode = VirtualAlloc(NULL, c->lumMmx2FilterCodeSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE); c->chrMmx2FilterCode = VirtualAlloc(NULL, c->chrMmx2FilterCodeSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE); #else c->lumMmx2FilterCode = av_malloc(c->lumMmx2FilterCodeSize); c->chrMmx2FilterCode = av_malloc(c->chrMmx2FilterCodeSize); #endif if (!c->lumMmx2FilterCode || !c->chrMmx2FilterCode) return AVERROR(ENOMEM); FF_ALLOCZ_OR_GOTO(c, c->hLumFilter, (dstW / 8 + 8) * sizeof(int16_t), fail); FF_ALLOCZ_OR_GOTO(c, c->hChrFilter, (c->chrDstW / 4 + 8) * sizeof(int16_t), fail); FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW / 2 / 8 + 8) * sizeof(int32_t), fail); FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW / 2 / 4 + 8) * sizeof(int32_t), fail); initMMX2HScaler(dstW, c->lumXInc, c->lumMmx2FilterCode, c->hLumFilter, c->hLumFilterPos, 8); initMMX2HScaler(c->chrDstW, c->chrXInc, c->chrMmx2FilterCode, c->hChrFilter, c->hChrFilterPos, 4); #ifdef MAP_ANONYMOUS mprotect(c->lumMmx2FilterCode, c->lumMmx2FilterCodeSize, PROT_EXEC | PROT_READ); mprotect(c->chrMmx2FilterCode, c->chrMmx2FilterCodeSize, PROT_EXEC | PROT_READ); #endif } else #endif /* HAVE_MMXEXT_INLINE */ { const int filterAlign = (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? 4 : (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC) ? 8 : 1; if (initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc, srcW, dstW, filterAlign, 1 << 14, (flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags, cpu_flags, srcFilter->lumH, dstFilter->lumH, c->param, 1) < 0) goto fail; if (initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc, c->chrSrcW, c->chrDstW, filterAlign, 1 << 14, (flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags, cpu_flags, srcFilter->chrH, dstFilter->chrH, c->param, 1) < 0) goto fail; } } // initialize horizontal stuff /* precalculate vertical scaler filter coefficients */ { const int filterAlign = (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? 2 : (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC) ? 8 : 1; if (initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc, srcH, dstH, filterAlign, (1 << 12), (flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags, cpu_flags, srcFilter->lumV, dstFilter->lumV, c->param, 0) < 0) goto fail; if (initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc, c->chrSrcH, c->chrDstH, filterAlign, (1 << 12), (flags & SWS_BICUBLIN) ? 
(flags | SWS_BILINEAR) : flags, cpu_flags, srcFilter->chrV, dstFilter->chrV, c->param, 0) < 0) goto fail; #if HAVE_ALTIVEC FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof(vector signed short) * c->vLumFilterSize * c->dstH, fail); FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof(vector signed short) * c->vChrFilterSize * c->chrDstH, fail); for (i = 0; i < c->vLumFilterSize * c->dstH; i++) { int j; short *p = (short *)&c->vYCoeffsBank[i]; for (j = 0; j < 8; j++) p[j] = c->vLumFilter[i]; } for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) { int j; short *p = (short *)&c->vCCoeffsBank[i]; for (j = 0; j < 8; j++) p[j] = c->vChrFilter[i]; } #endif } // calculate buffer sizes so that they won't run out while handling these damn slices c->vLumBufSize = c->vLumFilterSize; c->vChrBufSize = c->vChrFilterSize; for (i = 0; i < dstH; i++) { int chrI = (int64_t)i * c->chrDstH / dstH; int nextSlice = FFMAX(c->vLumFilterPos[i] + c->vLumFilterSize - 1, ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1) << c->chrSrcVSubSample)); nextSlice >>= c->chrSrcVSubSample; nextSlice <<= c->chrSrcVSubSample; if (c->vLumFilterPos[i] + c->vLumBufSize < nextSlice) c->vLumBufSize = nextSlice - c->vLumFilterPos[i]; if (c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice >> c->chrSrcVSubSample)) c->vChrBufSize = (nextSlice >> c->chrSrcVSubSample) - c->vChrFilterPos[chrI]; } /* Allocate pixbufs (we use dynamic allocation because otherwise we would * need to allocate several megabytes to handle all possible cases) */ FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail); FF_ALLOC_OR_GOTO(c, c->chrUPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail); FF_ALLOC_OR_GOTO(c, c->chrVPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail); if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail); /* Note we need at least one pixel more at the end because of the MMX code * (just in case someone wants to replace the 4000/8000). 
*/ /* align at 16 bytes for AltiVec */ for (i = 0; i < c->vLumBufSize; i++) { FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i + c->vLumBufSize], dst_stride + 16, fail); c->lumPixBuf[i] = c->lumPixBuf[i + c->vLumBufSize]; } // 64 / (c->dstBpc & ~7) is the same as 16 / sizeof(scaling_intermediate) c->uv_off_px = dst_stride_px + 64 / (c->dstBpc & ~7); c->uv_off_byte = dst_stride + 16; for (i = 0; i < c->vChrBufSize; i++) { FF_ALLOC_OR_GOTO(c, c->chrUPixBuf[i + c->vChrBufSize], dst_stride * 2 + 32, fail); c->chrUPixBuf[i] = c->chrUPixBuf[i + c->vChrBufSize]; c->chrVPixBuf[i] = c->chrVPixBuf[i + c->vChrBufSize] = c->chrUPixBuf[i] + (dst_stride >> 1) + 8; } if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) for (i = 0; i < c->vLumBufSize; i++) { FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i + c->vLumBufSize], dst_stride + 16, fail); c->alpPixBuf[i] = c->alpPixBuf[i + c->vLumBufSize]; } // try to avoid drawing green stuff between the right end and the stride end for (i = 0; i < c->vChrBufSize; i++) memset(c->chrUPixBuf[i], 64, dst_stride * 2 + 1); assert(c->chrDstH <= dstH); if (flags & SWS_PRINT_INFO) { if (flags & SWS_FAST_BILINEAR) av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, "); else if (flags & SWS_BILINEAR) av_log(c, AV_LOG_INFO, "BILINEAR scaler, "); else if (flags & SWS_BICUBIC) av_log(c, AV_LOG_INFO, "BICUBIC scaler, "); else if (flags & SWS_X) av_log(c, AV_LOG_INFO, "Experimental scaler, "); else if (flags & SWS_POINT) av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, "); else if (flags & SWS_AREA) av_log(c, AV_LOG_INFO, "Area Averaging scaler, "); else if (flags & SWS_BICUBLIN) av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, "); else if (flags & SWS_GAUSS) av_log(c, AV_LOG_INFO, "Gaussian scaler, "); else if (flags & SWS_SINC) av_log(c, AV_LOG_INFO, "Sinc scaler, "); else if (flags & SWS_LANCZOS) av_log(c, AV_LOG_INFO, "Lanczos scaler, "); else if (flags & SWS_SPLINE) av_log(c, AV_LOG_INFO, "Bicubic spline scaler, "); else av_log(c, AV_LOG_INFO, "ehh flags invalid?! "); av_log(c, AV_LOG_INFO, "from %s to %s%s ", sws_format_name(srcFormat), #ifdef DITHER1XBPP dstFormat == PIX_FMT_BGR555 || dstFormat == PIX_FMT_BGR565 || dstFormat == PIX_FMT_RGB444BE || dstFormat == PIX_FMT_RGB444LE || dstFormat == PIX_FMT_BGR444BE || dstFormat == PIX_FMT_BGR444LE ? "dithered " : "", #else "", #endif sws_format_name(dstFormat)); if (HAVE_MMXEXT && cpu_flags & AV_CPU_FLAG_MMXEXT) av_log(c, AV_LOG_INFO, "using MMX2\n"); else if (HAVE_AMD3DNOW && cpu_flags & AV_CPU_FLAG_3DNOW) av_log(c, AV_LOG_INFO, "using 3DNOW\n"); else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) av_log(c, AV_LOG_INFO, "using MMX\n"); else if (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC) av_log(c, AV_LOG_INFO, "using AltiVec\n"); else av_log(c, AV_LOG_INFO, "using C\n"); av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH); av_log(c, AV_LOG_DEBUG, "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc); av_log(c, AV_LOG_DEBUG, "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc); } c->swScale = ff_getSwsFunc(c); return 0; fail: // FIXME replace things by appropriate error codes return -1; } | 10,754 |
0 | static int vsink_query_formats(AVFilterContext *ctx) { BufferSinkContext *buf = ctx->priv; AVFilterFormats *formats = NULL; unsigned i; int ret; if (buf->pixel_fmts_size % sizeof(*buf->pixel_fmts)) { av_log(ctx, AV_LOG_ERROR, "Invalid size for format list\n"); return AVERROR(EINVAL); } if (buf->pixel_fmts_size) { for (i = 0; i < NB_ITEMS(buf->pixel_fmts); i++) if ((ret = ff_add_format(&formats, buf->pixel_fmts[i])) < 0) return ret; ff_set_common_formats(ctx, formats); } else { ff_default_query_formats(ctx); } return 0; } | 10,755 |
0 | av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha) { VP56Context *s = avctx->priv_data; int i; s->avctx = avctx; avctx->pix_fmt = has_alpha ? PIX_FMT_YUVA420P : PIX_FMT_YUV420P; if (avctx->idct_algo == FF_IDCT_AUTO) avctx->idct_algo = FF_IDCT_VP3; ff_dsputil_init(&s->dsp, avctx); ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id); ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct); for (i=0; i<4; i++) s->framep[i] = &s->frames[i]; s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN]; s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2]; s->edge_emu_buffer_alloc = NULL; s->above_blocks = NULL; s->macroblocks = NULL; s->quantizer = -1; s->deblock_filtering = 1; s->filter = NULL; s->has_alpha = has_alpha; if (flip) { s->flip = -1; s->frbi = 2; s->srbi = 0; } else { s->flip = 1; s->frbi = 0; s->srbi = 2; } } | 10,756 |
1 | static int sd_snapshot_delete(BlockDriverState *bs, const char *snapshot_id) { /* FIXME: Delete specified snapshot id. */ return 0; } | 10,757 |
1 | static int rso_read_packet(AVFormatContext *s, AVPacket *pkt) { int bps = av_get_bits_per_sample(s->streams[0]->codec->codec_id); int ret = av_get_packet(s->pb, pkt, BLOCK_SIZE * bps >> 3); if (ret < 0) return ret; pkt->stream_index = 0; /* note: we need to modify the packet size here to handle the last packet */ pkt->size = ret; return 0; } | 10,758 |
1 | static int vmd_read_header(AVFormatContext *s) { VmdDemuxContext *vmd = s->priv_data; AVIOContext *pb = s->pb; AVStream *st = NULL, *vst; unsigned int toc_offset; unsigned char *raw_frame_table; int raw_frame_table_size; int64_t current_offset; int i, j; unsigned int total_frames; int64_t current_audio_pts = 0; unsigned char chunk[BYTES_PER_FRAME_RECORD]; int num, den; int sound_buffers; /* fetch the main header, including the 2 header length bytes */ avio_seek(pb, 0, SEEK_SET); if (avio_read(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE) return AVERROR(EIO); if(vmd->vmd_header[24] == 'i' && vmd->vmd_header[25] == 'v' && vmd->vmd_header[26] == '3') vmd->is_indeo3 = 1; else vmd->is_indeo3 = 0; /* start up the decoders */ vst = avformat_new_stream(s, NULL); if (!vst) return AVERROR(ENOMEM); avpriv_set_pts_info(vst, 33, 1, 10); vmd->video_stream_index = vst->index; vst->codec->codec_type = AVMEDIA_TYPE_VIDEO; vst->codec->codec_id = vmd->is_indeo3 ? AV_CODEC_ID_INDEO3 : AV_CODEC_ID_VMDVIDEO; vst->codec->codec_tag = 0; /* no fourcc */ vst->codec->width = AV_RL16(&vmd->vmd_header[12]); vst->codec->height = AV_RL16(&vmd->vmd_header[14]); if(vmd->is_indeo3 && vst->codec->width > 320){ vst->codec->width >>= 1; vst->codec->height >>= 1; } vst->codec->extradata_size = VMD_HEADER_SIZE; vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE); /* if sample rate is 0, assume no audio */ vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]); if (vmd->sample_rate) { st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); vmd->audio_stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_VMDAUDIO; st->codec->codec_tag = 0; /* no fourcc */ if (vmd->vmd_header[811] & 0x80) { st->codec->channels = 2; st->codec->channel_layout = AV_CH_LAYOUT_STEREO; } else { st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; } st->codec->sample_rate = vmd->sample_rate; st->codec->block_align = AV_RL16(&vmd->vmd_header[806]); if (st->codec->block_align & 0x8000) { st->codec->bits_per_coded_sample = 16; st->codec->block_align = -(st->codec->block_align - 0x10000); } else { st->codec->bits_per_coded_sample = 8; } st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_coded_sample * st->codec->channels; /* calculate pts */ num = st->codec->block_align; den = st->codec->sample_rate * st->codec->channels; av_reduce(&den, &num, den, num, (1UL<<31)-1); avpriv_set_pts_info(vst, 33, num, den); avpriv_set_pts_info(st, 33, num, den); } toc_offset = AV_RL32(&vmd->vmd_header[812]); vmd->frame_count = AV_RL16(&vmd->vmd_header[6]); vmd->frames_per_block = AV_RL16(&vmd->vmd_header[18]); avio_seek(pb, toc_offset, SEEK_SET); raw_frame_table = NULL; vmd->frame_table = NULL; sound_buffers = AV_RL16(&vmd->vmd_header[808]); raw_frame_table_size = vmd->frame_count * 6; if(vmd->frame_count * vmd->frames_per_block >= UINT_MAX / sizeof(vmd_frame) - sound_buffers){ av_log(s, AV_LOG_ERROR, "vmd->frame_count * vmd->frames_per_block too large\n"); return -1; } raw_frame_table = av_malloc(raw_frame_table_size); vmd->frame_table = av_malloc((vmd->frame_count * vmd->frames_per_block + sound_buffers) * sizeof(vmd_frame)); if (!raw_frame_table || !vmd->frame_table) { av_free(raw_frame_table); av_free(vmd->frame_table); return AVERROR(ENOMEM); } if (avio_read(pb, raw_frame_table, raw_frame_table_size) != raw_frame_table_size) { av_free(raw_frame_table); 
av_free(vmd->frame_table); return AVERROR(EIO); } total_frames = 0; for (i = 0; i < vmd->frame_count; i++) { current_offset = AV_RL32(&raw_frame_table[6 * i + 2]); /* handle each entry in index block */ for (j = 0; j < vmd->frames_per_block; j++) { int type; uint32_t size; avio_read(pb, chunk, BYTES_PER_FRAME_RECORD); type = chunk[0]; size = AV_RL32(&chunk[2]); if(!size && type != 1) continue; switch(type) { case 1: /* Audio Chunk */ if (!st) break; /* first audio chunk contains several audio buffers */ vmd->frame_table[total_frames].frame_offset = current_offset; vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index; vmd->frame_table[total_frames].frame_size = size; memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD); vmd->frame_table[total_frames].pts = current_audio_pts; total_frames++; if(!current_audio_pts) current_audio_pts += sound_buffers - 1; else current_audio_pts++; break; case 2: /* Video Chunk */ vmd->frame_table[total_frames].frame_offset = current_offset; vmd->frame_table[total_frames].stream_index = vmd->video_stream_index; vmd->frame_table[total_frames].frame_size = size; memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD); vmd->frame_table[total_frames].pts = i; total_frames++; break; } current_offset += size; } } av_free(raw_frame_table); vmd->current_frame = 0; vmd->frame_count = total_frames; return 0; } | 10,759 |
1 | static int read_f(BlockBackend *blk, int argc, char **argv) { struct timeval t1, t2; bool Cflag = false, qflag = false, vflag = false; bool Pflag = false, sflag = false, lflag = false, bflag = false; int c, cnt; char *buf; int64_t offset; int64_t count; /* Some compilers get confused and warn if this is not initialized. */ int64_t total = 0; int pattern = 0; int64_t pattern_offset = 0, pattern_count = 0; while ((c = getopt(argc, argv, "bCl:pP:qs:v")) != -1) { switch (c) { case 'b': bflag = true; break; case 'C': Cflag = true; break; case 'l': lflag = true; pattern_count = cvtnum(optarg); if (pattern_count < 0) { print_cvtnum_err(pattern_count, optarg); return 0; } break; case 'p': /* Ignored for backwards compatibility */ break; case 'P': Pflag = true; pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; case 'q': qflag = true; break; case 's': sflag = true; pattern_offset = cvtnum(optarg); if (pattern_offset < 0) { print_cvtnum_err(pattern_offset, optarg); return 0; } break; case 'v': vflag = true; break; default: return qemuio_command_usage(&read_cmd); } } if (optind != argc - 2) { return qemuio_command_usage(&read_cmd); } offset = cvtnum(argv[optind]); if (offset < 0) { print_cvtnum_err(offset, argv[optind]); return 0; } optind++; count = cvtnum(argv[optind]); if (count < 0) { print_cvtnum_err(count, argv[optind]); return 0; } else if (count > SIZE_MAX) { printf("length cannot exceed %" PRIu64 ", given %s\n", (uint64_t) SIZE_MAX, argv[optind]); return 0; } if (!Pflag && (lflag || sflag)) { return qemuio_command_usage(&read_cmd); } if (!lflag) { pattern_count = count - pattern_offset; } if ((pattern_count < 0) || (pattern_count + pattern_offset > count)) { printf("pattern verification range exceeds end of read data\n"); return 0; } if (bflag) { if (offset & 0x1ff) { printf("offset %" PRId64 " is not sector aligned\n", offset); return 0; } if (count & 0x1ff) { printf("count %"PRId64" is not sector aligned\n", count); return 0; } } buf = qemu_io_alloc(blk, count, 0xab); gettimeofday(&t1, NULL); if (bflag) { cnt = do_load_vmstate(blk, buf, offset, count, &total); } else { cnt = do_pread(blk, buf, offset, count, &total); } gettimeofday(&t2, NULL); if (cnt < 0) { printf("read failed: %s\n", strerror(-cnt)); goto out; } if (Pflag) { void *cmp_buf = g_malloc(pattern_count); memset(cmp_buf, pattern, pattern_count); if (memcmp(buf + pattern_offset, cmp_buf, pattern_count)) { printf("Pattern verification failed at offset %" PRId64 ", %"PRId64" bytes\n", offset + pattern_offset, pattern_count); } g_free(cmp_buf); } if (qflag) { goto out; } if (vflag) { dump_buffer(buf, offset, count); } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report("read", &t2, offset, count, total, cnt, Cflag); out: qemu_io_free(buf); return 0; } | 10,760 |
1 | PPC_OP(subfze) { T1 = ~T0; T0 = T1 + xer_ca; if (T0 < T1) { xer_ca = 1; } else { xer_ca = 0; } RETURN(); } | 10,761 |
0 | void ff_celp_lp_zero_synthesis_filterf(float *out, const float* filter_coeffs, const float* in, int buffer_length, int filter_length) { int i,n; // Avoids a +1 in the inner loop. filter_length++; for (n = 0; n < buffer_length; n++) { out[n] = in[n]; for (i = 1; i < filter_length; i++) out[n] += filter_coeffs[i-1] * in[n-i]; } } | 10,763 |
0 | static int RENAME(epzs_motion_search2)(MpegEncContext * s, int *mx_ptr, int *my_ptr, int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3], uint8_t *ref_data[3], int stride, int uvstride, int16_t (*last_mv)[2], int ref_mv_scale, uint8_t * const mv_penalty) { int best[2]={0, 0}; int d, dmin; const int shift= 1+s->quarter_sample; uint32_t *map= s->me.map; int map_generation; const int penalty_factor= s->me.penalty_factor; const int size=0; //FIXME pass as arg const int h=8; const int ref_mv_stride= s->mb_stride; const int ref_mv_xy= s->mb_x + s->mb_y *ref_mv_stride; me_cmp_func cmp, chroma_cmp; LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; map_generation= update_map_generation(s); dmin = 1000000; //printf("%d %d %d %d //",xmin, ymin, xmax, ymax); /* first line */ if (s->first_slice_line) { CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) CHECK_MV(P_MV1[0]>>shift, P_MV1[1]>>shift) }else{ CHECK_MV(P_MV1[0]>>shift, P_MV1[1]>>shift) //FIXME try some early stop if(dmin>64*2){ CHECK_MV(P_MEDIAN[0]>>shift, P_MEDIAN[1]>>shift) CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) CHECK_MV(P_TOP[0]>>shift, P_TOP[1]>>shift) CHECK_MV(P_TOPRIGHT[0]>>shift, P_TOPRIGHT[1]>>shift) CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) } } if(dmin>64*4){ CHECK_CLIPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16) if(s->end_mb_y == s->mb_height || s->mb_y+1<s->end_mb_y) //FIXME replace at least with last_slice_line CHECK_CLIPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16) } if(s->me.dia_size==-1) dmin= RENAME(funny_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, pred_x, pred_y, penalty_factor, shift, map, map_generation, size, h, mv_penalty); else if(s->me.dia_size<-1) dmin= RENAME(sab_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, pred_x, pred_y, penalty_factor, shift, map, map_generation, size, h, mv_penalty); else if(s->me.dia_size<2) dmin= RENAME(small_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, pred_x, pred_y, penalty_factor, shift, map, map_generation, size, h, mv_penalty); else dmin= RENAME(var_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, pred_x, pred_y, penalty_factor, shift, map, map_generation, size, h, mv_penalty); *mx_ptr= best[0]; *my_ptr= best[1]; // printf("%d %d %d \n", best[0], best[1], dmin); return dmin; } | 10,764 |
0 | static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride) { const int index = size2index[log2h][log2w]; const int h = 1 << log2h; int code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table, BLOCK_TYPE_VLC_BITS, 1); uint16_t *start = f->last_frame_buffer; uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w); int ret; int scale = 1; unsigned dc = 0; if (code < 0 || code > 6 || log2w < 0) return AVERROR_INVALIDDATA; if (code == 1) { log2h--; if ((ret = decode_p_block(f, dst, src, log2w, log2h, stride)) < 0) return ret; return decode_p_block(f, dst + (stride << log2h), src + (stride << log2h), log2w, log2h, stride); } else if (code == 2) { log2w--; if ((ret = decode_p_block(f, dst , src, log2w, log2h, stride)) < 0) return ret; return decode_p_block(f, dst + (1 << log2w), src + (1 << log2w), log2w, log2h, stride); } else if (code == 6) { if (log2w) { dst[0] = bytestream2_get_le16(&f->g2); dst[1] = bytestream2_get_le16(&f->g2); } else { dst[0] = bytestream2_get_le16(&f->g2); dst[stride] = bytestream2_get_le16(&f->g2); } return 0; } if (code == 0) { src += f->mv[bytestream2_get_byte(&f->g)]; } else if (code == 3 && f->version >= 2) { return 0; } else if (code == 4) { src += f->mv[bytestream2_get_byte(&f->g)]; dc = bytestream2_get_le16(&f->g2); } else if (code == 5) { scale = 0; dc = bytestream2_get_le16(&f->g2); } if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return AVERROR_INVALIDDATA; } mcdc(dst, src, log2w, h, stride, scale, dc); return 0; } | 10,766 |
0 | static void achroma(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset, int column) { const int plane = s->desc->comp[component].plane; const int mirror = s->mirror; const int c1_linesize = in->linesize[(plane + 1) % s->ncomp]; const int c2_linesize = in->linesize[(plane + 2) % s->ncomp]; const int d1_linesize = out->linesize[(plane + 1) % s->ncomp]; const int d2_linesize = out->linesize[(plane + 2) % s->ncomp]; const int max = 255 - intensity; const int src_h = in->height; const int src_w = in->width; int x, y; if (column) { const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1); const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1); for (x = 0; x < src_w; x++) { const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp]; const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp]; uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset * d1_linesize; uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset * d2_linesize; uint8_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1); uint8_t * const d1 = (mirror ? d1_bottom_line : d1_data); uint8_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1); uint8_t * const d2 = (mirror ? d2_bottom_line : d2_data); for (y = 0; y < src_h; y++) { const int c1 = c1_data[x] - 128; const int c2 = c2_data[x] - 128; uint8_t *target; int p; for (p = 128 + c1; p < 128; p++) { target = d1 + x + d1_signed_linesize * p; update(target, max, 1); } for (p = 128 + c1 - 1; p > 128; p--) { target = d1 + x + d1_signed_linesize * p; update(target, max, 1); } for (p = 128 + c2; p < 128; p++) { target = d2 + x + d2_signed_linesize * p; update(target, max, 1); } for (p = 128 + c2 - 1; p > 128; p--) { target = d2 + x + d2_signed_linesize * p; update(target, max, 1); } c1_data += c1_linesize; c2_data += c2_linesize; d1_data += d1_linesize; d2_data += d2_linesize; } } } else { const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp]; const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp]; uint8_t *d0_data = out->data[plane] + offset; uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset; uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset; if (mirror) { d0_data += s->size - 1; d1_data += s->size - 1; d2_data += s->size - 1; } for (y = 0; y < src_h; y++) { for (x = 0; x < src_w; x++) { const int c1 = c1_data[x] - 128; const int c2 = c2_data[x] - 128; uint8_t *target; int p; for (p = 128 + c1; p < 128; p++) { if (mirror) target = d1_data - p; else target = d1_data + p; update(target, max, 1); } for (p = 128 + 1; p < 128 + c1; p++) { if (mirror) target = d1_data - p; else target = d1_data + p; update(target, max, 1); } for (p = 128 + c2; p < 128; p++) { if (mirror) target = d2_data - p; else target = d2_data + p; update(target, max, 1); } for (p = 128 + 1; p < 128 + c2; p++) { if (mirror) target = d2_data - p; else target = d2_data + p; update(target, max, 1); } } c1_data += c1_linesize; c2_data += c2_linesize; d1_data += d1_linesize; d2_data += d2_linesize; } } envelope(s, out, plane, (plane + 1) % s->ncomp); envelope(s, out, plane, (plane + 2) % s->ncomp); } | 10,768 |
0 | static int flac_probe(AVProbeData *p) { uint8_t *bufptr = p->buf; if(ff_id3v2_match(bufptr)) bufptr += ff_id3v2_tag_len(bufptr); if(memcmp(bufptr, "fLaC", 4)) return 0; else return AVPROBE_SCORE_MAX / 2; } | 10,769 |
0 | static av_always_inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out, int blocks_per_slice) { DCTELEM prev_dc; int code, i, sign; OPEN_READER(re, gb); DECODE_CODEWORD(code, FIRST_DC_CB); prev_dc = TOSIGNED(code); out[0] = prev_dc; out += 64; // dc coeff for the next block code = 5; sign = 0; for (i = 1; i < blocks_per_slice; i++, out += 64) { DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6)]); if(code) sign ^= -(code & 1); else sign = 0; prev_dc += (((code + 1) >> 1) ^ sign) - sign; out[0] = prev_dc; } CLOSE_READER(re, gb); } | 10,770 |
1 | int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, VdpGetProcAddress *get_proc, unsigned flags) { VDPAUHWContext *hwctx; if (flags != 0) return AVERROR(EINVAL); if (av_reallocp(&avctx->hwaccel_context, sizeof(*hwctx))) return AVERROR(ENOMEM); hwctx = avctx->hwaccel_context; memset(hwctx, 0, sizeof(*hwctx)); hwctx->context.decoder = VDP_INVALID_HANDLE; hwctx->device = device; hwctx->get_proc_address = get_proc; hwctx->reset = 1; return 0; } | 10,771 |
1 | static int decode_ref_pic_marking(H264Context *h){ MpegEncContext * const s = &h->s; int i; if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields s->broken_link= get_bits1(&s->gb) -1; h->mmco[0].long_index= get_bits1(&s->gb) - 1; // current_long_term_idx if(h->mmco[0].long_index == -1) h->mmco_index= 0; else{ h->mmco[0].opcode= MMCO_LONG; h->mmco_index= 1; } }else{ if(get_bits1(&s->gb)){ // adaptive_ref_pic_marking_mode_flag for(i= 0; i<MAX_MMCO_COUNT; i++) { MMCOOpcode opcode= get_ue_golomb(&s->gb);; h->mmco[i].opcode= opcode; if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){ h->mmco[i].short_frame_num= (h->frame_num - get_ue_golomb(&s->gb) - 1) & ((1<<h->sps.log2_max_frame_num)-1); //FIXME fields /* if(h->mmco[i].short_frame_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_frame_num ] == NULL){ av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco); return -1; }*/ } if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){ h->mmco[i].long_index= get_ue_golomb(&s->gb); if(/*h->mmco[i].long_index >= h->long_ref_count || h->long_ref[ h->mmco[i].long_index ] == NULL*/ h->mmco[i].long_index >= 16){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode); return -1; } } if(opcode > MMCO_LONG){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode); return -1; } if(opcode == MMCO_END) break; } h->mmco_index= i; }else{ assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count); if(h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count){ //FIXME fields h->mmco[0].opcode= MMCO_SHORT2UNUSED; h->mmco[0].short_frame_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num; h->mmco_index= 1; }else h->mmco_index= 0; } } return 0; } | 10,774 |
1 | static DeviceState *sbi_init(target_phys_addr_t addr, qemu_irq **parent_irq) { DeviceState *dev; SysBusDevice *s; unsigned int i; dev = qdev_create(NULL, "sbi"); qdev_init(dev); s = sysbus_from_qdev(dev); for (i = 0; i < MAX_CPUS; i++) { sysbus_connect_irq(s, i, *parent_irq[i]); } sysbus_mmio_map(s, 0, addr); return dev; } | 10,775 |
1 | void qdist_add(struct qdist *dist, double x, long count) { struct qdist_entry *entry = NULL; if (dist->n) { struct qdist_entry e; e.x = x; entry = bsearch(&e, dist->entries, dist->n, sizeof(e), qdist_cmp); } if (entry) { entry->count += count; return; } if (unlikely(dist->n == dist->size)) { dist->size *= 2; dist->entries = g_realloc(dist->entries, sizeof(*dist->entries) * (dist->size)); } dist->n++; entry = &dist->entries[dist->n - 1]; entry->x = x; entry->count = count; qsort(dist->entries, dist->n, sizeof(*entry), qdist_cmp); } | 10,776 |
0 | static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) { const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; tc[2] = tc0_table[index_a][bS[2]]; tc[3] = tc0_table[index_a][bS[3]]; h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta); } } | 10,777 |
0 | static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, int n, const uint8_t *scantable, int qp, int max_coeff){ MpegEncContext * const s = &h->s; const uint16_t *qmul= dequant_coeff[qp]; static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3}; int level[16], run[16]; int suffix_length, zeros_left, coeff_num, coeff_token, total_coeff, i, trailing_ones; //FIXME put trailing_onex into the context if(n == CHROMA_DC_BLOCK_INDEX){ coeff_token= get_vlc2(gb, chroma_dc_coeff_token_vlc.table, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 1); total_coeff= coeff_token>>2; }else{ if(n == LUMA_DC_BLOCK_INDEX){ total_coeff= pred_non_zero_count(h, 0); coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2); total_coeff= coeff_token>>2; }else{ total_coeff= pred_non_zero_count(h, n); coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2); total_coeff= coeff_token>>2; h->non_zero_count_cache[ scan8[n] ]= total_coeff; } } //FIXME set last_non_zero? if(total_coeff==0) return 0; trailing_ones= coeff_token&3; tprintf("trailing:%d, total:%d\n", trailing_ones, total_coeff); assert(total_coeff<=16); for(i=0; i<trailing_ones; i++){ level[i]= 1 - 2*get_bits1(gb); } suffix_length= total_coeff > 10 && trailing_ones < 3; for(; i<total_coeff; i++){ const int prefix= get_level_prefix(gb); int level_code, mask; if(prefix<14){ //FIXME try to build a large unified VLC table for all this if(suffix_length) level_code= (prefix<<suffix_length) + get_bits(gb, suffix_length); //part else level_code= (prefix<<suffix_length); //part }else if(prefix==14){ if(suffix_length) level_code= (prefix<<suffix_length) + get_bits(gb, suffix_length); //part else level_code= prefix + get_bits(gb, 4); //part }else if(prefix==15){ level_code= (prefix<<suffix_length) + get_bits(gb, 12); //part if(suffix_length==0) level_code+=15; //FIXME doesnt make (much)sense }else{ av_log(h->s.avctx, AV_LOG_ERROR, "prefix too large at %d %d\n", s->mb_x, s->mb_y); return -1; } if(i==trailing_ones && i<3) level_code+= 2; //FIXME split first iteration mask= -(level_code&1); level[i]= (((2+level_code)>>1) ^ mask) - mask; if(suffix_length==0) suffix_length=1; //FIXME split first iteration #if 1 if(ABS(level[i]) > (3<<(suffix_length-1)) && suffix_length<6) suffix_length++; #else if((2+level_code)>>1) > (3<<(suffix_length-1)) && suffix_length<6) suffix_length++; ? == prefix > 2 or sth #endif tprintf("level: %d suffix_length:%d\n", level[i], suffix_length); } if(total_coeff == max_coeff) zeros_left=0; else{ if(n == CHROMA_DC_BLOCK_INDEX) zeros_left= get_vlc2(gb, chroma_dc_total_zeros_vlc[ total_coeff-1 ].table, CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 1); else zeros_left= get_vlc2(gb, total_zeros_vlc[ total_coeff-1 ].table, TOTAL_ZEROS_VLC_BITS, 1); } for(i=0; i<total_coeff-1; i++){ if(zeros_left <=0) break; else if(zeros_left < 7){ run[i]= get_vlc2(gb, run_vlc[zeros_left-1].table, RUN_VLC_BITS, 1); }else{ run[i]= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); } zeros_left -= run[i]; } if(zeros_left<0){ av_log(h->s.avctx, AV_LOG_ERROR, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y); return -1; } for(; i<total_coeff-1; i++){ run[i]= 0; } run[i]= zeros_left; coeff_num=-1; if(n > 24){ for(i=total_coeff-1; i>=0; i--){ //FIXME merge into rundecode? int j; coeff_num += run[i] + 1; //FIXME add 1 earlier ? j= scantable[ coeff_num ]; block[j]= level[i]; } }else{ for(i=total_coeff-1; i>=0; i--){ //FIXME merge into rundecode? int j; coeff_num += run[i] + 1; //FIXME add 1 earlier ? j= scantable[ coeff_num ]; block[j]= level[i] * qmul[j]; // printf("%d %d ", block[j], qmul[j]); } } return 0; } | 10,779
1 | static void spapr_phb_vfio_finish_realize(sPAPRPHBState *sphb, Error **errp) { sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb); struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) }; int ret; sPAPRTCETable *tcet; uint32_t liobn = svphb->phb.dma_liobn; if (svphb->iommugroupid == -1) { error_setg(errp, "Wrong IOMMU group ID %d", svphb->iommugroupid); return; } ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid, VFIO_CHECK_EXTENSION, (void *) VFIO_SPAPR_TCE_IOMMU); if (ret != 1) { error_setg_errno(errp, -ret, "spapr-vfio: SPAPR extension is not supported"); return; } ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); if (ret) { error_setg_errno(errp, -ret, "spapr-vfio: get info from container failed"); return; } tcet = spapr_tce_new_table(DEVICE(sphb), liobn, info.dma32_window_start, SPAPR_TCE_PAGE_SHIFT, info.dma32_window_size >> SPAPR_TCE_PAGE_SHIFT, true); if (!tcet) { error_setg(errp, "spapr-vfio: failed to create VFIO TCE table"); return; } /* Register default 32bit DMA window */ memory_region_add_subregion(&sphb->iommu_root, tcet->bus_offset, spapr_tce_get_iommu(tcet)); } | 10,780 |
1 | static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) { return bdrv_co_flush(bs->backing->bs); | 10,781 |
1 | static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { AVStream *st = c->fc->streams[c->fc->nb_streams-1]; MOVStreamContext *sc = (MOVStreamContext *)st->priv_data; int entries, i; print_atom("stss", atom); get_byte(pb); /* version */ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */ entries = get_be32(pb); sc->keyframe_count = entries; #ifdef DEBUG av_log(NULL, AV_LOG_DEBUG, "keyframe_count = %ld\n", sc->keyframe_count); #endif sc->keyframes = (long*) av_malloc(entries * sizeof(long)); if (!sc->keyframes) return -1; for(i=0; i<entries; i++) { sc->keyframes[i] = get_be32(pb); #ifdef DEBUG /* av_log(NULL, AV_LOG_DEBUG, "keyframes[]=%ld\n", sc->keyframes[i]); */ #endif } return 0; } | 10,782 |
1 | static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt) { ConcatContext *cat = avf->priv_data; int ret; int64_t delta; while (1) { if ((ret = av_read_frame(cat->avf, pkt)) != AVERROR_EOF || (ret = open_next_file(avf)) < 0) break; } delta = av_rescale_q(cat->cur_file->start_time - cat->avf->start_time, AV_TIME_BASE_Q, cat->avf->streams[pkt->stream_index]->time_base); if (pkt->pts != AV_NOPTS_VALUE) pkt->pts += delta; if (pkt->dts != AV_NOPTS_VALUE) pkt->dts += delta; } | 10,784 |
1 | static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; int ret; qemu_co_mutex_lock(&s->lock); ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { qemu_co_mutex_unlock(&s->lock); return ret; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { qemu_co_mutex_unlock(&s->lock); return ret; } qemu_co_mutex_unlock(&s->lock); return 0; } | 10,785 |
1 | static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24); #else int i; for (i=0; i<width; i++) { int r= src[i*3+0]; int g= src[i*3+1]; int b= src[i*3+2]; dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); } #endif } | 10,786 |
1 | static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, int src_size) { const uint16_t *end; const uint16_t *mm_end; uint8_t *d = dst; const uint16_t *s = (const uint16_t*)src; end = s + src_size/2; __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory"); mm_end = end - 3; while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" "movq %1, %%mm2 \n\t" "pand %2, %%mm0 \n\t" "pand %3, %%mm1 \n\t" "pand %4, %%mm2 \n\t" "psllq $3, %%mm0 \n\t" "psrlq $3, %%mm1 \n\t" "psrlq $8, %%mm2 \n\t" PACK_RGB32 :"=m"(*d) :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) :"memory"); d += 16; s += 4; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); while (s < end) { register uint16_t bgr; bgr = *s++; *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0xF800)>>8; *d++ = 255; } } | 10,787 |
1 | void helper_sysret(CPUX86State *env, int dflag) { int cpl, selector; if (!(env->efer & MSR_EFER_SCE)) { raise_exception_err(env, EXCP06_ILLOP, 0); } cpl = env->hflags & HF_CPL_MASK; if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { raise_exception_err(env, EXCP0D_GPF, 0); } selector = (env->star >> 48) & 0xffff; if (env->hflags & HF_LMA_MASK) { cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); if (dflag == 2) { cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); env->eip = env->regs[R_ECX]; } else { cpu_x86_load_seg_cache(env, R_CS, selector | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)env->regs[R_ECX]; } cpu_x86_load_seg_cache(env, R_SS, selector + 8, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } else { env->eflags |= IF_MASK; cpu_x86_load_seg_cache(env, R_CS, selector | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)env->regs[R_ECX]; cpu_x86_load_seg_cache(env, R_SS, selector + 8, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } } | 10,788 |
0 | static av_cold int adx_encode_init(AVCodecContext *avctx) { ADXContext *c = avctx->priv_data; if (avctx->channels > 2) return -1; avctx->frame_size = 32; avctx->coded_frame = avcodec_alloc_frame(); avctx->coded_frame->key_frame = 1; /* the cutoff can be adjusted, but this seems to work pretty well */ c->cutoff = 500; ff_adx_calculate_coeffs(c->cutoff, avctx->sample_rate, COEFF_BITS, c->coeff); return 0; } | 10,789 |
0 | static int mpegps_read_header(AVFormatContext *s, AVFormatParameters *ap) { MpegDemuxContext *m = s->priv_data; uint8_t buffer[8192]; char *p; m->header_state = 0xff; s->ctx_flags |= AVFMTCTX_NOHEADER; get_buffer(&s->pb, buffer, sizeof(buffer)); if ((p=memchr(buffer, 'S', sizeof(buffer)))) if (!memcmp(p, "Sofdec", 6)) m->sofdec = 1; url_fseek(&s->pb, -sizeof(buffer), SEEK_CUR); /* no need to do more */ return 0; } | 10,790 |
0 | int get_frame_filename(char *buf, int buf_size, const char *path, int number) { const char *p; char *q, buf1[20]; int nd, len, c, percentd_found; q = buf; p = path; percentd_found = 0; for(;;) { c = *p++; if (c == '\0') break; if (c == '%') { do { nd = 0; while (isdigit(*p)) { nd = nd * 10 + *p++ - '0'; } c = *p++; if (c == '*' && nd > 0) { // The nd field is actually the modulus number = number % nd; c = *p++; nd = 0; } } while (isdigit(c)); switch(c) { case '%': goto addchar; case 'd': if (percentd_found) goto fail; percentd_found = 1; snprintf(buf1, sizeof(buf1), "%0*d", nd, number); len = strlen(buf1); if ((q - buf + len) > buf_size - 1) goto fail; memcpy(q, buf1, len); q += len; break; default: goto fail; } } else { addchar: if ((q - buf) < buf_size - 1) *q++ = c; } } if (!percentd_found) goto fail; *q = '\0'; return 0; fail: *q = '\0'; return -1; } | 10,791 |
1 | static int ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int mbn, blk, num_blocks, num_coeffs, blk_size, scan_pos, run, val, pos, is_intra, mc_type = 0, mv_x, mv_y, col_mask; uint8_t col_flags[8]; int32_t prev_dc, trvec[64]; uint32_t cbp, sym, lo, hi, quant, buf_offs, q; IVIMbInfo *mb; RVMapDesc *rvmap = band->rv_map; void (*mc_with_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); void (*mc_no_delta_func) (int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); const uint16_t *base_tab; const uint8_t *scale_tab; prev_dc = 0; /* init intra prediction for the DC coefficient */ blk_size = band->blk_size; col_mask = blk_size - 1; /* column mask for tracking non-zero coeffs */ num_blocks = (band->mb_size != blk_size) ? 4 : 1; /* number of blocks per mb */ num_coeffs = blk_size * blk_size; if (blk_size == 8) { mc_with_delta_func = ff_ivi_mc_8x8_delta; mc_no_delta_func = ff_ivi_mc_8x8_no_delta; } else { mc_with_delta_func = ff_ivi_mc_4x4_delta; mc_no_delta_func = ff_ivi_mc_4x4_no_delta; } for (mbn = 0, mb = tile->mbs; mbn < tile->num_MBs; mb++, mbn++) { is_intra = !mb->type; cbp = mb->cbp; buf_offs = mb->buf_offs; quant = av_clip(band->glob_quant + mb->q_delta, 0, 23); base_tab = is_intra ? band->intra_base : band->inter_base; scale_tab = is_intra ? band->intra_scale : band->inter_scale; if (scale_tab) quant = scale_tab[quant]; if (!is_intra) { mv_x = mb->mv_x; mv_y = mb->mv_y; if (band->is_halfpel) { mc_type = ((mv_y & 1) << 1) | (mv_x & 1); mv_x >>= 1; mv_y >>= 1; /* convert halfpel vectors into fullpel ones */ } if (mb->type) { int dmv_x, dmv_y, cx, cy; dmv_x = mb->mv_x >> band->is_halfpel; dmv_y = mb->mv_y >> band->is_halfpel; cx = mb->mv_x & band->is_halfpel; cy = mb->mv_y & band->is_halfpel; if ( mb->xpos + dmv_x < 0 || mb->xpos + dmv_x + band->mb_size + cx > band->pitch || mb->ypos + dmv_y < 0 || mb->ypos + dmv_y + band->mb_size + cy > band->aheight) { return AVERROR_INVALIDDATA; } } } for (blk = 0; blk < num_blocks; blk++) { /* adjust block position in the buffer according to its number */ if (blk & 1) { buf_offs += blk_size; } else if (blk == 2) { buf_offs -= blk_size; buf_offs += blk_size * band->pitch; } if (cbp & 1) { /* block coded ? */ if (!band->scan) { av_log(avctx, AV_LOG_ERROR, "Scan pattern is not set.\n"); return AVERROR_INVALIDDATA; } scan_pos = -1; memset(trvec, 0, num_coeffs*sizeof(trvec[0])); /* zero transform vector */ memset(col_flags, 0, sizeof(col_flags)); /* zero column flags */ while (scan_pos <= num_coeffs) { sym = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); if (sym == rvmap->eob_sym) break; /* End of block */ if (sym == rvmap->esc_sym) { /* Escape - run/val explicitly coded using 3 vlc codes */ run = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1) + 1; lo = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); hi = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); val = IVI_TOSIGNED((hi << 6) | lo); /* merge them and convert into signed val */ } else { if (sym >= 256U) { av_log(avctx, AV_LOG_ERROR, "Invalid sym encountered: %d.\n", sym); return -1; } run = rvmap->runtab[sym]; val = rvmap->valtab[sym]; } /* de-zigzag and dequantize */ scan_pos += run; if (scan_pos >= (unsigned)num_coeffs) break; pos = band->scan[scan_pos]; if (!val) av_dlog(avctx, "Val = 0 encountered!\n"); q = (base_tab[pos] * quant) >> 9; if (q > 1) val = val * q + FFSIGN(val) * (((q ^ 1) - 1) >> 1); trvec[pos] = val; col_flags[pos & col_mask] |= !!val; /* track columns containing non-zero coeffs */ }// while if (scan_pos >= num_coeffs && sym != rvmap->eob_sym) return -1; /* corrupt block data */ /* undoing DC coeff prediction for intra-blocks */ if (is_intra && band->is_2d_trans) { prev_dc += trvec[0]; trvec[0] = prev_dc; col_flags[0] |= !!prev_dc; } if(band->transform_size > band->blk_size){ av_log(NULL, AV_LOG_ERROR, "Too large transform\n"); return AVERROR_INVALIDDATA; } /* apply inverse transform */ band->inv_transform(trvec, band->buf + buf_offs, band->pitch, col_flags); /* apply motion compensation */ if (!is_intra) mc_with_delta_func(band->buf + buf_offs, band->ref_buf + buf_offs + mv_y * band->pitch + mv_x, band->pitch, mc_type); } else { /* block not coded */ /* for intra blocks apply the dc slant transform */ /* for inter - perform the motion compensation without delta */ if (is_intra) { band->dc_transform(&prev_dc, band->buf + buf_offs, band->pitch, blk_size); } else mc_no_delta_func(band->buf + buf_offs, band->ref_buf + buf_offs + mv_y * band->pitch + mv_x, band->pitch, mc_type); } cbp >>= 1; }// for blk }// for mbn align_get_bits(gb); return 0; } | 10,792
1 | static inline int handle_cpu_signal(uintptr_t pc, unsigned long address, int is_write, sigset_t *old_set) { CPUState *cpu; CPUClass *cc; int ret; #if defined(DEBUG_SIGNAL) printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", pc, address, is_write, *(unsigned long *)old_set); #endif /* XXX: locking issue */ if (is_write && h2g_valid(address)) { switch (page_unprotect(h2g(address), pc)) { case 0: /* Fault not caused by a page marked unwritable to protect * cached translations, must be the guest binary's problem */ break; case 1: /* Fault caused by protection of cached translation; TBs * invalidated, so resume execution */ return 1; case 2: /* Fault caused by protection of cached translation, and the * currently executing TB was modified and must be exited * immediately. */ cpu_exit_tb_from_sighandler(current_cpu, old_set); g_assert_not_reached(); default: g_assert_not_reached(); } } /* Convert forcefully to guest address space, invalid addresses are still valid segv ones */ address = h2g_nocheck(address); cpu = current_cpu; cc = CPU_GET_CLASS(cpu); /* see if it is an MMU fault */ g_assert(cc->handle_mmu_fault); ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX); if (ret < 0) { return 0; /* not an MMU fault */ } if (ret == 0) { return 1; /* the MMU fault was handled without causing real CPU fault */ } /* Now we have a real cpu fault. Since this is the exact location of * the exception, we must undo the adjustment done by cpu_restore_state * for handling call return addresses. */ cpu_restore_state(cpu, pc + GETPC_ADJ); sigprocmask(SIG_SETMASK, old_set, NULL); cpu_loop_exit(cpu); /* never comes here */ return 1; } | 10,793 |
1 | static void put_bool(QEMUFile *f, void *pv, size_t size) { bool *v = pv; qemu_put_byte(f, *v); } | 10,794 |
1 | static void uhci_frame_timer(void *opaque) { UHCIState *s = opaque; /* prepare the timer for the next frame */ s->expire_time += (get_ticks_per_sec() / FRAME_TIMER_FREQ); s->frame_bytes = 0; s->completions_only = false; qemu_bh_cancel(s->bh); if (!(s->cmd & UHCI_CMD_RS)) { /* Full stop */ trace_usb_uhci_schedule_stop(); qemu_del_timer(s->frame_timer); uhci_async_cancel_all(s); /* set hchalted bit in status - UHCI11D 2.1.2 */ s->status |= UHCI_STS_HCHALTED; return; } /* Process the current frame */ trace_usb_uhci_frame_start(s->frnum); uhci_async_validate_begin(s); uhci_process_frame(s); uhci_async_validate_end(s); /* The uhci spec says frnum reflects the frame currently being processed, * and the guest must look at frnum - 1 on interrupt, so inc frnum now */ s->frnum = (s->frnum + 1) & 0x7ff; /* Complete the previous frame */ if (s->pending_int_mask) { s->status2 |= s->pending_int_mask; s->status |= UHCI_STS_USBINT; uhci_update_irq(s); } s->pending_int_mask = 0; qemu_mod_timer(s->frame_timer, s->expire_time); } | 10,795 |
1 | static TCGv gen_vfp_mrs(void) { TCGv tmp = new_tmp(); tcg_gen_mov_i32(tmp, cpu_F0s); return tmp; } | 10,796 |
1 | static int epzs_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int P[5][2], int pred_x, int pred_y, int xmin, int ymin, int xmax, int ymax) { int best[2]={0, 0}; int d, dmin; UINT8 *new_pic, *old_pic; const int pic_stride= s->linesize; const int pic_xy= (s->mb_y*pic_stride + s->mb_x)*16; UINT16 *mv_penalty= s->mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame int quant= s->qscale; // qscale of the prev frame const int shift= 1+s->quarter_sample; new_pic = s->new_picture[0] + pic_xy; old_pic = s->last_picture[0] + pic_xy; //printf("%d %d %d %d\n", xmin, ymin, xmax, ymax); dmin = pix_abs16x16(new_pic, old_pic, pic_stride, 16); if(dmin<Z_THRESHOLD){ *mx_ptr= 0; *my_ptr= 0; //printf("Z"); return dmin; } /* first line */ if ((s->mb_y == 0 || s->first_slice_line || s->first_gob_line)) { CHECK_MV(P[1][0]>>shift, P[1][1]>>shift) }else{ CHECK_MV(P[4][0]>>shift, P[4][1]>>shift) if(dmin<Z_THRESHOLD){ *mx_ptr= P[4][0]>>shift; *my_ptr= P[4][1]>>shift; //printf("M\n"); return dmin; } CHECK_MV(P[1][0]>>shift, P[1][1]>>shift) CHECK_MV(P[2][0]>>shift, P[2][1]>>shift) CHECK_MV(P[3][0]>>shift, P[3][1]>>shift) } CHECK_MV(P[0][0]>>shift, P[0][1]>>shift) dmin= small_diamond_search(s, best, dmin, new_pic, old_pic, pic_stride, pred_x, pred_y, mv_penalty, quant, xmin, ymin, xmax, ymax, shift); *mx_ptr= best[0]; *my_ptr= best[1]; // printf("%d %d %d \n", best[0], best[1], dmin); return dmin; } | 10,797 |
1 | void acpi_build(AcpiBuildTables *tables, MachineState *machine) { PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); GArray *table_offsets; unsigned facs, dsdt, rsdt, fadt; AcpiPmInfo pm; AcpiMiscInfo misc; AcpiMcfgInfo mcfg; Range pci_hole, pci_hole64; uint8_t *u; size_t aml_len = 0; GArray *tables_blob = tables->table_data; AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL }; acpi_get_pm_info(&pm); acpi_get_misc_info(&misc); acpi_get_pci_holes(&pci_hole, &pci_hole64); acpi_get_slic_oem(&slic_oem); table_offsets = g_array_new(false, true /* clear */, sizeof(uint32_t)); ACPI_BUILD_DPRINTF("init ACPI tables\n"); bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE, tables_blob, 64 /* Ensure FACS is aligned */, false /* high memory */); /* * FACS is pointed to by FADT. * We place it first since it's the only table that has alignment * requirements. */ facs = tables_blob->len; build_facs(tables_blob, tables->linker); /* DSDT is pointed to by FADT */ dsdt = tables_blob->len; build_dsdt(tables_blob, tables->linker, &pm, &misc, &pci_hole, &pci_hole64, machine); /* Count the size of the DSDT and SSDT, we will need it for legacy * sizing of ACPI tables. */ aml_len += tables_blob->len - dsdt; /* ACPI tables pointed to by RSDT */ fadt = tables_blob->len; acpi_add_table(table_offsets, tables_blob); build_fadt(tables_blob, tables->linker, &pm, facs, dsdt, slic_oem.id, slic_oem.table_id); aml_len += tables_blob->len - fadt; acpi_add_table(table_offsets, tables_blob); build_madt(tables_blob, tables->linker, pcms); if (misc.has_hpet) { acpi_add_table(table_offsets, tables_blob); build_hpet(tables_blob, tables->linker); } if (misc.tpm_version != TPM_VERSION_UNSPEC) { acpi_add_table(table_offsets, tables_blob); build_tpm_tcpa(tables_blob, tables->linker, tables->tcpalog); if (misc.tpm_version == TPM_VERSION_2_0) { acpi_add_table(table_offsets, tables_blob); build_tpm2(tables_blob, tables->linker); } } if (pcms->numa_nodes) { acpi_add_table(table_offsets, tables_blob); build_srat(tables_blob, tables->linker, machine); } if (acpi_get_mcfg(&mcfg)) { acpi_add_table(table_offsets, tables_blob); build_mcfg_q35(tables_blob, tables->linker, &mcfg); } if (x86_iommu_get_default()) { IommuType IOMMUType = x86_iommu_get_type(); if (IOMMUType == TYPE_AMD) { acpi_add_table(table_offsets, tables_blob); build_amd_iommu(tables_blob, tables->linker); } else if (IOMMUType == TYPE_INTEL) { acpi_add_table(table_offsets, tables_blob); build_dmar_q35(tables_blob, tables->linker); } } if (pcms->acpi_nvdimm_state.is_enabled) { nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, pcms->acpi_nvdimm_state.dsm_mem, machine->ram_slots); } /* Add tables supplied by user (if any) */ for (u = acpi_table_first(); u; u = acpi_table_next(u)) { unsigned len = acpi_table_len(u); acpi_add_table(table_offsets, tables_blob); g_array_append_vals(tables_blob, u, len); } /* RSDT is pointed to by RSDP */ rsdt = tables_blob->len; build_rsdt(tables_blob, tables->linker, table_offsets, slic_oem.id, slic_oem.table_id); /* RSDP is in FSEG memory, so allocate it separately */ build_rsdp(tables->rsdp, tables->linker, rsdt); /* We'll expose it all to Guest so we want to reduce * chance of size changes. * * We used to align the tables to 4k, but of course this would * too simple to be enough. 4k turned out to be too small an * alignment very soon, and in fact it is almost impossible to * keep the table size stable for all (max_cpus, max_memory_slots) * combinations. So the table size is always 64k for pc-i440fx-2.1 * and we give an error if the table grows beyond that limit. * * We still have the problem of migrating from "-M pc-i440fx-2.0". For * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables * than 2.0 and we can always pad the smaller tables with zeros. We can * then use the exact size of the 2.0 tables. * * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration. */ if (pcmc->legacy_acpi_table_size) { /* Subtracting aml_len gives the size of fixed tables. Then add the * size of the PIIX4 DSDT/SSDT in QEMU 2.0. */ int legacy_aml_len = pcmc->legacy_acpi_table_size + ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus; int legacy_table_size = ROUND_UP(tables_blob->len - aml_len + legacy_aml_len, ACPI_BUILD_ALIGN_SIZE); if (tables_blob->len > legacy_table_size) { /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ error_report("Warning: migration may not work."); } g_array_set_size(tables_blob, legacy_table_size); } else { /* Make sure we have a buffer in case we need to resize the tables. */ if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ error_report("Warning: ACPI tables are larger than 64k."); error_report("Warning: migration may not work."); error_report("Warning: please remove CPUs, NUMA nodes, " "memory slots or PCI bridges."); } acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); } acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); /* Cleanup memory that's no longer used. */ g_array_free(table_offsets, true); } | 10,798
1 | void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width){ const int w2= (width+1)>>1; // SSE2 code runs faster with pointers aligned on a 32-byte boundary. DWTELEM temp_buf[(width>>1) + 4]; DWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2); const int w_l= (width>>1); const int w_r= w2 - 1; int i; { // Lift 0 DWTELEM * const ref = b + w2 - 1; DWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice // (the first time erroneously), we allow the SSE2 code to run an extra pass. // The savings in code and time are well worth having to store this value and // calculate b[0] correctly afterwards. i = 0; asm volatile( "pcmpeqd %%xmm7, %%xmm7 \n\t" "pslld $31, %%xmm7 \n\t" "psrld $29, %%xmm7 \n\t" ::); for(; i<w_l-7; i+=8){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 4(%1), %%xmm2 \n\t" "movdqu 20(%1), %%xmm6 \n\t" "paddd %%xmm1, %%xmm2 \n\t" "paddd %%xmm5, %%xmm6 \n\t" "movdqa %%xmm2, %%xmm0 \n\t" "movdqa %%xmm6, %%xmm4 \n\t" "paddd %%xmm2, %%xmm2 \n\t" "paddd %%xmm6, %%xmm6 \n\t" "paddd %%xmm0, %%xmm2 \n\t" "paddd %%xmm4, %%xmm6 \n\t" "paddd %%xmm7, %%xmm2 \n\t" "paddd %%xmm7, %%xmm6 \n\t" "psrad $3, %%xmm2 \n\t" "psrad $3, %%xmm6 \n\t" "movdqa (%0), %%xmm0 \n\t" "movdqa 16(%0), %%xmm4 \n\t" "psubd %%xmm2, %%xmm0 \n\t" "psubd %%xmm6, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); } { // Lift 1 DWTELEM * const dst = b+w2; i = 0; for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){ dst[i] = dst[i] - (b[i] + b[i + 1]); } for(; i<w_r-7; i+=8){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 4(%1), %%xmm2 \n\t" "movdqu 20(%1), %%xmm6 \n\t" "paddd %%xmm1, %%xmm2 \n\t" "paddd %%xmm5, %%xmm6 \n\t" "movdqa (%0), %%xmm0 \n\t" "movdqa 16(%0), %%xmm4 \n\t" "psubd %%xmm2, %%xmm0 \n\t" "psubd %%xmm6, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&dst[i]), "r"(&b[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); } { // Lift 2 DWTELEM * const ref = b+w2 - 1; DWTELEM b_0 = b[0]; i = 0; asm volatile( "pslld $1, %%xmm7 \n\t" ::); for(; i<w_l-7; i+=8){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 4(%1), %%xmm0 \n\t" "movdqu 20(%1), %%xmm4 \n\t" //FIXME try aligned reads and shifts "paddd %%xmm1, %%xmm0 \n\t" "paddd %%xmm5, %%xmm4 \n\t" "paddd %%xmm7, %%xmm0 \n\t" "paddd %%xmm7, %%xmm4 \n\t" "movdqa (%0), %%xmm1 \n\t" "movdqa 16(%0), %%xmm5 \n\t" "psrad $2, %%xmm0 \n\t" "psrad $2, %%xmm4 \n\t" "paddd %%xmm1, %%xmm0 \n\t" "paddd %%xmm5, %%xmm4 \n\t" "psrad $2, %%xmm0 \n\t" "psrad $2, %%xmm4 \n\t" "paddd %%xmm1, %%xmm0 \n\t" "paddd %%xmm5, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS); } { // Lift 3 DWTELEM * const src = b+w2; i = 0; for(; (((long)&temp[i]) & 0xF) && i<w_r; i++){ temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS); } for(; i<w_r-7; i+=8){ asm volatile( "movdqu 4(%1), %%xmm2 \n\t" "movdqu 20(%1), %%xmm6 \n\t" "paddd (%1), %%xmm2 \n\t" "paddd 16(%1), %%xmm6 \n\t" "movdqu (%0), %%xmm0 \n\t" "movdqu 16(%0), %%xmm4 \n\t" "paddd %%xmm2, %%xmm0 \n\t" "paddd %%xmm6, %%xmm4 \n\t" "psrad $1, %%xmm2 \n\t" "psrad $1, %%xmm6 \n\t" "paddd %%xmm0, %%xmm2 \n\t" "paddd %%xmm4, %%xmm6 \n\t" "movdqa %%xmm2, (%2) \n\t" "movdqa %%xmm6, 16(%2) \n\t" :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); } { snow_interleave_line_header(&i, width, b, temp); for (; (i & 0x1E) != 0x1E; i-=2){ b[i+1] = temp[i>>1]; b[i] = b[i>>1]; } for (i-=30; i>=0; i-=32){ asm volatile( "movdqa (%1), %%xmm0 \n\t" "movdqa 16(%1), %%xmm2 \n\t" "movdqa 32(%1), %%xmm4 \n\t" "movdqa 48(%1), %%xmm6 \n\t" "movdqa (%1), %%xmm1 \n\t" "movdqa 16(%1), %%xmm3 \n\t" "movdqa 32(%1), %%xmm5 \n\t" "movdqa 48(%1), %%xmm7 \n\t" "punpckldq (%2), %%xmm0 \n\t" "punpckldq 16(%2), %%xmm2 \n\t" "punpckldq 32(%2), %%xmm4 \n\t" "punpckldq 48(%2), %%xmm6 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm2, 32(%0) \n\t" "movdqa %%xmm4, 64(%0) \n\t" "movdqa %%xmm6, 96(%0) \n\t" "punpckhdq (%2), %%xmm1 \n\t" "punpckhdq 16(%2), %%xmm3 \n\t" "punpckhdq 32(%2), %%xmm5 \n\t" "punpckhdq 48(%2), %%xmm7 \n\t" "movdqa %%xmm1, 16(%0) \n\t" "movdqa %%xmm3, 48(%0) \n\t" "movdqa %%xmm5, 80(%0) \n\t" "movdqa %%xmm7, 112(%0) \n\t" :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1]) : "memory" ); } } } | 10,799
1 | PPC_OP(btest_T1) { if (T0) { regs->nip = T1 & ~3; } else { regs->nip = PARAM1; } RETURN(); } | 10,800 |
1 | static BlockDriverState *bdrv_open_inherit(const char *filename, const char *reference, QDict *options, int flags, BlockDriverState *parent, const BdrvChildRole *child_role, Error **errp) { int ret; BlockBackend *file = NULL; BlockDriverState *bs; BlockDriver *drv = NULL; const char *drvname; const char *backing; Error *local_err = NULL; QDict *snapshot_options = NULL; int snapshot_flags = 0; assert(!child_role || !flags); assert(!child_role == !parent); if (reference) { bool options_non_empty = options ? qdict_size(options) : false; QDECREF(options); if (filename || options_non_empty) { error_setg(errp, "Cannot reference an existing block device with " "additional options or a new filename"); return NULL; } bs = bdrv_lookup_bs(reference, reference, errp); if (!bs) { return NULL; } bdrv_ref(bs); return bs; } bs = bdrv_new(); /* NULL means an empty set of options */ if (options == NULL) { options = qdict_new(); } /* json: syntax counts as explicit options, as if in the QDict */ parse_json_protocol(options, &filename, &local_err); if (local_err) { goto fail; } bs->explicit_options = qdict_clone_shallow(options); if (child_role) { bs->inherits_from = parent; child_role->inherit_options(&flags, options, parent->open_flags, parent->options); } ret = bdrv_fill_options(&options, filename, &flags, &local_err); if (local_err) { goto fail; } /* * Set the BDRV_O_RDWR and BDRV_O_ALLOW_RDWR flags. * Caution: getting a boolean member of @options requires care. * When @options come from -blockdev or blockdev_add, members are * typed according to the QAPI schema, but when they come from * -drive, they're all QString. */ if (g_strcmp0(qdict_get_try_str(options, BDRV_OPT_READ_ONLY), "on") && !qdict_get_try_bool(options, BDRV_OPT_READ_ONLY, false)) { flags |= (BDRV_O_RDWR | BDRV_O_ALLOW_RDWR); } else { flags &= ~BDRV_O_RDWR; } if (flags & BDRV_O_SNAPSHOT) { snapshot_options = qdict_new(); bdrv_temp_snapshot_options(&snapshot_flags, snapshot_options, flags, options); /* Let bdrv_backing_options() override "read-only" */ qdict_del(options, BDRV_OPT_READ_ONLY); bdrv_backing_options(&flags, options, flags, options); } bs->open_flags = flags; bs->options = options; options = qdict_clone_shallow(options); /* Find the right image format driver */ /* See cautionary note on accessing @options above */ drvname = qdict_get_try_str(options, "driver"); if (drvname) { drv = bdrv_find_format(drvname); if (!drv) { error_setg(errp, "Unknown driver: '%s'", drvname); goto fail; } } assert(drvname || !(flags & BDRV_O_PROTOCOL)); /* See cautionary note on accessing @options above */ backing = qdict_get_try_str(options, "backing"); if (backing && *backing == '\0') { flags |= BDRV_O_NO_BACKING; qdict_del(options, "backing"); } /* Open image file without format layer. This BlockBackend is only used for * probing, the block drivers will do their own bdrv_open_child() for the * same BDS, which is why we put the node name back into options. */ if ((flags & BDRV_O_PROTOCOL) == 0) { BlockDriverState *file_bs; file_bs = bdrv_open_child_bs(filename, options, "file", bs, &child_file, true, &local_err); if (local_err) { goto fail; } if (file_bs != NULL) { file = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); blk_insert_bs(file, file_bs, &local_err); bdrv_unref(file_bs); if (local_err) { goto fail; } qdict_put_str(options, "file", bdrv_get_node_name(file_bs)); } } /* Image format probing */ bs->probed = !drv; if (!drv && file) { ret = find_image_format(file, filename, &drv, &local_err); if (ret < 0) { goto fail; } /* * This option update would logically belong in bdrv_fill_options(), * but we first need to open bs->file for the probing to work, while * opening bs->file already requires the (mostly) final set of options * so that cache mode etc. can be inherited. * * Adding the driver later is somewhat ugly, but it's not an option * that would ever be inherited, so it's correct. We just need to make * sure to update both bs->options (which has the full effective * options for bs) and options (which has file.* already removed). */ qdict_put_str(bs->options, "driver", drv->format_name); qdict_put_str(options, "driver", drv->format_name); } else if (!drv) { error_setg(errp, "Must specify either driver or file"); goto fail; } /* BDRV_O_PROTOCOL must be set iff a protocol BDS is about to be created */ assert(!!(flags & BDRV_O_PROTOCOL) == !!drv->bdrv_file_open); /* file must be NULL if a protocol BDS is about to be created * (the inverse results in an error message from bdrv_open_common()) */ assert(!(flags & BDRV_O_PROTOCOL) || !file); /* Open the image */ ret = bdrv_open_common(bs, file, options, &local_err); if (ret < 0) { goto fail; } if (file) { blk_unref(file); file = NULL; } /* If there is a backing file, use it */ if ((flags & BDRV_O_NO_BACKING) == 0) { ret = bdrv_open_backing_file(bs, options, "backing", &local_err); if (ret < 0) { goto close_and_fail; } } bdrv_refresh_filename(bs); /* Check if any unknown options were used */ if (qdict_size(options) != 0) { const QDictEntry *entry = qdict_first(options); if (flags & BDRV_O_PROTOCOL) { error_setg(errp, "Block protocol '%s' doesn't support the option " "'%s'", drv->format_name, entry->key); } else { error_setg(errp, "Block format '%s' does not support the option '%s'", drv->format_name, entry->key); } goto close_and_fail; } bdrv_parent_cb_change_media(bs, true); QDECREF(options); /* For snapshot=on, create a temporary qcow2 overlay. bs points to the * temporary snapshot afterwards. */ if (snapshot_flags) { BlockDriverState *snapshot_bs; snapshot_bs = bdrv_append_temp_snapshot(bs, snapshot_flags, snapshot_options, &local_err); snapshot_options = NULL; if (local_err) { goto close_and_fail; } /* We are not going to return bs but the overlay on top of it * (snapshot_bs); thus, we have to drop the strong reference to bs * (which we obtained by calling bdrv_new()). bs will not be deleted, * though, because the overlay still has a reference to it. */ bdrv_unref(bs); bs = snapshot_bs; } return bs; fail: blk_unref(file); if (bs->file != NULL) { bdrv_unref_child(bs, bs->file); } QDECREF(snapshot_options); QDECREF(bs->explicit_options); QDECREF(bs->options); QDECREF(options); bs->options = NULL; bs->explicit_options = NULL; bdrv_unref(bs); error_propagate(errp, local_err); return NULL; close_and_fail: bdrv_unref(bs); QDECREF(snapshot_options); QDECREF(options); error_propagate(errp, local_err); return NULL; } | 10,802
0 | static void decode(Real288_internal *glob, float gain, int cb_coef) { unsigned int x, y; float f; double sum, sumsum; float *p1, *p2; float buffer[5]; for (x=36; x--; glob->sb[x+5] = glob->sb[x]); for (x=5; x--;) { p1 = glob->sb+x; p2 = glob->pr1; for (sum=0, y=36; y--; sum -= (*(++p1))*(*(p2++))); glob->sb[x] = sum; } /* convert log and do rms */ for (sum=32, x=10; x--; sum -= glob->pr2[x] * glob->lhist[x]); if (sum < 0) sum = 0; else if (sum > 60) sum = 60; sumsum = exp(sum * 0.1151292546497) * gain; /* pow(10.0,sum/20)*f */ for (sum=0, x=5; x--;) { buffer[x] = codetable[cb_coef][x] * sumsum; sum += buffer[x] * buffer[x]; } if ((sum /= 5) < 1) sum = 1; /* shift and store */ for (x=10; --x; glob->lhist[x] = glob->lhist[x-1]); *glob->lhist = glob->history[glob->phase] = 10 * log10(sum) - 32; for (x=1; x < 5; x++) for (y=x; y--; buffer[x] -= glob->pr1[x-y-1] * buffer[y]); /* output */ for (x=0; x < 5; x++) { f = glob->sb[4-x] + buffer[x]; if (f > 4095) f = 4095; else if (f < -4095) f = -4095; glob->output[glob->phasep+x] = glob->sb[4-x] = f; } } | 10,804 |
0 | int url_open_dyn_packet_buf(AVIOContext **s, int max_packet_size) { if (max_packet_size <= 0) return -1; return url_open_dyn_buf_internal(s, max_packet_size); } | 10,805 |
0 | static int ffmal_update_format(AVCodecContext *avctx) { MMALDecodeContext *ctx = avctx->priv_data; MMAL_STATUS_T status; int ret = 0; MMAL_COMPONENT_T *decoder = ctx->decoder; MMAL_ES_FORMAT_T *format_out = decoder->output[0]->format; ffmmal_poolref_unref(ctx->pool_out); if (!(ctx->pool_out = av_mallocz(sizeof(*ctx->pool_out)))) { ret = AVERROR(ENOMEM); goto fail; } atomic_store(&ctx->pool_out->refcount, 1); if (!format_out) goto fail; if ((status = mmal_port_parameter_set_uint32(decoder->output[0], MMAL_PARAMETER_EXTRA_BUFFERS, ctx->extra_buffers))) goto fail; if ((status = mmal_port_parameter_set_boolean(decoder->output[0], MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS, 0))) goto fail; if (avctx->pix_fmt == AV_PIX_FMT_MMAL) { format_out->encoding = MMAL_ENCODING_OPAQUE; } else { format_out->encoding_variant = format_out->encoding = MMAL_ENCODING_I420; } if ((status = mmal_port_format_commit(decoder->output[0]))) goto fail; if ((ret = ff_set_dimensions(avctx, format_out->es->video.crop.x + format_out->es->video.crop.width, format_out->es->video.crop.y + format_out->es->video.crop.height)) < 0) goto fail; if (format_out->es->video.par.num && format_out->es->video.par.den) { avctx->sample_aspect_ratio.num = format_out->es->video.par.num; avctx->sample_aspect_ratio.den = format_out->es->video.par.den; } avctx->colorspace = ffmmal_csp_to_av_csp(format_out->es->video.color_space); decoder->output[0]->buffer_size = FFMAX(decoder->output[0]->buffer_size_min, decoder->output[0]->buffer_size_recommended); decoder->output[0]->buffer_num = FFMAX(decoder->output[0]->buffer_num_min, decoder->output[0]->buffer_num_recommended) + ctx->extra_buffers; ctx->pool_out->pool = mmal_pool_create(decoder->output[0]->buffer_num, decoder->output[0]->buffer_size); if (!ctx->pool_out->pool) { ret = AVERROR(ENOMEM); goto fail; } return 0; fail: return ret < 0 ? ret : AVERROR_UNKNOWN; } | 10,806 |
1 | static void gen_stswx(DisasContext *ctx) { TCGv t0; TCGv_i32 t1, t2; gen_set_access_type(ctx, ACCESS_INT); /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); t1 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t1, cpu_xer); tcg_gen_andi_i32(t1, t1, 0x7F); t2 = tcg_const_i32(rS(ctx->opcode)); gen_helper_stsw(cpu_env, t0, t1, t2); tcg_temp_free(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); } | 10,807 |
1 | uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) { uint64_t result; DO_ABD(result, a, b, int32_t); return result; } | 10,808 |
1 | static int stdio_fclose(void *opaque) { QEMUFileStdio *s = opaque; int ret = 0; if (qemu_file_is_writable(s->file)) { int fd = fileno(s->stdio_file); struct stat st; ret = fstat(fd, &st); if (ret == 0 && S_ISREG(st.st_mode)) { /* * If the file handle is a regular file make sure the * data is flushed to disk before signaling success. */ ret = fsync(fd); if (ret != 0) { ret = -errno; return ret; } } } if (fclose(s->stdio_file) == EOF) { ret = -errno; } g_free(s); return ret; } | 10,809 |
1 | static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env, ppc_slb_t *slb, target_ulong eaddr, ppc_hash_pte64_t *pte) { hwaddr pteg_off, pte_offset; hwaddr hash; uint64_t vsid, epnshift, epnmask, epn, ptem; /* Page size according to the SLB, which we use to generate the * EPN for hash table lookup.. When we implement more recent MMU * extensions this might be different from the actual page size * encoded in the PTE */ epnshift = (slb->vsid & SLB_VSID_L) ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; epnmask = ~((1ULL << epnshift) - 1); if (slb->vsid & SLB_VSID_B) { /* 1TB segment */ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask; hash = vsid ^ (vsid << 25) ^ (epn >> epnshift); } else { /* 256M segment */ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask; hash = vsid ^ (epn >> epnshift); } ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN); /* Page address translation */ LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx " hash " TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, hash); /* Primary PTEG lookup */ LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ptem, hash); pteg_off = (hash * HASH_PTEG_SIZE_64) & env->htab_mask; pte_offset = ppc_hash64_pteg_search(env, pteg_off, 0, ptem, pte); if (pte_offset == -1) { /* Secondary PTEG lookup */ LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ptem, ~hash); pteg_off = (~hash * HASH_PTEG_SIZE_64) & env->htab_mask; pte_offset = ppc_hash64_pteg_search(env, pteg_off, 1, ptem, pte); } return pte_offset; } | 10,810 |
0 | static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias) { // init first elem to 1 to avoid div by 0 in convert_matrix uint16_t weight_matrix[64] = {1,}; // convert_matrix needs uint16_t* int qscale, i; CHECKED_ALLOCZ(ctx->qmatrix_l, (ctx->m.avctx->qmax+1) * 64 * sizeof(int)); CHECKED_ALLOCZ(ctx->qmatrix_c, (ctx->m.avctx->qmax+1) * 64 * sizeof(int)); CHECKED_ALLOCZ(ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t)); CHECKED_ALLOCZ(ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t)); for (i = 1; i < 64; i++) { int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; weight_matrix[j] = ctx->cid_table->luma_weight[i]; } ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix, ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1); for (i = 1; i < 64; i++) { int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]]; weight_matrix[j] = ctx->cid_table->chroma_weight[i]; } ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix, ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1); for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) { for (i = 0; i < 64; i++) { ctx->qmatrix_l [qscale] [i] <<= 2; ctx->qmatrix_c [qscale] [i] <<= 2; ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2; ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2; } } return 0; fail: return -1; } | 10,811 |
0 | static av_cold int a64multi_close_encoder(AVCodecContext *avctx) { A64Context *c = avctx->priv_data; av_frame_free(&avctx->coded_frame); av_free(c->mc_meta_charset); av_free(c->mc_best_cb); av_free(c->mc_charset); av_free(c->mc_charmap); av_free(c->mc_colram); return 0; } | 10,812 |
1 | void helper_stf_asi(target_ulong addr, int asi, int size, int rd) { unsigned int i; target_ulong val = 0; helper_check_align(addr, 3); addr = asi_address_mask(env, asi, addr); switch (asi) { case 0xe0: // UA2007 Block commit store primary (cache flush) case 0xe1: // UA2007 Block commit store secondary (cache flush) case 0xf0: // Block store primary case 0xf1: // Block store secondary case 0xf8: // Block store primary LE case 0xf9: // Block store secondary LE helper_st_asi(addr, val, asi & 0x8f, 4); default: break; switch(size) { default: case 4: val = *((uint32_t *)&env->fpr[rd]); break; case 8: val = *((int64_t *)&DT0); break; case 16: // XXX break; helper_st_asi(addr, val, asi, size); | 10,814 |
1 | static int drive_add(const char *file, const char *fmt, ...) { va_list ap; int index = drive_opt_get_free_idx(); if (nb_drives_opt >= MAX_DRIVES || index == -1) { fprintf(stderr, "qemu: too many drives\n"); exit(1); } drives_opt[index].file = file; va_start(ap, fmt); vsnprintf(drives_opt[index].opt, sizeof(drives_opt[0].opt), fmt, ap); va_end(ap); nb_drives_opt++; return index; } | 10,816 |
1 | int avcodec_close(AVCodecContext *avctx) { entangled_thread_counter++; if(entangled_thread_counter != 1){ av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); entangled_thread_counter--; return -1; } if (ENABLE_THREADS && avctx->thread_opaque) avcodec_thread_free(avctx); if (avctx->codec->close) avctx->codec->close(avctx); avcodec_default_free_buffers(avctx); av_freep(&avctx->priv_data); avctx->codec = NULL; entangled_thread_counter--; return 0; } | 10,817 |
1 | void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output) { int i; uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL; if (ic->nb_streams && !printed) return; av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", is_output ? "Output" : "Input", index, is_output ? ic->oformat->name : ic->iformat->name, is_output ? "to" : "from", url); dump_metadata(NULL, ic->metadata, " "); if (!is_output) { av_log(NULL, AV_LOG_INFO, " Duration: "); if (ic->duration != AV_NOPTS_VALUE) { int hours, mins, secs, us; int64_t duration = ic->duration + (ic->duration <= INT64_MAX - 5000 ? 5000 : 0); secs = duration / AV_TIME_BASE; us = duration % AV_TIME_BASE; mins = secs / 60; secs %= 60; hours = mins / 60; mins %= 60; av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs, (100 * us) / AV_TIME_BASE); } else { av_log(NULL, AV_LOG_INFO, "N/A"); } if (ic->start_time != AV_NOPTS_VALUE) { int secs, us; av_log(NULL, AV_LOG_INFO, ", start: "); secs = ic->start_time / AV_TIME_BASE; us = llabs(ic->start_time % AV_TIME_BASE); av_log(NULL, AV_LOG_INFO, "%d.%06d", secs, (int) av_rescale(us, 1000000, AV_TIME_BASE)); } av_log(NULL, AV_LOG_INFO, ", bitrate: "); if (ic->bit_rate) av_log(NULL, AV_LOG_INFO, "%"PRId64" kb/s", (int64_t)ic->bit_rate / 1000); else av_log(NULL, AV_LOG_INFO, "N/A"); av_log(NULL, AV_LOG_INFO, "\n"); } for (i = 0; i < ic->nb_chapters; i++) { AVChapter *ch = ic->chapters[i]; av_log(NULL, AV_LOG_INFO, " Chapter #%d:%d: ", index, i); av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base)); av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base)); dump_metadata(NULL, ch->metadata, " "); } if (ic->nb_programs) { int j, k, total = 0; for (j = 0; j < ic->nb_programs; j++) { AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata, "name", NULL, 0); av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id, name ? name->value : ""); dump_metadata(NULL, ic->programs[j]->metadata, " "); for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) { dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); printed[ic->programs[j]->stream_index[k]] = 1; } total += ic->programs[j]->nb_stream_indexes; } if (total < ic->nb_streams) av_log(NULL, AV_LOG_INFO, " No Program\n"); } for (i = 0; i < ic->nb_streams; i++) if (!printed[i]) dump_stream_format(ic, i, index, is_output); av_free(printed); } | 10,818 |
1 | static void vp8_idct_add_c(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride) { int i, t0, t1, t2, t3; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; DCTELEM tmp[16]; for (i = 0; i < 4; i++) { t0 = block[0*4+i] + block[2*4+i]; t1 = block[0*4+i] - block[2*4+i]; t2 = MUL_35468(block[1*4+i]) - MUL_20091(block[3*4+i]); t3 = MUL_20091(block[1*4+i]) + MUL_35468(block[3*4+i]); block[0*4+i] = 0; block[1*4+i] = 0; block[2*4+i] = 0; block[3*4+i] = 0; tmp[i*4+0] = t0 + t3; tmp[i*4+1] = t1 + t2; tmp[i*4+2] = t1 - t2; tmp[i*4+3] = t0 - t3; } for (i = 0; i < 4; i++) { t0 = tmp[0*4+i] + tmp[2*4+i]; t1 = tmp[0*4+i] - tmp[2*4+i]; t2 = MUL_35468(tmp[1*4+i]) - MUL_20091(tmp[3*4+i]); t3 = MUL_20091(tmp[1*4+i]) + MUL_35468(tmp[3*4+i]); dst[0] = cm[dst[0] + ((t0 + t3 + 4) >> 3)]; dst[1] = cm[dst[1] + ((t1 + t2 + 4) >> 3)]; dst[2] = cm[dst[2] + ((t1 - t2 + 4) >> 3)]; dst[3] = cm[dst[3] + ((t0 - t3 + 4) >> 3)]; dst += stride; } } | 10,819 |
1 | static int get_next_block(DumpState *s, RAMBlock *block) { while (1) { block = QTAILQ_NEXT(block, next); if (!block) { /* no more block */ return 1; } s->start = 0; s->block = block; if (s->has_filter) { if (block->offset >= s->begin + s->length || block->offset + block->length <= s->begin) { /* This block is out of the range */ continue; } if (s->begin > block->offset) { s->start = s->begin - block->offset; } } return 0; } } | 10,821 |
1 | static void scsi_req_xfer_mode(SCSIRequest *req) { switch (req->cmd.buf[0]) { case WRITE_6: case WRITE_10: case WRITE_VERIFY: case WRITE_12: case WRITE_VERIFY_12: case WRITE_16: case WRITE_VERIFY_16: case COPY: case COPY_VERIFY: case COMPARE: case CHANGE_DEFINITION: case LOG_SELECT: case MODE_SELECT: case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER: case FORMAT_UNIT: case REASSIGN_BLOCKS: case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME: case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12: case MEDIUM_SCAN: case SEND_VOLUME_TAG: case WRITE_LONG_2: case PERSISTENT_RESERVE_OUT: case MAINTENANCE_OUT: req->cmd.mode = SCSI_XFER_TO_DEV; break; default: if (req->cmd.xfer) req->cmd.mode = SCSI_XFER_FROM_DEV; else { req->cmd.mode = SCSI_XFER_NONE; } break; } } | 10,822 |
1 | static int decode_hextile(VmncContext *c, uint8_t *dst, const uint8_t *src, int ssize, int w, int h, int stride) { int i, j, k; int bg = 0, fg = 0, rects, color, flags, xy, wh; const int bpp = c->bpp2; uint8_t *dst2; int bw = 16, bh = 16; const uint8_t *ssrc = src; for (j = 0; j < h; j += 16) { dst2 = dst; bw = 16; if (j + 16 > h) bh = h - j; for (i = 0; i < w; i += 16, dst2 += 16 * bpp) { if (src - ssrc >= ssize) { av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n"); return -1; } if (i + 16 > w) bw = w - i; flags = *src++; if (flags & HT_RAW) { if (src - ssrc > ssize - bw * bh * bpp) { av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n"); return -1; } paint_raw(dst2, bw, bh, src, bpp, c->bigendian, stride); src += bw * bh * bpp; } else { if (flags & HT_BKG) { bg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; } if (flags & HT_FG) { fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; } rects = 0; if (flags & HT_SUB) rects = *src++; color = !!(flags & HT_CLR); paint_rect(dst2, 0, 0, bw, bh, bg, bpp, stride); if (src - ssrc > ssize - rects * (color * bpp + 2)) { av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n"); return -1; } for (k = 0; k < rects; k++) { if (color) { fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; } xy = *src++; wh = *src++; paint_rect(dst2, xy >> 4, xy & 0xF, (wh >> 4) + 1, (wh & 0xF) + 1, fg, bpp, stride); } } } dst += stride * 16; } return src - ssrc; } | 10,823 |
1 | void do_interrupt(CPUState *env) { int intno = env->exception_index; #ifdef DEBUG_PCALL if (qemu_loglevel_mask(CPU_LOG_INT)) { static int count; const char *name; if (intno < 0 || intno >= 0x180) name = "Unknown"; else if (intno >= 0x100) name = "Trap Instruction"; else if (intno >= 0xc0) name = "Window Fill"; else if (intno >= 0x80) name = "Window Spill"; else { name = excp_names[intno]; if (!name) name = "Unknown"; } qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64 " SP=%016" PRIx64 "\n", count, name, intno, env->pc, env->npc, env->regwptr[6]); log_cpu_state(env, 0); #if 0 { int i; uint8_t *ptr; qemu_log(" code="); ptr = (uint8_t *)env->pc; for(i = 0; i < 16; i++) { qemu_log(" %02x", ldub(ptr + i)); } qemu_log("\n"); } #endif count++; } #endif #if !defined(CONFIG_USER_ONLY) if (env->tl >= env->maxtl) { cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d)," " Error state", env->exception_index, env->tl, env->maxtl); return; } #endif if (env->tl < env->maxtl - 1) { env->tl++; } else { env->pstate |= PS_RED; if (env->tl < env->maxtl) env->tl++; } env->tsptr = &env->ts[env->tl & MAXTL_MASK]; env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) | ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) | GET_CWP64(env); env->tsptr->tpc = env->pc; env->tsptr->tnpc = env->npc; env->tsptr->tt = intno; if (!(env->def->features & CPU_FEATURE_GL)) { switch (intno) { case TT_IVEC: change_pstate(PS_PEF | PS_PRIV | PS_IG); break; case TT_TFAULT: case TT_TMISS: case TT_DFAULT: case TT_DMISS: case TT_DPROT: change_pstate(PS_PEF | PS_PRIV | PS_MG); break; default: change_pstate(PS_PEF | PS_PRIV | PS_AG); break; } } if (intno == TT_CLRWIN) cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1)); else if ((intno & 0x1c0) == TT_SPILL) cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2)); else if ((intno & 0x1c0) == TT_FILL) cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1)); env->tbr &= ~0x7fffULL; env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5); env->pc = env->tbr; env->npc = env->pc + 4; env->exception_index = 0; } | 10,824 |
1 | int bdrv_open2(BlockDriverState *bs, const char *filename, int flags, BlockDriver *drv) { int ret, open_flags; char tmp_filename[PATH_MAX]; char backing_filename[PATH_MAX]; bs->read_only = 0; bs->is_temporary = 0; bs->encrypted = 0; bs->autogrow = 0; if (flags & BDRV_O_AUTOGROW) bs->autogrow = 1; if (flags & BDRV_O_SNAPSHOT) { BlockDriverState *bs1; int64_t total_size; /* if snapshot, we create a temporary backing file and open it instead of opening 'filename' directly */ /* if there is a backing file, use it */ bs1 = bdrv_new(""); if (!bs1) { return -ENOMEM; } if (bdrv_open(bs1, filename, 0) < 0) { bdrv_delete(bs1); return -1; } total_size = bdrv_getlength(bs1) >> SECTOR_BITS; bdrv_delete(bs1); get_tmp_filename(tmp_filename, sizeof(tmp_filename)); realpath(filename, backing_filename); if (bdrv_create(&bdrv_qcow2, tmp_filename, total_size, backing_filename, 0) < 0) { return -1; } filename = tmp_filename; bs->is_temporary = 1; } pstrcpy(bs->filename, sizeof(bs->filename), filename); if (flags & BDRV_O_FILE) { drv = find_protocol(filename); if (!drv) return -ENOENT; } else { if (!drv) { drv = find_image_format(filename); if (!drv) return -1; } } bs->drv = drv; bs->opaque = qemu_mallocz(drv->instance_size); bs->total_sectors = 0; /* driver will set if it does not do getlength */ if (bs->opaque == NULL && drv->instance_size > 0) return -1; /* Note: for compatibility, we open disk image files as RDWR, and RDONLY as fallback */ if (!(flags & BDRV_O_FILE)) open_flags = BDRV_O_RDWR | (flags & BDRV_O_DIRECT); else open_flags = flags & ~(BDRV_O_FILE | BDRV_O_SNAPSHOT); ret = drv->bdrv_open(bs, filename, open_flags); if (ret == -EACCES && !(flags & BDRV_O_FILE)) { ret = drv->bdrv_open(bs, filename, BDRV_O_RDONLY); bs->read_only = 1; } if (ret < 0) { qemu_free(bs->opaque); bs->opaque = NULL; bs->drv = NULL; return ret; } if (drv->bdrv_getlength) { bs->total_sectors = bdrv_getlength(bs) >> SECTOR_BITS; } #ifndef _WIN32 if (bs->is_temporary) { unlink(filename); } #endif if (bs->backing_file[0] != '\0') { /* if there is a backing file, use it */ bs->backing_hd = bdrv_new(""); if (!bs->backing_hd) { fail: bdrv_close(bs); return -ENOMEM; } path_combine(backing_filename, sizeof(backing_filename), filename, bs->backing_file); if (bdrv_open(bs->backing_hd, backing_filename, 0) < 0) goto fail; } /* call the change callback */ bs->media_changed = 1; if (bs->change_cb) bs->change_cb(bs->change_opaque); return 0; } | 10,825 |
1 | int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { MotionEstContext * const c= &s->me; int mx, my, dmin; int P[10][2]; const int shift= 1+s->quarter_sample; const int xy= mb_x + mb_y*s->mb_stride; init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0); assert(s->quarter_sample==0 || s->quarter_sample==1); c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp); c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; get_limits(s, 16*mb_x, 16*mb_y); c->skip=0; P_LEFT[0] = s->p_mv_table[xy + 1][0]; P_LEFT[1] = s->p_mv_table[xy + 1][1]; if(P_LEFT[0] < (c->xmin<<shift)) P_LEFT[0] = (c->xmin<<shift); /* special case for first line */ if (s->first_slice_line) { c->pred_x= P_LEFT[0]; c->pred_y= P_LEFT[1]; P_TOP[0]= P_TOPRIGHT[0]= P_MEDIAN[0]= P_TOP[1]= P_TOPRIGHT[1]= P_MEDIAN[1]= 0; //FIXME } else { P_TOP[0] = s->p_mv_table[xy + s->mb_stride ][0]; P_TOP[1] = s->p_mv_table[xy + s->mb_stride ][1]; P_TOPRIGHT[0] = s->p_mv_table[xy + s->mb_stride - 1][0]; P_TOPRIGHT[1] = s->p_mv_table[xy + s->mb_stride - 1][1]; if(P_TOP[1] < (c->ymin<<shift)) P_TOP[1] = (c->ymin<<shift); if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); if(P_TOPRIGHT[1] < (c->ymin<<shift)) P_TOPRIGHT[1]= (c->ymin<<shift); P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); c->pred_x = P_MEDIAN[0]; c->pred_y = P_MEDIAN[1]; } dmin = ff_epzs_motion_search(s, &mx, &my, P, 0, 0, s->p_mv_table, (1<<16)>>shift, 0, 16); s->p_mv_table[xy][0] = mx<<shift; s->p_mv_table[xy][1] = my<<shift; return dmin; } | 10,826 |
1 | static inline int gsm_mult(int a, int b) { return (a * b + (1 << 14)) >> 15; } | 10,827 |
0 | int ff_xvid_rate_control_init(MpegEncContext *s){ char *tmp_name; int fd, i; xvid_plg_create_t xvid_plg_create; xvid_plugin_2pass2_t xvid_2pass2; //xvid_debug=-1; fd=av_tempfile("xvidrc.", &tmp_name, 0, s->avctx); if (fd == -1) { av_log(NULL, AV_LOG_ERROR, "Can't create temporary pass2 file.\n"); return -1; } for(i=0; i<s->rc_context.num_entries; i++){ static const char *frame_types = " ipbs"; char tmp[256]; RateControlEntry *rce; rce= &s->rc_context.entry[i]; snprintf(tmp, sizeof(tmp), "%c %d %d %d %d %d %d\n", frame_types[rce->pict_type], (int)lrintf(rce->qscale / FF_QP2LAMBDA), rce->i_count, s->mb_num - rce->i_count - rce->skip_count, rce->skip_count, (rce->i_tex_bits + rce->p_tex_bits + rce->misc_bits+7)/8, (rce->header_bits+rce->mv_bits+7)/8); //av_log(NULL, AV_LOG_ERROR, "%s\n", tmp); write(fd, tmp, strlen(tmp)); } close(fd); memset(&xvid_2pass2, 0, sizeof(xvid_2pass2)); xvid_2pass2.version= XVID_MAKE_VERSION(1,1,0); xvid_2pass2.filename= tmp_name; xvid_2pass2.bitrate= s->avctx->bit_rate; xvid_2pass2.vbv_size= s->avctx->rc_buffer_size; xvid_2pass2.vbv_maxrate= s->avctx->rc_max_rate; xvid_2pass2.vbv_initial= s->avctx->rc_initial_buffer_occupancy; memset(&xvid_plg_create, 0, sizeof(xvid_plg_create)); xvid_plg_create.version= XVID_MAKE_VERSION(1,1,0); xvid_plg_create.fbase= s->avctx->time_base.den; xvid_plg_create.fincr= s->avctx->time_base.num; xvid_plg_create.param= &xvid_2pass2; if(xvid_plugin_2pass2(NULL, XVID_PLG_CREATE, &xvid_plg_create, &s->rc_context.non_lavc_opaque)<0){ av_log(NULL, AV_LOG_ERROR, "xvid_plugin_2pass2 failed\n"); return -1; } return 0; } | 10,829 |
1 | static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, j, level, last_non_zero, q; const int *qmat; int minLevel, maxLevel; if(s->avctx!=NULL && s->avctx->codec->id==CODEC_ID_MPEG4){ /* mpeg4 */ minLevel= -2048; maxLevel= 2047; }else if(s->out_format==FMT_MPEG1){ /* mpeg1 */ minLevel= -255; maxLevel= 255; }else if(s->out_format==FMT_MJPEG){ /* (m)jpeg */ minLevel= -1023; maxLevel= 1023; }else{ /* h263 / msmpeg4 */ minLevel= -128; maxLevel= 127; } av_fdct (block); /* we need this permutation so that we correct the IDCT permutation. will be moved into DCT code */ block_permute(block); if (s->mb_intra) { if (n < 4) q = s->y_dc_scale; else q = s->c_dc_scale; q = q << 3; /* note: block[0] is assumed to be positive */ block[0] = (block[0] + (q >> 1)) / q; i = 1; last_non_zero = 0; if (s->out_format == FMT_H263) { qmat = s->q_non_intra_matrix; } else { qmat = s->q_intra_matrix; } } else { i = 0; last_non_zero = -1; qmat = s->q_non_intra_matrix; } for(;i<64;i++) { j = zigzag_direct[i]; level = block[j]; level = level * qmat[j]; #ifdef PARANOID { static int count = 0; int level1, level2, qmat1; double val; if (qmat == s->q_non_intra_matrix) { qmat1 = default_non_intra_matrix[j] * s->qscale; } else { qmat1 = default_intra_matrix[j] * s->qscale; } if (av_fdct != jpeg_fdct_ifast) val = ((double)block[j] * 8.0) / (double)qmat1; else val = ((double)block[j] * 8.0 * 2048.0) / ((double)qmat1 * aanscales[j]); level1 = (int)val; level2 = level / (1 << (QMAT_SHIFT - 3)); if (level1 != level2) { fprintf(stderr, "%d: quant error qlevel=%d wanted=%d level=%d qmat1=%d qmat=%d wantedf=%0.6f\n", count, level2, level1, block[j], qmat1, qmat[j], val); count++; } } #endif /* XXX: slight error for the low range. Test should be equivalent to (level <= -(1 << (QMAT_SHIFT - 3)) || level >= (1 << (QMAT_SHIFT - 3))) */ if (((level << (31 - (QMAT_SHIFT - 3))) >> (31 - (QMAT_SHIFT - 3))) != level) { level = level / (1 << (QMAT_SHIFT - 3)); /* XXX: currently, this code is not optimal. the range should be: mpeg1: -255..255 mpeg2: -2048..2047 h263: -128..127 mpeg4: -2048..2047 */ if (level > maxLevel) level = maxLevel; else if (level < minLevel) level = minLevel; block[j] = level; last_non_zero = i; } else { block[j] = 0; } } return last_non_zero; } | 10,830 |
1 | static void drive_backup_abort(BlkActionState *common) { DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); BlockDriverState *bs = state->bs; /* Only cancel if it's the job we started */ if (bs && bs->job && bs->job == state->job) { block_job_cancel_sync(bs->job); } } | 10,831 |