label (int64: 0-1) | func1 (string, lengths 23-97k) | id (int64: 0-27.3k) |
---|---|---|
1 | static void qtest_irq_handler(void *opaque, int n, int level) { qemu_irq old_irq = *(qemu_irq *)opaque; qemu_set_irq(old_irq, level); if (irq_levels[n] != level) { CharDriverState *chr = qtest_chr; irq_levels[n] = level; qtest_send_prefix(chr); qtest_send(chr, "IRQ %s %d\n", level ? "raise" : "lower", n); } } | 16,056 |
0 | void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder) { POWERPC_TBL_DECLARE(altivec_gmc1_num, GMC1_PERF_COND); #ifdef ALTIVEC_USE_REFERENCE_C_CODE const int A=(16-x16)*(16-y16); const int B=( x16)*(16-y16); const int C=(16-x16)*( y16); const int D=( x16)*( y16); int i; POWERPC_TBL_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); for(i=0; i<h; i++) { dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8; dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8; dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8; dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8; dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8; dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8; dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8; dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8; dst+= stride; src+= stride; } POWERPC_TBL_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); #else /* ALTIVEC_USE_REFERENCE_C_CODE */ const unsigned short __attribute__ ((aligned(16))) rounder_a[8] = {rounder, rounder, rounder, rounder, rounder, rounder, rounder, rounder}; const unsigned short __attribute__ ((aligned(16))) ABCD[8] = { (16-x16)*(16-y16), /* A */ ( x16)*(16-y16), /* B */ (16-x16)*( y16), /* C */ ( x16)*( y16), /* D */ 0, 0, 0, 0 /* padding */ }; register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0); register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8); register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD; register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD; int i; unsigned long dst_odd = (unsigned long)dst & 0x0000000F; unsigned long src_really_odd = (unsigned long)src & 0x0000000F; POWERPC_TBL_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); tempA = vec_ld(0, (unsigned short*)ABCD); Av = vec_splat(tempA, 0); Bv = vec_splat(tempA, 1); Cv = vec_splat(tempA, 2); Dv = vec_splat(tempA, 3); rounderV = vec_ld(0, (unsigned short*)rounder_a); // we'll be able to pick-up our 9 char elements // at src from those 32 bytes // we load the first batch here, as inside the loop // we can re-use 'src+stride' from one iteration // as the 'src' of the next. src_0 = vec_ld(0, src); src_1 = vec_ld(16, src); srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src)); if (src_really_odd != 0x0000000F) { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector. srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src)); } else { srcvB = src_1; } srcvA = vec_mergeh(vczero, srcvA); srcvB = vec_mergeh(vczero, srcvB); for(i=0; i<h; i++) { dst_odd = (unsigned long)dst & 0x0000000F; src_really_odd = (((unsigned long)src) + stride) & 0x0000000F; dstv = vec_ld(0, dst); // we we'll be able to pick-up our 9 char elements // at src + stride from those 32 bytes // then reuse the resulting 2 vectors srvcC and srcvD // as the next srcvA and srcvB src_0 = vec_ld(stride + 0, src); src_1 = vec_ld(stride + 16, src); srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src)); if (src_really_odd != 0x0000000F) { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector. 
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src)); } else { srcvD = src_1; } srcvC = vec_mergeh(vczero, srcvC); srcvD = vec_mergeh(vczero, srcvD); // OK, now we (finally) do the math :-) // those four instructions replaces 32 int muls & 32 int adds. // isn't AltiVec nice ? tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV); tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA); tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB); tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC); srcvA = srcvC; srcvB = srcvD; tempD = vec_sr(tempD, vcsr8); dstv2 = vec_pack(tempD, (vector unsigned short)vczero); if (dst_odd) { dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1)); } else { dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3)); } vec_st(dstv2, 0, dst); dst += stride; src += stride; } POWERPC_TBL_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } | 16,058 |
0 | void ff_put_h264_qpel4_mc12_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midh_qrt_4w_msa(src - (2 * stride) - 2, stride, dst, stride, 4, 0); } | 16,059 |
0 | enum AVCodecID avpriv_fmt_v4l2codec(uint32_t v4l2_fmt) { int i; for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) { if (avpriv_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) { return avpriv_fmt_conversion_table[i].codec_id; } } return AV_CODEC_ID_NONE; } | 16,060 |
0 | static int read_sbr_channel_pair_element(AACContext *ac, SpectralBandReplication *sbr, GetBitContext *gb) { if (get_bits1(gb)) // bs_data_extra skip_bits(gb, 8); // bs_reserved if ((sbr->bs_coupling = get_bits1(gb))) { if (read_sbr_grid(ac, sbr, gb, &sbr->data[0])) return -1; copy_sbr_grid(&sbr->data[1], &sbr->data[0]); read_sbr_dtdf(sbr, gb, &sbr->data[0]); read_sbr_dtdf(sbr, gb, &sbr->data[1]); read_sbr_invf(sbr, gb, &sbr->data[0]); memcpy(sbr->data[1].bs_invf_mode[1], sbr->data[1].bs_invf_mode[0], sizeof(sbr->data[1].bs_invf_mode[0])); memcpy(sbr->data[1].bs_invf_mode[0], sbr->data[0].bs_invf_mode[0], sizeof(sbr->data[1].bs_invf_mode[0])); read_sbr_envelope(sbr, gb, &sbr->data[0], 0); read_sbr_noise(sbr, gb, &sbr->data[0], 0); read_sbr_envelope(sbr, gb, &sbr->data[1], 1); read_sbr_noise(sbr, gb, &sbr->data[1], 1); } else { if (read_sbr_grid(ac, sbr, gb, &sbr->data[0]) || read_sbr_grid(ac, sbr, gb, &sbr->data[1])) return -1; read_sbr_dtdf(sbr, gb, &sbr->data[0]); read_sbr_dtdf(sbr, gb, &sbr->data[1]); read_sbr_invf(sbr, gb, &sbr->data[0]); read_sbr_invf(sbr, gb, &sbr->data[1]); read_sbr_envelope(sbr, gb, &sbr->data[0], 0); read_sbr_envelope(sbr, gb, &sbr->data[1], 1); read_sbr_noise(sbr, gb, &sbr->data[0], 0); read_sbr_noise(sbr, gb, &sbr->data[1], 1); } if ((sbr->data[0].bs_add_harmonic_flag = get_bits1(gb))) get_bits1_vector(gb, sbr->data[0].bs_add_harmonic, sbr->n[1]); if ((sbr->data[1].bs_add_harmonic_flag = get_bits1(gb))) get_bits1_vector(gb, sbr->data[1].bs_add_harmonic, sbr->n[1]); return 0; } | 16,061 |
0 | static av_cold int alac_encode_close(AVCodecContext *avctx) { AlacEncodeContext *s = avctx->priv_data; ff_lpc_end(&s->lpc_ctx); av_freep(&avctx->extradata); avctx->extradata_size = 0; av_freep(&avctx->coded_frame); return 0; } | 16,062 |
0 | av_cold int ff_mpv_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; AVCPBProperties *cpb_props; int i, ret, format_supported; mpv_encode_defaults(s); switch (avctx->codec_id) { case AV_CODEC_ID_MPEG2VIDEO: if (avctx->pix_fmt != AV_PIX_FMT_YUV420P && avctx->pix_fmt != AV_PIX_FMT_YUV422P) { av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n"); return -1; } break; case AV_CODEC_ID_MJPEG: format_supported = 0; /* JPEG color space */ if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P || avctx->pix_fmt == AV_PIX_FMT_YUVJ422P || (avctx->color_range == AVCOL_RANGE_JPEG && (avctx->pix_fmt == AV_PIX_FMT_YUV420P || avctx->pix_fmt == AV_PIX_FMT_YUV422P))) format_supported = 1; /* MPEG color space */ else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL && (avctx->pix_fmt == AV_PIX_FMT_YUV420P || avctx->pix_fmt == AV_PIX_FMT_YUV422P)) format_supported = 1; if (!format_supported) { av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n"); return -1; } break; default: if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) { av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n"); return -1; } } switch (avctx->pix_fmt) { case AV_PIX_FMT_YUVJ422P: case AV_PIX_FMT_YUV422P: s->chroma_format = CHROMA_422; break; case AV_PIX_FMT_YUVJ420P: case AV_PIX_FMT_YUV420P: default: s->chroma_format = CHROMA_420; break; } s->bit_rate = avctx->bit_rate; s->width = avctx->width; s->height = avctx->height; if (avctx->gop_size > 600 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n"); avctx->gop_size = 600; } s->gop_size = avctx->gop_size; s->avctx = avctx; if (avctx->max_b_frames > MAX_B_FRAMES) { av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum " "is %d.\n", MAX_B_FRAMES); } s->max_b_frames = avctx->max_b_frames; s->codec_id = avctx->codec->id; s->strict_std_compliance = avctx->strict_std_compliance; s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0; s->mpeg_quant = avctx->mpeg_quant; s->rtp_mode = !!avctx->rtp_payload_size; s->intra_dc_precision = avctx->intra_dc_precision; s->user_specified_pts = AV_NOPTS_VALUE; if (s->gop_size <= 1) { s->intra_only = 1; s->gop_size = 12; } else { s->intra_only = 0; } #if FF_API_MOTION_EST FF_DISABLE_DEPRECATION_WARNINGS s->me_method = avctx->me_method; FF_ENABLE_DEPRECATION_WARNINGS #endif /* Fixed QSCALE */ s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE); #if FF_API_MPV_OPT FF_DISABLE_DEPRECATION_WARNINGS if (avctx->border_masking != 0.0) s->border_masking = avctx->border_masking; FF_ENABLE_DEPRECATION_WARNINGS #endif s->adaptive_quant = (s->avctx->lumi_masking || s->avctx->dark_masking || s->avctx->temporal_cplx_masking || s->avctx->spatial_cplx_masking || s->avctx->p_masking || s->border_masking || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) && !s->fixed_qscale; s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER); if (avctx->rc_max_rate && !avctx->rc_buffer_size) { av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, " "for encoding with a maximum bitrate\n"); return -1; } if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) { av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n"); } if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) { av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n"); return -1; } if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) { av_log(avctx, AV_LOG_INFO, "bitrate above max 
bitrate\n"); return -1; } if (avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate) { av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n"); } if (avctx->rc_buffer_size && avctx->bit_rate * (int64_t)avctx->time_base.num > avctx->rc_buffer_size * (int64_t)avctx->time_base.den) { av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n"); return -1; } if (!s->fixed_qscale && avctx->bit_rate * av_q2d(avctx->time_base) > avctx->bit_rate_tolerance) { av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n"); return -1; } if (s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) && 90000LL * (avctx->rc_buffer_size - 1) > s->avctx->rc_max_rate * 0xFFFFLL) { av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the " "specified vbv buffer is too large for the given bitrate!\n"); } if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P && s->codec_id != AV_CODEC_ID_FLV1) { av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n"); return -1; } if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) { av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n"); return -1; } if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) { av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n"); return -1; } if (s->max_b_frames && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n"); return -1; } if ((s->codec_id == AV_CODEC_ID_MPEG4 || s->codec_id == AV_CODEC_ID_H263 || s->codec_id == AV_CODEC_ID_H263P) && (avctx->sample_aspect_ratio.num > 255 || avctx->sample_aspect_ratio.den > 255)) { av_log(avctx, AV_LOG_ERROR, "Invalid pixel aspect ratio %i/%i, limit is 255/255\n", avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); return -1; } if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n"); return -1; } // FIXME mpeg2 uses that too if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) { av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n"); return -1; } if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) { av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n"); return -1; } if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD) { av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n"); return -1; } if (s->avctx->scenechange_threshold < 1000000000 && (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) { av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection are not supported yet, " "set threshold to 1000000000\n"); return -1; } if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) { if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n"); return -1; } if (s->max_b_frames != 0) { av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n"); return -1; } } if (s->q_scale_type == 1) { if (avctx->qmax > 12) { av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n"); return -1; } } 
if (avctx->slices > 1 && (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) { av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n"); return AVERROR(EINVAL); } if (s->avctx->thread_count > 1 && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->codec_id != AV_CODEC_ID_MPEG2VIDEO && (s->codec_id != AV_CODEC_ID_H263P)) { av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n"); return -1; } if (s->avctx->thread_count < 1) { av_log(avctx, AV_LOG_ERROR, "automatic thread number detection not supported by codec," "patch welcome\n"); return -1; } if (!avctx->time_base.den || !avctx->time_base.num) { av_log(avctx, AV_LOG_ERROR, "framerate not set\n"); return -1; } if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) { av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n"); avctx->b_frame_strategy = 0; } i = av_gcd(avctx->time_base.den, avctx->time_base.num); if (i > 1) { av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); avctx->time_base.den /= i; avctx->time_base.num /= i; //return -1; } if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) { // (a + x * 3 / 8) / x s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3); s->inter_quant_bias = 0; } else { s->intra_quant_bias = 0; // (a - x / 4) / x s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2)); } #if FF_API_QUANT_BIAS FF_DISABLE_DEPRECATION_WARNINGS if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) s->intra_quant_bias = avctx->intra_quant_bias; if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS) s->inter_quant_bias = avctx->inter_quant_bias; FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->codec_id == AV_CODEC_ID_MPEG4 && s->avctx->time_base.den > (1 << 16) - 1) { av_log(avctx, AV_LOG_ERROR, "timebase %d/%d not supported by MPEG 4 standard, " "the maximum admitted value for the timebase denominator " "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den, (1 << 16) - 1); return -1; } s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1; switch (avctx->codec->id) { case AV_CODEC_ID_MPEG1VIDEO: s->out_format = FMT_MPEG1; s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY); avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1); break; case AV_CODEC_ID_MPEG2VIDEO: s->out_format = FMT_MPEG1; s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY); avctx->delay = s->low_delay ? 
0 : (s->max_b_frames + 1); s->rtp_mode = 1; break; case AV_CODEC_ID_MJPEG: s->out_format = FMT_MJPEG; s->intra_only = 1; /* force intra only for jpeg */ if (!CONFIG_MJPEG_ENCODER || ff_mjpeg_encode_init(s) < 0) return -1; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_H261: if (!CONFIG_H261_ENCODER) return -1; if (ff_h261_get_picture_format(s->width, s->height) < 0) { av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the " "H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height); return -1; } s->out_format = FMT_H261; avctx->delay = 0; s->low_delay = 1; s->rtp_mode = 0; /* Sliced encoding not supported */ break; case AV_CODEC_ID_H263: if (!CONFIG_H263_ENCODER) return -1; if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height) == 8) { av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for " "the H.263 codec.\nValid sizes are 128x96, 176x144, " "352x288, 704x576, and 1408x1152." "Try H.263+.\n", s->width, s->height); return -1; } s->out_format = FMT_H263; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_H263P: s->out_format = FMT_H263; s->h263_plus = 1; /* Fx */ s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0; s->modified_quant = s->h263_aic; s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0; s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus; /* /Fx */ /* These are just to be sure */ avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_FLV1: s->out_format = FMT_H263; s->h263_flv = 2; /* format = 1; 11-bit codes */ s->unrestricted_mv = 1; s->rtp_mode = 0; /* don't allow GOB */ avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_RV10: s->out_format = FMT_H263; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_RV20: s->out_format = FMT_H263; avctx->delay = 0; s->low_delay = 1; s->modified_quant = 1; s->h263_aic = 1; s->h263_plus = 1; s->loop_filter = 1; s->unrestricted_mv = 0; break; case AV_CODEC_ID_MPEG4: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->low_delay = s->max_b_frames ? 0 : 1; avctx->delay = s->low_delay ? 
0 : (s->max_b_frames + 1); break; case AV_CODEC_ID_MSMPEG4V2: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 2; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_MSMPEG4V3: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 3; s->flipflop_rounding = 1; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_WMV1: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 4; s->flipflop_rounding = 1; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_WMV2: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 5; s->flipflop_rounding = 1; avctx->delay = 0; s->low_delay = 1; break; default: return -1; } avctx->has_b_frames = !s->low_delay; s->encoding = 1; s->progressive_frame = s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) || s->alternate_scan); /* init */ ff_mpv_idct_init(s); if (ff_mpv_common_init(s) < 0) return -1; if (ARCH_X86) ff_mpv_encode_init_x86(s); ff_fdctdsp_init(&s->fdsp, avctx); ff_me_cmp_init(&s->mecc, avctx); ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx); ff_pixblockdsp_init(&s->pdsp, avctx); ff_qpeldsp_init(&s->qdsp); if (s->msmpeg4_version) { FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int), fail); } FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail); if (s->avctx->noise_reduction) { FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail); } if (CONFIG_H263_ENCODER) ff_h263dsp_init(&s->h263dsp); if (!s->dct_quantize) s->dct_quantize = ff_dct_quantize_c; if (!s->denoise_dct) s->denoise_dct = denoise_dct_c; s->fast_dct_quantize = s->dct_quantize; if (avctx->trellis) s->dct_quantize = dct_quantize_trellis_c; if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant) s->chroma_qscale_table = ff_h263_chroma_qscale_table; if (s->slice_context_count > 1) { s->rtp_mode = 1; if (avctx->codec_id == AV_CODEC_ID_H263 || avctx->codec_id == AV_CODEC_ID_H263P) s->h263_slice_structured = 1; } s->quant_precision = 5; ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp); ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp); if (CONFIG_H261_ENCODER && s->out_format == FMT_H261) ff_h261_encode_init(s); if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) ff_h263_encode_init(s); if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version) if ((ret = ff_msmpeg4_encode_init(s)) < 0) return ret; if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) && s->out_format == FMT_MPEG1) ff_mpeg1_encode_init(s); /* init q matrix */ for (i = 0; i < 64; i++) { int j = s->idsp.idct_permutation[i]; if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 && s->mpeg_quant) { s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i]; } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) { 
s->intra_matrix[j] = s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; } else { /* mpeg1/2 */ s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i]; s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; } if (s->avctx->intra_matrix) s->intra_matrix[j] = s->avctx->intra_matrix[i]; if (s->avctx->inter_matrix) s->inter_matrix[j] = s->avctx->inter_matrix[i]; } /* precompute matrix */ /* for mjpeg, we do include qscale in the matrix */ if (s->out_format != FMT_MJPEG) { ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1); ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0); } if (ff_rate_control_init(s) < 0) return -1; #if FF_API_ERROR_RATE FF_DISABLE_DEPRECATION_WARNINGS if (avctx->error_rate) s->error_rate = avctx->error_rate; FF_ENABLE_DEPRECATION_WARNINGS; #endif #if FF_API_NORMALIZE_AQP FF_DISABLE_DEPRECATION_WARNINGS if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP) s->mpv_flags |= FF_MPV_FLAG_NAQ; FF_ENABLE_DEPRECATION_WARNINGS; #endif #if FF_API_MV0 FF_DISABLE_DEPRECATION_WARNINGS if (avctx->flags & CODEC_FLAG_MV0) s->mpv_flags |= FF_MPV_FLAG_MV0; FF_ENABLE_DEPRECATION_WARNINGS #endif #if FF_API_MPV_OPT FF_DISABLE_DEPRECATION_WARNINGS if (avctx->rc_qsquish != 0.0) s->rc_qsquish = avctx->rc_qsquish; if (avctx->rc_qmod_amp != 0.0) s->rc_qmod_amp = avctx->rc_qmod_amp; if (avctx->rc_qmod_freq) s->rc_qmod_freq = avctx->rc_qmod_freq; if (avctx->rc_buffer_aggressivity != 1.0) s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity; if (avctx->rc_initial_cplx != 0.0) s->rc_initial_cplx = avctx->rc_initial_cplx; if (avctx->lmin) s->lmin = avctx->lmin; if (avctx->lmax) s->lmax = avctx->lmax; if (avctx->rc_eq) { av_freep(&s->rc_eq); s->rc_eq = av_strdup(avctx->rc_eq); if (!s->rc_eq) return AVERROR(ENOMEM); } FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->b_frame_strategy == 2) { for (i = 0; i < s->max_b_frames + 2; i++) { s->tmp_frames[i] = av_frame_alloc(); if (!s->tmp_frames[i]) return AVERROR(ENOMEM); s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P; s->tmp_frames[i]->width = s->width >> avctx->brd_scale; s->tmp_frames[i]->height = s->height >> avctx->brd_scale; ret = av_frame_get_buffer(s->tmp_frames[i], 32); if (ret < 0) return ret; } } cpb_props = ff_add_cpb_side_data(avctx); if (!cpb_props) return AVERROR(ENOMEM); cpb_props->max_bitrate = avctx->rc_max_rate; cpb_props->min_bitrate = avctx->rc_min_rate; cpb_props->avg_bitrate = avctx->bit_rate; cpb_props->buffer_size = avctx->rc_buffer_size; return 0; fail: ff_mpv_encode_end(avctx); return AVERROR_UNKNOWN; } | 16,064 |
0 | static inline void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC0(dst, src, fcc_offset); tcg_gen_xori_tl(dst, dst, 0x1); gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset); tcg_gen_and_tl(dst, dst, cpu_tmp0); } | 16,065 |
0 | static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) { unsupported_encoding(s, insn); } | 16,066 |
0 | static int readv_f(int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0, vflag = 0; int c, cnt; char *buf; int64_t offset; /* Some compilers get confused and warn if this is not initialized. */ int total = 0; int nr_iov; QEMUIOVector qiov; int pattern = 0; int Pflag = 0; while ((c = getopt(argc, argv, "CP:qv")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'P': Pflag = 1; pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; case 'q': qflag = 1; break; case 'v': vflag = 1; break; default: return command_usage(&readv_cmd); } } if (optind > argc - 2) { return command_usage(&readv_cmd); } offset = cvtnum(argv[optind]); if (offset < 0) { printf("non-numeric length argument -- %s\n", argv[optind]); return 0; } optind++; if (offset & 0x1ff) { printf("offset %" PRId64 " is not sector aligned\n", offset); return 0; } nr_iov = argc - optind; buf = create_iovec(&qiov, &argv[optind], nr_iov, 0xab); if (buf == NULL) { return 0; } gettimeofday(&t1, NULL); cnt = do_aio_readv(&qiov, offset, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf("readv failed: %s\n", strerror(-cnt)); goto out; } if (Pflag) { void *cmp_buf = malloc(qiov.size); memset(cmp_buf, pattern, qiov.size); if (memcmp(buf, cmp_buf, qiov.size)) { printf("Pattern verification failed at offset %" PRId64 ", %zd bytes\n", offset, qiov.size); } free(cmp_buf); } if (qflag) { goto out; } if (vflag) { dump_buffer(buf, offset, qiov.size); } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report("read", &t2, offset, qiov.size, total, cnt, Cflag); out: qemu_io_free(buf); return 0; } | 16,067 |
0 | void nbd_client_put(NBDClient *client) { if (--client->refcount == 0) { /* The last reference should be dropped by client->close, * which is called by nbd_client_close. */ assert(client->closing); nbd_unset_handlers(client); close(client->sock); client->sock = -1; if (client->exp) { QTAILQ_REMOVE(&client->exp->clients, client, next); nbd_export_put(client->exp); } g_free(client); } } | 16,069 |
0 | static void arm11mpcore_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_VFP); set_feature(&cpu->env, ARM_FEATURE_VAPA); cpu->midr = ARM_CPUID_ARM11MPCORE; cpu->reset_fpsid = 0x410120b4; cpu->mvfr0 = 0x11111111; cpu->mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; cpu->id_dfr0 = 0; cpu->id_afr0 = 0x2; cpu->id_mmfr0 = 0x01100103; cpu->id_mmfr1 = 0x10020302; cpu->id_mmfr2 = 0x01222000; cpu->id_isar0 = 0x00100011; cpu->id_isar1 = 0x12002111; cpu->id_isar2 = 0x11221011; cpu->id_isar3 = 0x01102131; cpu->id_isar4 = 0x141; } | 16,070 |
0 | void pci_device_hot_add(Monitor *mon, const QDict *qdict) { PCIDevice *dev = NULL; const char *pci_addr = qdict_get_str(qdict, "pci_addr"); const char *type = qdict_get_str(qdict, "type"); const char *opts = qdict_get_try_str(qdict, "opts"); /* strip legacy tag */ if (!strncmp(pci_addr, "pci_addr=", 9)) { pci_addr += 9; } if (!opts) { opts = ""; } if (!strcmp(pci_addr, "auto")) pci_addr = NULL; if (strcmp(type, "nic") == 0) { dev = qemu_pci_hot_add_nic(mon, pci_addr, opts); } else if (strcmp(type, "storage") == 0) { dev = qemu_pci_hot_add_storage(mon, pci_addr, opts); } else { monitor_printf(mon, "invalid type: %s\n", type); } if (dev) { monitor_printf(mon, "OK domain %d, bus %d, slot %d, function %d\n", pci_find_domain(dev->bus), pci_bus_num(dev->bus), PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); } else monitor_printf(mon, "failed to add %s\n", opts); } | 16,071 |
0 | float32 helper_fsqrts(CPUSPARCState *env, float32 src) { float32 ret; clear_float_exceptions(env); ret = float32_sqrt(src, &env->fp_status); check_ieee_exceptions(env); return ret; } | 16,072 |
0 | ser_read(void *opaque, target_phys_addr_t addr, unsigned int size) { struct etrax_serial *s = opaque; D(CPUCRISState *env = s->env); uint32_t r = 0; addr >>= 2; switch (addr) { case R_STAT_DIN: r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15]; if (s->rx_fifo_len) { r |= 1 << STAT_DAV; } r |= 1 << STAT_TR_RDY; r |= 1 << STAT_TR_IDLE; break; case RS_STAT_DIN: r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15]; if (s->rx_fifo_len) { r |= 1 << STAT_DAV; s->rx_fifo_len--; } r |= 1 << STAT_TR_RDY; r |= 1 << STAT_TR_IDLE; break; default: r = s->regs[addr]; D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr, r)); break; } return r; } | 16,073 |
0 | static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr) { PCIDevice *pdev = &vdev->pdev; VFIOQuirk *quirk; if (!vdev->has_vga || nr != 0 || pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) { return; } quirk = g_malloc0(sizeof(*quirk)); quirk->vdev = vdev; quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1; quirk->data.address_match = 0x88000; quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1; quirk->data.bar = nr; memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk, "vfio-nvidia-bar0-88000-quirk", TARGET_PAGE_ALIGN(quirk->data.address_mask + 1)); memory_region_add_subregion_overlap(&vdev->bars[nr].mem, quirk->data.address_match & TARGET_PAGE_MASK, &quirk->mem, 1); QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); } | 16,075 |
0 | static subpage_t *subpage_init(target_phys_addr_t base) { subpage_t *mmio; mmio = g_malloc0(sizeof(subpage_t)); mmio->base = base; memory_region_init_io(&mmio->iomem, &subpage_ops, mmio, "subpage", TARGET_PAGE_SIZE); mmio->iomem.subpage = true; #if defined(DEBUG_SUBPAGE) printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, mmio, base, TARGET_PAGE_SIZE, subpage_memory); #endif subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned); return mmio; } | 16,077 |
0 | static abi_long do_bind(int sockfd, abi_ulong target_addr, socklen_t addrlen) { void *addr; abi_long ret; if (addrlen < 0) return -TARGET_EINVAL; addr = alloca(addrlen+1); ret = target_to_host_sockaddr(addr, target_addr, addrlen); if (ret) return ret; return get_errno(bind(sockfd, addr, addrlen)); } | 16,078 |
0 | int net_client_init(Monitor *mon, const char *device, const char *p) { static const char * const fd_params[] = { "vlan", "name", "fd", NULL }; char buf[1024]; int vlan_id, ret; VLANState *vlan; char *name = NULL; vlan_id = 0; if (get_param_value(buf, sizeof(buf), "vlan", p)) { vlan_id = strtol(buf, NULL, 0); } vlan = qemu_find_vlan(vlan_id); if (get_param_value(buf, sizeof(buf), "name", p)) { name = qemu_strdup(buf); } if (!strcmp(device, "nic")) { static const char * const nic_params[] = { "vlan", "name", "macaddr", "model", "addr", NULL }; NICInfo *nd; uint8_t *macaddr; int idx = nic_get_free_idx(); if (check_params(buf, sizeof(buf), nic_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", buf, p); ret = -1; goto out; } if (idx == -1 || nb_nics >= MAX_NICS) { config_error(mon, "Too Many NICs\n"); ret = -1; goto out; } nd = &nd_table[idx]; macaddr = nd->macaddr; macaddr[0] = 0x52; macaddr[1] = 0x54; macaddr[2] = 0x00; macaddr[3] = 0x12; macaddr[4] = 0x34; macaddr[5] = 0x56 + idx; if (get_param_value(buf, sizeof(buf), "macaddr", p)) { if (parse_macaddr(macaddr, buf) < 0) { config_error(mon, "invalid syntax for ethernet address\n"); ret = -1; goto out; } } if (get_param_value(buf, sizeof(buf), "model", p)) { nd->model = strdup(buf); } if (get_param_value(buf, sizeof(buf), "addr", p)) { nd->devaddr = strdup(buf); } nd->vlan = vlan; nd->name = name; nd->used = 1; name = NULL; nb_nics++; vlan->nb_guest_devs++; ret = idx; } else if (!strcmp(device, "none")) { if (*p != '\0') { config_error(mon, "'none' takes no parameters\n"); ret = -1; goto out; } /* does nothing. It is needed to signal that no network cards are wanted */ ret = 0; } else #ifdef CONFIG_SLIRP if (!strcmp(device, "user")) { static const char * const slirp_params[] = { "vlan", "name", "hostname", "restrict", "ip", NULL }; int restricted = 0; char *ip = NULL; if (check_params(buf, sizeof(buf), slirp_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", buf, p); ret = -1; goto out; } if (get_param_value(buf, sizeof(buf), "hostname", p)) { pstrcpy(slirp_hostname, sizeof(slirp_hostname), buf); } if (get_param_value(buf, sizeof(buf), "restrict", p)) { restricted = (buf[0] == 'y') ? 
1 : 0; } if (get_param_value(buf, sizeof(buf), "ip", p)) { ip = qemu_strdup(buf); } vlan->nb_host_devs++; ret = net_slirp_init(vlan, device, name, restricted, ip); qemu_free(ip); } else if (!strcmp(device, "channel")) { long port; char name[20], *devname; struct VMChannel *vmc; port = strtol(p, &devname, 10); devname++; if (port < 1 || port > 65535) { config_error(mon, "vmchannel wrong port number\n"); ret = -1; goto out; } vmc = malloc(sizeof(struct VMChannel)); snprintf(name, 20, "vmchannel%ld", port); vmc->hd = qemu_chr_open(name, devname, NULL); if (!vmc->hd) { config_error(mon, "could not open vmchannel device '%s'\n", devname); ret = -1; goto out; } vmc->port = port; slirp_add_exec(3, vmc->hd, 4, port); qemu_chr_add_handlers(vmc->hd, vmchannel_can_read, vmchannel_read, NULL, vmc); ret = 0; } else #endif #ifdef _WIN32 if (!strcmp(device, "tap")) { static const char * const tap_params[] = { "vlan", "name", "ifname", NULL }; char ifname[64]; if (check_params(buf, sizeof(buf), tap_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", buf, p); ret = -1; goto out; } if (get_param_value(ifname, sizeof(ifname), "ifname", p) <= 0) { config_error(mon, "tap: no interface name\n"); ret = -1; goto out; } vlan->nb_host_devs++; ret = tap_win32_init(vlan, device, name, ifname); } else #elif defined (_AIX) #else if (!strcmp(device, "tap")) { char ifname[64], chkbuf[64]; char setup_script[1024], down_script[1024]; TAPState *s; int fd; vlan->nb_host_devs++; if (get_param_value(buf, sizeof(buf), "fd", p) > 0) { if (check_params(chkbuf, sizeof(chkbuf), fd_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", chkbuf, p); ret = -1; goto out; } fd = strtol(buf, NULL, 0); fcntl(fd, F_SETFL, O_NONBLOCK); s = net_tap_fd_init(vlan, device, name, fd); } else { static const char * const tap_params[] = { "vlan", "name", "ifname", "script", "downscript", NULL }; if (check_params(chkbuf, sizeof(chkbuf), tap_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", chkbuf, p); ret = -1; goto out; } if (get_param_value(ifname, sizeof(ifname), "ifname", p) <= 0) { ifname[0] = '\0'; } if (get_param_value(setup_script, sizeof(setup_script), "script", p) == 0) { pstrcpy(setup_script, sizeof(setup_script), DEFAULT_NETWORK_SCRIPT); } if (get_param_value(down_script, sizeof(down_script), "downscript", p) == 0) { pstrcpy(down_script, sizeof(down_script), DEFAULT_NETWORK_DOWN_SCRIPT); } s = net_tap_init(vlan, device, name, ifname, setup_script, down_script); } if (s != NULL) { ret = 0; } else { ret = -1; } } else #endif if (!strcmp(device, "socket")) { char chkbuf[64]; if (get_param_value(buf, sizeof(buf), "fd", p) > 0) { int fd; if (check_params(chkbuf, sizeof(chkbuf), fd_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", chkbuf, p); ret = -1; goto out; } fd = strtol(buf, NULL, 0); ret = -1; if (net_socket_fd_init(vlan, device, name, fd, 1)) ret = 0; } else if (get_param_value(buf, sizeof(buf), "listen", p) > 0) { static const char * const listen_params[] = { "vlan", "name", "listen", NULL }; if (check_params(chkbuf, sizeof(chkbuf), listen_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", chkbuf, p); ret = -1; goto out; } ret = net_socket_listen_init(vlan, device, name, buf); } else if (get_param_value(buf, sizeof(buf), "connect", p) > 0) { static const char * const connect_params[] = { "vlan", "name", "connect", NULL }; if (check_params(chkbuf, sizeof(chkbuf), connect_params, p) < 0) { config_error(mon, "invalid parameter '%s' in 
'%s'\n", chkbuf, p); ret = -1; goto out; } ret = net_socket_connect_init(vlan, device, name, buf); } else if (get_param_value(buf, sizeof(buf), "mcast", p) > 0) { static const char * const mcast_params[] = { "vlan", "name", "mcast", NULL }; if (check_params(chkbuf, sizeof(chkbuf), mcast_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", chkbuf, p); ret = -1; goto out; } ret = net_socket_mcast_init(vlan, device, name, buf); } else { config_error(mon, "Unknown socket options: %s\n", p); ret = -1; goto out; } vlan->nb_host_devs++; } else #ifdef CONFIG_VDE if (!strcmp(device, "vde")) { static const char * const vde_params[] = { "vlan", "name", "sock", "port", "group", "mode", NULL }; char vde_sock[1024], vde_group[512]; int vde_port, vde_mode; if (check_params(buf, sizeof(buf), vde_params, p) < 0) { config_error(mon, "invalid parameter '%s' in '%s'\n", buf, p); ret = -1; goto out; } vlan->nb_host_devs++; if (get_param_value(vde_sock, sizeof(vde_sock), "sock", p) <= 0) { vde_sock[0] = '\0'; } if (get_param_value(buf, sizeof(buf), "port", p) > 0) { vde_port = strtol(buf, NULL, 10); } else { vde_port = 0; } if (get_param_value(vde_group, sizeof(vde_group), "group", p) <= 0) { vde_group[0] = '\0'; } if (get_param_value(buf, sizeof(buf), "mode", p) > 0) { vde_mode = strtol(buf, NULL, 8); } else { vde_mode = 0700; } ret = net_vde_init(vlan, device, name, vde_sock, vde_port, vde_group, vde_mode); } else #endif if (!strcmp(device, "dump")) { int len = 65536; if (get_param_value(buf, sizeof(buf), "len", p) > 0) { len = strtol(buf, NULL, 0); } if (!get_param_value(buf, sizeof(buf), "file", p)) { snprintf(buf, sizeof(buf), "qemu-vlan%d.pcap", vlan_id); } ret = net_dump_init(mon, vlan, device, name, buf, len); } else { config_error(mon, "Unknown network device: %s\n", device); ret = -1; goto out; } if (ret < 0) { config_error(mon, "Could not initialize device '%s'\n", device); } out: qemu_free(name); return ret; } | 16,079 |
0 | void tcg_target_qemu_prologue (TCGContext *s) { int i, frame_size; frame_size = 0 + 8 /* back chain */ + 8 /* CR */ + 8 /* LR */ + 8 /* compiler doubleword */ + 8 /* link editor doubleword */ + 8 /* TOC save area */ + TCG_STATIC_CALL_ARGS_SIZE + ARRAY_SIZE (tcg_target_callee_save_regs) * 8 ; frame_size = (frame_size + 15) & ~15; tcg_out32 (s, MFSPR | RT (0) | LR); tcg_out32 (s, STDU | RS (1) | RA (1) | (-frame_size & 0xffff)); for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i) tcg_out32 (s, (STD | RS (tcg_target_callee_save_regs[i]) | RA (1) | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE) ) ); tcg_out32 (s, STD | RS (0) | RA (1) | (frame_size + 20)); tcg_out32 (s, STD | RS (2) | RA (1) | (frame_size + 40)); tcg_out32 (s, MTSPR | RS (3) | CTR); tcg_out32 (s, BCCTR | BO_ALWAYS); tb_ret_addr = s->code_ptr; for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i) tcg_out32 (s, (LD | RT (tcg_target_callee_save_regs[i]) | RA (1) | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE) ) ); tcg_out32 (s, LD | RT (0) | RA (1) | (frame_size + 20)); tcg_out32 (s, LD | RT (2) | RA (1) | (frame_size + 40)); tcg_out32 (s, MTSPR | RS (0) | LR); tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size); tcg_out32 (s, BCLR | BO_ALWAYS); } | 16,080 |
0 | static void pxa2xx_pm_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { PXA2xxState *s = (PXA2xxState *) opaque; switch (addr) { case PMCR: /* Clear the write-one-to-clear bits... */ s->pm_regs[addr >> 2] &= ~(value & 0x2a); /* ...and set the plain r/w bits */ s->pm_regs[addr >> 2] &= ~0x15; s->pm_regs[addr >> 2] |= value & 0x15; break; case PSSR: /* Read-clean registers */ case RCSR: case PKSR: s->pm_regs[addr >> 2] &= ~value; break; default: /* Read-write registers */ if (!(addr & 3)) { s->pm_regs[addr >> 2] = value; break; } printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr); break; } } | 16,081 |
0 | static unsigned int dec_rfe_etc(DisasContext *dc) { cris_cc_mask(dc, 0); if (dc->op2 == 15) /* ignore halt. */ return 2; switch (dc->op2 & 7) { case 2: /* rfe. */ DIS(fprintf(logfile, "rfe\n")); cris_evaluate_flags(dc); tcg_gen_helper_0_0(helper_rfe); dc->is_jmp = DISAS_UPDATE; break; case 5: /* rfn. */ DIS(fprintf(logfile, "rfn\n")); cris_evaluate_flags(dc); tcg_gen_helper_0_0(helper_rfn); dc->is_jmp = DISAS_UPDATE; break; case 6: DIS(fprintf(logfile, "break %d\n", dc->op1)); cris_evaluate_flags (dc); /* break. */ tcg_gen_movi_tl(env_pc, dc->pc + 2); /* Breaks start at 16 in the exception vector. */ t_gen_mov_env_TN(trap_vector, tcg_const_tl(dc->op1 + 16)); t_gen_raise_exception(EXCP_BREAK); dc->is_jmp = DISAS_UPDATE; break; default: printf ("op2=%x\n", dc->op2); BUG(); break; } return 2; } | 16,082 |
0 | ram_addr_t get_current_ram_size(void) { MemoryDeviceInfoList *info_list = NULL; MemoryDeviceInfoList **prev = &info_list; MemoryDeviceInfoList *info; ram_addr_t size = ram_size; qmp_pc_dimm_device_list(qdev_get_machine(), &prev); for (info = info_list; info; info = info->next) { MemoryDeviceInfo *value = info->value; if (value) { switch (value->kind) { case MEMORY_DEVICE_INFO_KIND_DIMM: size += value->dimm->size; break; default: break; } } } qapi_free_MemoryDeviceInfoList(info_list); return size; } | 16,084 |
0 | static int ipvideo_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; IpvideoContext *s = avctx->priv_data; AVFrame *frame = data; int ret; int send_buffer; int frame_format; int video_data_size; if (av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, NULL)) { av_frame_unref(s->last_frame); av_frame_unref(s->second_last_frame); } if (buf_size < 6) return AVERROR_INVALIDDATA; frame_format = AV_RL8(buf); send_buffer = AV_RL8(buf + 1); video_data_size = AV_RL16(buf + 2); s->decoding_map_size = AV_RL16(buf + 4); if (frame_format != 0x11) av_log(avctx, AV_LOG_ERROR, "Frame type 0x%02X unsupported\n", frame_format); if (! s->decoding_map_size) { av_log(avctx, AV_LOG_ERROR, "Empty decoding map\n"); return AVERROR_INVALIDDATA; } bytestream2_init(&s->stream_ptr, buf + 6, video_data_size); /* decoding map contains 4 bits of information per 8x8 block */ s->decoding_map = buf + 6 + video_data_size; /* ensure we can't overread the packet */ if (buf_size < 6 + s->decoding_map_size + video_data_size) { av_log(avctx, AV_LOG_ERROR, "Invalid IP packet size\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; if (!s->is_16bpp) { int size; const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &size); if (pal && size == AVPALETTE_SIZE) { frame->palette_has_changed = 1; memcpy(s->pal, pal, AVPALETTE_SIZE); } else if (pal) { av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size); } } ipvideo_decode_opcodes(s, frame); *got_frame = send_buffer; /* shuffle frames */ av_frame_unref(s->second_last_frame); FFSWAP(AVFrame*, s->second_last_frame, s->last_frame); if ((ret = av_frame_ref(s->last_frame, frame)) < 0) return ret; /* report that the buffer was completely consumed */ return buf_size; } | 16,085 |
0 | static ssize_t fd_put_buffer(void *opaque, const void *data, size_t size) { FdMigrationState *s = opaque; ssize_t ret; do { ret = write(s->fd, data, size); } while (ret == -1 && errno == EINTR); if (ret == -1) ret = -errno; if (ret == -EAGAIN) qemu_set_fd_handler2(s->fd, NULL, NULL, fd_put_notify, s); return ret; } | 16,087 |
0 | static void tap_send(void *opaque) { TAPState *s = opaque; int size; int packets = 0; while (qemu_can_send_packet(&s->nc)) { uint8_t *buf = s->buf; size = tap_read_packet(s->fd, s->buf, sizeof(s->buf)); if (size <= 0) { break; } if (s->host_vnet_hdr_len && !s->using_vnet_hdr) { buf += s->host_vnet_hdr_len; size -= s->host_vnet_hdr_len; } size = qemu_send_packet_async(&s->nc, buf, size, tap_send_completed); if (size == 0) { tap_read_poll(s, false); break; } else if (size < 0) { break; } /* * When the host keeps receiving more packets while tap_send() is * running we can hog the QEMU global mutex. Limit the number of * packets that are processed per tap_send() callback to prevent * stalling the guest. */ packets++; if (packets >= 50) { break; } } } | 16,088 |
0 | pflash_t *pflash_cfi01_register(target_phys_addr_t base, ram_addr_t off, BlockDriverState *bs, uint32_t sector_len, int nb_blocs, int width, uint16_t id0, uint16_t id1, uint16_t id2, uint16_t id3) { pflash_t *pfl; target_phys_addr_t total_len; total_len = sector_len * nb_blocs; /* XXX: to be fixed */ #if 0 if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) && total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024)) return NULL; #endif pfl = qemu_mallocz(sizeof(pflash_t)); /* FIXME: Allocate ram ourselves. */ pfl->storage = qemu_get_ram_ptr(off); pfl->fl_mem = cpu_register_io_memory( pflash_read_ops, pflash_write_ops, pfl); pfl->off = off; cpu_register_physical_memory(base, total_len, off | pfl->fl_mem | IO_MEM_ROMD); pfl->bs = bs; if (pfl->bs) { /* read the initial flash content */ bdrv_read(pfl->bs, 0, pfl->storage, total_len >> 9); } #if 0 /* XXX: there should be a bit to set up read-only, * the same way the hardware does (with WP pin). */ pfl->ro = 1; #else pfl->ro = 0; #endif pfl->timer = qemu_new_timer(vm_clock, pflash_timer, pfl); pfl->base = base; pfl->sector_len = sector_len; pfl->total_len = total_len; pfl->width = width; pfl->wcycle = 0; pfl->cmd = 0; pfl->status = 0; pfl->ident[0] = id0; pfl->ident[1] = id1; pfl->ident[2] = id2; pfl->ident[3] = id3; /* Hardcoded CFI table */ pfl->cfi_len = 0x52; /* Standard "QRY" string */ pfl->cfi_table[0x10] = 'Q'; pfl->cfi_table[0x11] = 'R'; pfl->cfi_table[0x12] = 'Y'; /* Command set (Intel) */ pfl->cfi_table[0x13] = 0x01; pfl->cfi_table[0x14] = 0x00; /* Primary extended table address (none) */ pfl->cfi_table[0x15] = 0x31; pfl->cfi_table[0x16] = 0x00; /* Alternate command set (none) */ pfl->cfi_table[0x17] = 0x00; pfl->cfi_table[0x18] = 0x00; /* Alternate extended table (none) */ pfl->cfi_table[0x19] = 0x00; pfl->cfi_table[0x1A] = 0x00; /* Vcc min */ pfl->cfi_table[0x1B] = 0x45; /* Vcc max */ pfl->cfi_table[0x1C] = 0x55; /* Vpp min (no Vpp pin) */ pfl->cfi_table[0x1D] = 0x00; /* Vpp max (no Vpp pin) */ pfl->cfi_table[0x1E] = 0x00; /* Reserved */ pfl->cfi_table[0x1F] = 0x07; /* Timeout for min size buffer write */ pfl->cfi_table[0x20] = 0x07; /* Typical timeout for block erase */ pfl->cfi_table[0x21] = 0x0a; /* Typical timeout for full chip erase (4096 ms) */ pfl->cfi_table[0x22] = 0x00; /* Reserved */ pfl->cfi_table[0x23] = 0x04; /* Max timeout for buffer write */ pfl->cfi_table[0x24] = 0x04; /* Max timeout for block erase */ pfl->cfi_table[0x25] = 0x04; /* Max timeout for chip erase */ pfl->cfi_table[0x26] = 0x00; /* Device size */ pfl->cfi_table[0x27] = ctz32(total_len); // + 1; /* Flash device interface (8 & 16 bits) */ pfl->cfi_table[0x28] = 0x02; pfl->cfi_table[0x29] = 0x00; /* Max number of bytes in multi-bytes write */ pfl->cfi_table[0x2A] = 0x0B; pfl->cfi_table[0x2B] = 0x00; /* Number of erase block regions (uniform) */ pfl->cfi_table[0x2C] = 0x01; /* Erase block region 1 */ pfl->cfi_table[0x2D] = nb_blocs - 1; pfl->cfi_table[0x2E] = (nb_blocs - 1) >> 8; pfl->cfi_table[0x2F] = sector_len >> 8; pfl->cfi_table[0x30] = sector_len >> 16; /* Extended */ pfl->cfi_table[0x31] = 'P'; pfl->cfi_table[0x32] = 'R'; pfl->cfi_table[0x33] = 'I'; pfl->cfi_table[0x34] = '1'; pfl->cfi_table[0x35] = '1'; pfl->cfi_table[0x36] = 0x00; pfl->cfi_table[0x37] = 0x00; pfl->cfi_table[0x38] = 0x00; pfl->cfi_table[0x39] = 0x00; pfl->cfi_table[0x3a] = 0x00; pfl->cfi_table[0x3b] = 0x00; pfl->cfi_table[0x3c] = 0x00; return pfl; } | 16,091 |
0 | static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; uint32_t addr = si->param & 0x7fffe000u; cpu_synchronize_state(cs); if (!address_space_access_valid(&address_space_memory, addr, sizeof(struct LowCore), false)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } /* cpu has to be stopped */ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } cpu->env.psa = addr; cpu_synchronize_post_init(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } | 16,092 |
1 | static void memory_map_init(void) { system_memory = qemu_malloc(sizeof(*system_memory)); memory_region_init(system_memory, "system", UINT64_MAX); set_system_memory_map(system_memory); } | 16,094 |
1 | static void qxl_realize_primary(PCIDevice *dev, Error **errp) { PCIQXLDevice *qxl = PCI_QXL(dev); VGACommonState *vga = &qxl->vga; Error *local_err = NULL; qxl->id = 0; qxl_init_ramsize(qxl); vga->vbe_size = qxl->vgamem_size; vga->vram_size_mb = qxl->vga.vram_size >> 20; vga_common_init(vga, OBJECT(dev), true); vga_init(vga, OBJECT(dev), pci_address_space(dev), pci_address_space_io(dev), false); portio_list_init(&qxl->vga_port_list, OBJECT(dev), qxl_vga_portio_list, vga, "vga"); portio_list_set_flush_coalesced(&qxl->vga_port_list); portio_list_add(&qxl->vga_port_list, pci_address_space_io(dev), 0x3b0); vga->con = graphic_console_init(DEVICE(dev), 0, &qxl_ops, qxl); qemu_spice_display_init_common(&qxl->ssd); qxl_realize_common(qxl, &local_err); if (local_err) { error_propagate(errp, local_err); return; } qxl->ssd.dcl.ops = &display_listener_ops; qxl->ssd.dcl.con = vga->con; register_displaychangelistener(&qxl->ssd.dcl); } | 16,095 |
1 | const char *path(const char *name) { /* Only do absolute paths: quick and dirty, but should mostly be OK. Could do relative by tracking cwd. */ if (!base || name[0] != '/') return name; return follow_path(base, name) ?: name; } | 16,096 |
1 | static int aac_decode_er_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, GetBitContext *gb) { AACContext *ac = avctx->priv_data; const MPEG4AudioConfig *const m4ac = &ac->oc[1].m4ac; ChannelElement *che; int err, i; int samples = m4ac->frame_length_short ? 960 : 1024; int chan_config = m4ac->chan_config; int aot = m4ac->object_type; if (aot == AOT_ER_AAC_LD || aot == AOT_ER_AAC_ELD) samples >>= 1; ac->frame = data; if ((err = frame_configure_elements(avctx)) < 0) return err; // The FF_PROFILE_AAC_* defines are all object_type - 1 // This may lead to an undefined profile being signaled ac->avctx->profile = aot - 1; ac->tags_mapped = 0; if (chan_config < 0 || (chan_config >= 8 && chan_config < 11) || chan_config >= 13) { avpriv_request_sample(avctx, "Unknown ER channel configuration %d", chan_config); return AVERROR_INVALIDDATA; } for (i = 0; i < tags_per_config[chan_config]; i++) { const int elem_type = aac_channel_layout_map[chan_config-1][i][0]; const int elem_id = aac_channel_layout_map[chan_config-1][i][1]; if (!(che=get_che(ac, elem_type, elem_id))) { av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n", elem_type, elem_id); return AVERROR_INVALIDDATA; } che->present = 1; if (aot != AOT_ER_AAC_ELD) skip_bits(gb, 4); switch (elem_type) { case TYPE_SCE: err = decode_ics(ac, &che->ch[0], gb, 0, 0); break; case TYPE_CPE: err = decode_cpe(ac, gb, che); break; case TYPE_LFE: err = decode_ics(ac, &che->ch[0], gb, 0, 0); break; } if (err < 0) return err; } spectral_to_sample(ac); ac->frame->nb_samples = samples; ac->frame->sample_rate = avctx->sample_rate; *got_frame_ptr = 1; skip_bits_long(gb, get_bits_left(gb)); return 0; } | 16,097 |
1 | static void versatile_init(MachineState *machine, int board_id) { ObjectClass *cpu_oc; Object *cpuobj; ARMCPU *cpu; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); qemu_irq pic[32]; qemu_irq sic[32]; DeviceState *dev, *sysctl; SysBusDevice *busdev; DeviceState *pl041; PCIBus *pci_bus; NICInfo *nd; I2CBus *i2c; int n; int done_smc = 0; DriveInfo *dinfo; if (!machine->cpu_model) { machine->cpu_model = "arm926"; cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, machine->cpu_model); if (!cpu_oc) { fprintf(stderr, "Unable to find CPU definition\n"); cpuobj = object_new(object_class_get_name(cpu_oc)); /* By default ARM1176 CPUs have EL3 enabled. This board does not * currently support EL3 so the CPU EL3 property is disabled before * realization. if (object_property_find(cpuobj, "has_el3", NULL)) { object_property_set_bool(cpuobj, false, "has_el3", &error_fatal); object_property_set_bool(cpuobj, true, "realized", &error_fatal); cpu = ARM_CPU(cpuobj); memory_region_allocate_system_memory(ram, NULL, "versatile.ram", machine->ram_size); /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero. */ memory_region_add_subregion(sysmem, 0, ram); sysctl = qdev_create(NULL, "realview_sysctl"); qdev_prop_set_uint32(sysctl, "sys_id", 0x41007004); qdev_prop_set_uint32(sysctl, "proc_id", 0x02000000); qdev_init_nofail(sysctl); sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000); dev = sysbus_create_varargs("pl190", 0x10140000, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ), qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ), NULL); for (n = 0; n < 32; n++) { pic[n] = qdev_get_gpio_in(dev, n); dev = sysbus_create_simple(TYPE_VERSATILE_PB_SIC, 0x10003000, NULL); for (n = 0; n < 32; n++) { sysbus_connect_irq(SYS_BUS_DEVICE(dev), n, pic[n]); sic[n] = qdev_get_gpio_in(dev, n); sysbus_create_simple("pl050_keyboard", 0x10006000, sic[3]); sysbus_create_simple("pl050_mouse", 0x10007000, sic[4]); dev = qdev_create(NULL, "versatile_pci"); busdev = SYS_BUS_DEVICE(dev); qdev_init_nofail(dev); sysbus_mmio_map(busdev, 0, 0x10001000); /* PCI controller regs */ sysbus_mmio_map(busdev, 1, 0x41000000); /* PCI self-config */ sysbus_mmio_map(busdev, 2, 0x42000000); /* PCI config */ sysbus_mmio_map(busdev, 3, 0x43000000); /* PCI I/O */ sysbus_mmio_map(busdev, 4, 0x44000000); /* PCI memory window 1 */ sysbus_mmio_map(busdev, 5, 0x50000000); /* PCI memory window 2 */ sysbus_mmio_map(busdev, 6, 0x60000000); /* PCI memory window 3 */ sysbus_connect_irq(busdev, 0, sic[27]); sysbus_connect_irq(busdev, 1, sic[28]); sysbus_connect_irq(busdev, 2, sic[29]); sysbus_connect_irq(busdev, 3, sic[30]); pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci"); for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if (!done_smc && (!nd->model || strcmp(nd->model, "smc91c111") == 0)) { smc91c111_init(nd, 0x10010000, sic[25]); done_smc = 1; } else { pci_nic_init_nofail(nd, pci_bus, "rtl8139", NULL); if (machine_usb(machine)) { pci_create_simple(pci_bus, -1, "pci-ohci"); n = drive_get_max_bus(IF_SCSI); while (n >= 0) { pci_create_simple(pci_bus, -1, "lsi53c895a"); n--; pl011_create(0x101f1000, pic[12], serial_hds[0]); pl011_create(0x101f2000, pic[13], serial_hds[1]); pl011_create(0x101f3000, pic[14], serial_hds[2]); pl011_create(0x10009000, sic[6], serial_hds[3]); sysbus_create_simple("pl080", 0x10130000, pic[17]); sysbus_create_simple("sp804", 0x101e2000, pic[4]); sysbus_create_simple("sp804", 0x101e3000, pic[5]); sysbus_create_simple("pl061", 0x101e4000, pic[6]); sysbus_create_simple("pl061", 0x101e5000, 
pic[7]); sysbus_create_simple("pl061", 0x101e6000, pic[8]); sysbus_create_simple("pl061", 0x101e7000, pic[9]); /* The versatile/PB actually has a modified Color LCD controller that includes hardware cursor support from the PL111. */ dev = sysbus_create_simple("pl110_versatile", 0x10120000, pic[16]); /* Wire up the mux control signals from the SYS_CLCD register */ qdev_connect_gpio_out(sysctl, 0, qdev_get_gpio_in(dev, 0)); sysbus_create_varargs("pl181", 0x10005000, sic[22], sic[1], NULL); sysbus_create_varargs("pl181", 0x1000b000, sic[23], sic[2], NULL); /* Add PL031 Real Time Clock. */ sysbus_create_simple("pl031", 0x101e8000, pic[10]); dev = sysbus_create_simple("versatile_i2c", 0x10002000, NULL); i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); i2c_create_slave(i2c, "ds1338", 0x68); /* Add PL041 AACI Interface to the LM4549 codec */ pl041 = qdev_create(NULL, "pl041"); qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); qdev_init_nofail(pl041); sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000); sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, sic[24]); /* Memory map for Versatile/PB: */ /* 0x10000000 System registers. */ /* 0x10001000 PCI controller config registers. */ /* 0x10002000 Serial bus interface. */ /* 0x10003000 Secondary interrupt controller. */ /* 0x10004000 AACI (audio). */ /* 0x10005000 MMCI0. */ /* 0x10006000 KMI0 (keyboard). */ /* 0x10007000 KMI1 (mouse). */ /* 0x10008000 Character LCD Interface. */ /* 0x10009000 UART3. */ /* 0x1000a000 Smart card 1. */ /* 0x1000b000 MMCI1. */ /* 0x10010000 Ethernet. */ /* 0x10020000 USB. */ /* 0x10100000 SSMC. */ /* 0x10110000 MPMC. */ /* 0x10120000 CLCD Controller. */ /* 0x10130000 DMA Controller. */ /* 0x10140000 Vectored interrupt controller. */ /* 0x101d0000 AHB Monitor Interface. */ /* 0x101e0000 System Controller. */ /* 0x101e1000 Watchdog Interface. */ /* 0x101e2000 Timer 0/1. */ /* 0x101e3000 Timer 2/3. */ /* 0x101e4000 GPIO port 0. */ /* 0x101e5000 GPIO port 1. */ /* 0x101e6000 GPIO port 2. */ /* 0x101e7000 GPIO port 3. */ /* 0x101e8000 RTC. */ /* 0x101f0000 Smart card 0. */ /* 0x101f1000 UART0. */ /* 0x101f2000 UART1. */ /* 0x101f3000 UART2. */ /* 0x101f4000 SSPI. */ /* 0x34000000 NOR Flash */ dinfo = drive_get(IF_PFLASH, 0, 0); if (!pflash_cfi01_register(VERSATILE_FLASH_ADDR, NULL, "versatile.flash", VERSATILE_FLASH_SIZE, dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, VERSATILE_FLASH_SECT_SIZE, VERSATILE_FLASH_SIZE / VERSATILE_FLASH_SECT_SIZE, 4, 0x0089, 0x0018, 0x0000, 0x0, 0)) { fprintf(stderr, "qemu: Error registering flash memory.\n"); versatile_binfo.ram_size = machine->ram_size; versatile_binfo.kernel_filename = machine->kernel_filename; versatile_binfo.kernel_cmdline = machine->kernel_cmdline; versatile_binfo.initrd_filename = machine->initrd_filename; versatile_binfo.board_id = board_id; arm_load_kernel(cpu, &versatile_binfo); | 16,098 |
1 | int css_do_ssch(SubchDev *sch, ORB *orb) { SCSW *s = &sch->curr_status.scsw; PMCW *p = &sch->curr_status.pmcw; int ret; if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { ret = -ENODEV; goto out; } if (s->ctrl & SCSW_STCTL_STATUS_PEND) { ret = -EINPROGRESS; goto out; } if (s->ctrl & (SCSW_FCTL_START_FUNC | SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { ret = -EBUSY; goto out; } /* If monitoring is active, update counter. */ if (channel_subsys.chnmon_active) { css_update_chnmon(sch); } sch->orb = *orb; sch->channel_prog = orb->cpa; /* Trigger the start function. */ s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); s->flags &= ~SCSW_FLAGS_MASK_PNO; ret = do_subchannel_work(sch); out: return ret; } | 16,099 |
1 | static int mxf_edit_unit_absolute_offset(MXFContext *mxf, MXFIndexTable *index_table, int64_t edit_unit, int64_t *edit_unit_out, int64_t *offset_out, int nag) { int i; int offset_temp = 0; for (i = 0; i < index_table->nb_segments; i++) { MXFIndexTableSegment *s = index_table->segments[i]; edit_unit = FFMAX(edit_unit, s->index_start_position); /* clamp if trying to seek before start */ if (edit_unit < s->index_start_position + s->index_duration) { int64_t index = edit_unit - s->index_start_position; if (s->edit_unit_byte_count) offset_temp += s->edit_unit_byte_count * index; else if (s->nb_index_entries) { if (s->nb_index_entries == 2 * s->index_duration + 1) index *= 2; /* Avid index */ if (index < 0 || index > s->nb_index_entries) { av_log(mxf->fc, AV_LOG_ERROR, "IndexSID %i segment at %"PRId64" IndexEntryArray too small\n", index_table->index_sid, s->index_start_position); return AVERROR_INVALIDDATA; } offset_temp = s->stream_offset_entries[index]; } else { av_log(mxf->fc, AV_LOG_ERROR, "IndexSID %i segment at %"PRId64" missing EditUnitByteCount and IndexEntryArray\n", index_table->index_sid, s->index_start_position); return AVERROR_INVALIDDATA; } if (edit_unit_out) *edit_unit_out = edit_unit; return mxf_absolute_bodysid_offset(mxf, index_table->body_sid, offset_temp, offset_out); } else { /* EditUnitByteCount == 0 for VBR indexes, which is fine since they use explicit StreamOffsets */ offset_temp += s->edit_unit_byte_count * s->index_duration; } } if (nag) av_log(mxf->fc, AV_LOG_ERROR, "failed to map EditUnit %"PRId64" in IndexSID %i to an offset\n", edit_unit, index_table->index_sid); return AVERROR_INVALIDDATA; } | 16,100 |
1 | QEMUPutLEDEntry *qemu_add_led_event_handler(QEMUPutLEDEvent *func, void *opaque) { QEMUPutLEDEntry *s; s = g_malloc0(sizeof(QEMUPutLEDEntry)); s->put_led = func; s->opaque = opaque; QTAILQ_INSERT_TAIL(&led_handlers, s, next); return s; } | 16,101 |
1 | static int mp3_read_probe(AVProbeData *p) { int max_frames, first_frames; int fsize, frames, sample_rate; uint32_t header; uint8_t *buf, *buf2, *end; AVCodecContext avctx; if(id3v2_match(p->buf)) return AVPROBE_SCORE_MAX/2+1; // this must be less than mpeg-ps because some retards put id3v2 tags before mpeg-ps files max_frames = 0; buf = p->buf; end = buf + FFMIN(4096, p->buf_size - sizeof(uint32_t)); for(; buf < end; buf++) { buf2 = buf; for(frames = 0; buf2 < end; frames++) { header = AV_RB32(buf2); fsize = ff_mpa_decode_header(&avctx, header, &sample_rate); if(fsize < 0) break; buf2 += fsize; } max_frames = FFMAX(max_frames, frames); if(buf == p->buf) first_frames= frames; } if (first_frames>=3) return AVPROBE_SCORE_MAX/2+1; else if(max_frames>=3) return AVPROBE_SCORE_MAX/4; else if(max_frames>=1) return 1; else return 0; } | 16,102 |
1 | static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; ImageInfoSpecific *spec_info = g_new(ImageInfoSpecific, 1); *spec_info = (ImageInfoSpecific){ .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1), }; if (s->qcow_version == 2) { *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ .compat = g_strdup("0.10"), .refcount_bits = s->refcount_bits, }; } else if (s->qcow_version == 3) { *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ .compat = g_strdup("1.1"), .lazy_refcounts = s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS, .has_lazy_refcounts = true, .corrupt = s->incompatible_features & QCOW2_INCOMPAT_CORRUPT, .has_corrupt = true, .refcount_bits = s->refcount_bits, }; } else { /* if this assertion fails, this probably means a new version was * added without having it covered here */ assert(false); } return spec_info; } | 16,103 |
1 | static int gif_video_probe(AVProbeData * pd) { const uint8_t *p, *p_end; int bits_per_pixel, has_global_palette, ext_code, ext_len; int gce_flags, gce_disposal; if (pd->buf_size < 24 || memcmp(pd->buf, gif89a_sig, 6) != 0) return 0; p_end = pd->buf + pd->buf_size; p = pd->buf + 6; bits_per_pixel = (p[4] & 0x07) + 1; has_global_palette = (p[4] & 0x80); p += 7; if (has_global_palette) p += (1 << bits_per_pixel) * 3; for(;;) { if (p >= p_end) return 0; if (*p != '!') break; p++; if (p >= p_end) return 0; ext_code = *p++; if (p >= p_end) return 0; ext_len = *p++; if (ext_code == 0xf9) { if (p >= p_end) return 0; /* if GCE extension found with gce_disposal != 0: it is likely to be an animation */ gce_flags = *p++; gce_disposal = (gce_flags >> 2) & 0x7; if (gce_disposal != 0) return AVPROBE_SCORE_MAX; else return 0; } for(;;) { if (ext_len == 0) break; p += ext_len; if (p >= p_end) return 0; ext_len = *p++; } } return 0; } | 16,104 |
0 | static int vcr2_init_sequence(AVCodecContext *avctx) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; int i, v, ret; /* start new MPEG-1 context decoding */ s->out_format = FMT_MPEG1; if (s1->mpeg_enc_ctx_allocated) { ff_mpv_common_end(s); } s->width = avctx->coded_width; s->height = avctx->coded_height; avctx->has_b_frames = 0; // true? s->low_delay = 1; avctx->pix_fmt = mpeg_get_pixelformat(avctx); #if FF_API_XVMC if ((avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel) && avctx->idct_algo == FF_IDCT_AUTO) #else if (avctx->hwaccel && avctx->idct_algo == FF_IDCT_AUTO) #endif /* FF_API_XVMC */ avctx->idct_algo = FF_IDCT_SIMPLE; ff_mpv_idct_init(s); if ((ret = ff_mpv_common_init(s)) < 0) return ret; s1->mpeg_enc_ctx_allocated = 1; for (i = 0; i < 64; i++) { int j = s->idsp.idct_permutation[i]; v = ff_mpeg1_default_intra_matrix[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; v = ff_mpeg1_default_non_intra_matrix[i]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } s->progressive_sequence = 1; s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; s->chroma_format = 1; s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO; s1->save_width = s->width; s1->save_height = s->height; s1->save_progressive_seq = s->progressive_sequence; return 0; } | 16,105 |
0 | static void writer_print_time(WriterContext *wctx, const char *key, int64_t ts, const AVRational *time_base, int is_duration) { char buf[128]; if ((!is_duration && ts == AV_NOPTS_VALUE) || (is_duration && ts == 0)) { writer_print_string(wctx, key, "N/A", 1); } else { double d = ts * av_q2d(*time_base); struct unit_value uv; uv.val.d = d; uv.unit = unit_second_str; value_string(buf, sizeof(buf), uv); writer_print_string(wctx, key, buf, 0); } } | 16,106 |
1 | static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height) { int x, y, i, ret; /* when we pass a frame to the encoder, it may keep a reference to it * internally; * make sure we do not overwrite it here */ ret = av_frame_make_writable(pict); if (ret < 0) exit(1); i = frame_index; /* Y */ for (y = 0; y < height; y++) for (x = 0; x < width; x++) pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; /* Cb and Cr */ for (y = 0; y < height / 2; y++) { for (x = 0; x < width / 2; x++) { pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; } } } | 16,107 |
1 | static int get_int32(QEMUFile *f, void *pv, size_t size) { int32_t *v = pv; qemu_get_sbe32s(f, v); return 0; } | 16,108 |
1 | static long do_rt_sigreturn_v1(CPUARMState *env) { abi_ulong frame_addr; struct rt_sigframe_v1 *frame = NULL; sigset_t host_set; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. */ frame_addr = env->regs[13]; trace_user_do_rt_sigreturn(env, frame_addr); if (frame_addr & 7) { goto badframe; } if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); set_sigmask(&host_set); if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { goto badframe; } if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) goto badframe; #if 0 /* Send SIGTRAP if we're single-stepping */ if (ptrace_cancel_bpt(current)) send_sig(SIGTRAP, current, 1); #endif unlock_user_struct(frame, frame_addr, 0); return -TARGET_QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV /* , current */); return 0; } | 16,110 |
1 | static int decode_q_branch(SnowContext *s, int level, int x, int y){ const int w= s->b_width << s->block_max_depth; const int rem_depth= s->block_max_depth - level; const int index= (x + y*w) << rem_depth; int trx= (x+1)<<rem_depth; const BlockNode *left = x ? &s->block[index-1] : &null_block; const BlockNode *top = y ? &s->block[index-w] : &null_block; const BlockNode *tl = y && x ? &s->block[index-w-1] : left; const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt int s_context= 2*left->level + 2*top->level + tl->level + tr->level; int res; if(s->keyframe){ set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA); return 0; } if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){ int type, mx, my; int l = left->color[0]; int cb= left->color[1]; int cr= left->color[2]; unsigned ref = 0; int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref); int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx)); int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my)); type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0; if(type){ pred_mv(s, &mx, &my, 0, left, top, tr); l += get_symbol(&s->c, &s->block_state[32], 1); if (s->nb_planes > 2) { cb+= get_symbol(&s->c, &s->block_state[64], 1); cr+= get_symbol(&s->c, &s->block_state[96], 1); } }else{ if(s->ref_frames > 1) ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0); if (ref >= s->ref_frames) { av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n"); return AVERROR_INVALIDDATA; } pred_mv(s, &mx, &my, ref, left, top, tr); mx+= get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1); my+= get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1); } set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type); }else{ if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 || (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 || (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 || (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0) return res; } return 0; } | 16,111 |
1 | cl_program av_opencl_compile(const char *program_name, const char *build_opts) { int i; cl_int status, build_status; int kernel_code_idx = 0; const char *kernel_source; size_t kernel_code_len; char* ptr = NULL; cl_program program = NULL; size_t log_size; char *log = NULL; LOCK_OPENCL; for (i = 0; i < opencl_ctx.kernel_code_count; i++) { // identify a program using a unique name within the kernel source ptr = av_stristr(opencl_ctx.kernel_code[i].kernel_string, program_name); if (ptr && !opencl_ctx.kernel_code[i].is_compiled) { kernel_source = opencl_ctx.kernel_code[i].kernel_string; kernel_code_len = strlen(opencl_ctx.kernel_code[i].kernel_string); kernel_code_idx = i; break; } } if (!kernel_source) { av_log(&opencl_ctx, AV_LOG_ERROR, "Unable to find OpenCL kernel source '%s'\n", program_name); goto end; } /* create a CL program from kernel source */ program = clCreateProgramWithSource(opencl_ctx.context, 1, &kernel_source, &kernel_code_len, &status); if(status != CL_SUCCESS) { av_log(&opencl_ctx, AV_LOG_ERROR, "Unable to create OpenCL program '%s': %s\n", program_name, av_opencl_errstr(status)); program = NULL; goto end; } build_status = clBuildProgram(program, 1, &(opencl_ctx.device_id), build_opts, NULL, NULL); status = clGetProgramBuildInfo(program, opencl_ctx.device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size); if (status != CL_SUCCESS) { av_log(&opencl_ctx, AV_LOG_WARNING, "Failed to get compilation log: %s\n", av_opencl_errstr(status)); } else { log = av_malloc(log_size); if (log) { status = clGetProgramBuildInfo(program, opencl_ctx.device_id, CL_PROGRAM_BUILD_LOG, log_size, log, NULL); if (status != CL_SUCCESS) { av_log(&opencl_ctx, AV_LOG_WARNING, "Failed to get compilation log: %s\n", av_opencl_errstr(status)); } else { int level = build_status == CL_SUCCESS ? AV_LOG_DEBUG : AV_LOG_ERROR; av_log(&opencl_ctx, level, "Compilation log:\n%s\n", log); } } av_freep(&log); } if (build_status != CL_SUCCESS) { av_log(&opencl_ctx, AV_LOG_ERROR, "Compilation failed with OpenCL program '%s': %s\n", program_name, av_opencl_errstr(build_status)); program = NULL; goto end; } opencl_ctx.kernel_code[kernel_code_idx].is_compiled = 1; end: UNLOCK_OPENCL; return program; } | 16,113 |
1 | static void output_segment_list(OutputStream *os, AVIOContext *out, DASHContext *c) { int i, start_index = 0, start_number = 1; if (c->window_size) { start_index = FFMAX(os->nb_segments - c->window_size, 0); start_number = FFMAX(os->segment_index - c->window_size, 1); } if (c->use_template) { int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : AV_TIME_BASE; avio_printf(out, "\t\t\t\t<SegmentTemplate timescale=\"%d\" ", timescale); if (!c->use_timeline) avio_printf(out, "duration=\"%d\" ", c->last_duration); avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\">\n", c->init_seg_name, c->media_seg_name, c->use_timeline ? start_number : 1); if (c->use_timeline) { avio_printf(out, "\t\t\t\t\t<SegmentTimeline>\n"); for (i = start_index; i < os->nb_segments; ) { Segment *seg = os->segments[i]; int repeat = 0; avio_printf(out, "\t\t\t\t\t\t<S "); if (i == start_index) avio_printf(out, "t=\"%"PRId64"\" ", seg->time); avio_printf(out, "d=\"%d\" ", seg->duration); while (i + repeat + 1 < os->nb_segments && os->segments[i + repeat + 1]->duration == seg->duration) repeat++; if (repeat > 0) avio_printf(out, "r=\"%d\" ", repeat); avio_printf(out, "/>\n"); i += 1 + repeat; } avio_printf(out, "\t\t\t\t\t</SegmentTimeline>\n"); } avio_printf(out, "\t\t\t\t</SegmentTemplate>\n"); } else if (c->single_file) { avio_printf(out, "\t\t\t\t<BaseURL>%s</BaseURL>\n", os->initfile); avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%d\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number); avio_printf(out, "\t\t\t\t\t<Initialization range=\"%"PRId64"-%"PRId64"\" />\n", os->init_start_pos, os->init_start_pos + os->init_range_length - 1); for (i = start_index; i < os->nb_segments; i++) { Segment *seg = os->segments[i]; avio_printf(out, "\t\t\t\t\t<SegmentURL mediaRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->range_length - 1); if (seg->index_length) avio_printf(out, "indexRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->index_length - 1); avio_printf(out, "/>\n"); } avio_printf(out, "\t\t\t\t</SegmentList>\n"); } else { avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%d\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number); avio_printf(out, "\t\t\t\t\t<Initialization sourceURL=\"%s\" />\n", os->initfile); for (i = start_index; i < os->nb_segments; i++) { Segment *seg = os->segments[i]; avio_printf(out, "\t\t\t\t\t<SegmentURL media=\"%s\" />\n", seg->file); } avio_printf(out, "\t\t\t\t</SegmentList>\n"); } } | 16,114 |
1 | int av_cold ff_ivi_init_tiles(IVIPlaneDesc *planes, int tile_width, int tile_height) { int p, b, x, y, x_tiles, y_tiles, t_width, t_height; IVIBandDesc *band; IVITile *tile, *ref_tile; for (p = 0; p < 3; p++) { t_width = !p ? tile_width : (tile_width + 3) >> 2; t_height = !p ? tile_height : (tile_height + 3) >> 2; if (!p && planes[0].num_bands == 4) { t_width >>= 1; t_height >>= 1; } for (b = 0; b < planes[p].num_bands; b++) { band = &planes[p].bands[b]; x_tiles = IVI_NUM_TILES(band->width, t_width); y_tiles = IVI_NUM_TILES(band->height, t_height); band->num_tiles = x_tiles * y_tiles; av_freep(&band->tiles); band->tiles = av_mallocz(band->num_tiles * sizeof(IVITile)); if (!band->tiles) return AVERROR(ENOMEM); tile = band->tiles; /* use the first luma band as reference for motion vectors * and quant */ ref_tile = planes[0].bands[0].tiles; for (y = 0; y < band->height; y += t_height) { for (x = 0; x < band->width; x += t_width) { tile->xpos = x; tile->ypos = y; tile->width = FFMIN(band->width - x, t_width); tile->height = FFMIN(band->height - y, t_height); tile->is_empty = tile->data_size = 0; /* calculate number of macroblocks */ tile->num_MBs = IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size); av_freep(&tile->mbs); tile->mbs = av_malloc(tile->num_MBs * sizeof(IVIMbInfo)); if (!tile->mbs) return AVERROR(ENOMEM); tile->ref_mbs = 0; if (p || b) { tile->ref_mbs = ref_tile->mbs; ref_tile++; } tile++; } } }// for b }// for p return 0; } | 16,115 |
1 | int spapr_h_cas_compose_response(sPAPRMachineState *spapr, target_ulong addr, target_ulong size, bool cpu_update) { void *fdt, *fdt_skel; sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 }; sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine()); size -= sizeof(hdr); /* Create sceleton */ fdt_skel = g_malloc0(size); _FDT((fdt_create(fdt_skel, size))); _FDT((fdt_begin_node(fdt_skel, ""))); _FDT((fdt_end_node(fdt_skel))); _FDT((fdt_finish(fdt_skel))); fdt = g_malloc0(size); _FDT((fdt_open_into(fdt_skel, fdt, size))); g_free(fdt_skel); /* Fixup cpu nodes */ if (cpu_update) { _FDT((spapr_fixup_cpu_dt(fdt, spapr))); } /* Generate ibm,dynamic-reconfiguration-memory node if required */ if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) { g_assert(smc->dr_lmb_enabled); _FDT((spapr_populate_drconf_memory(spapr, fdt))); } /* Pack resulting tree */ _FDT((fdt_pack(fdt))); if (fdt_totalsize(fdt) + sizeof(hdr) > size) { trace_spapr_cas_failed(size); return -1; } cpu_physical_memory_write(addr, &hdr, sizeof(hdr)); cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt)); trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr)); g_free(fdt); return 0; } | 16,116 |
1 | int do_store_msr (CPUPPCState *env, target_ulong value) { int enter_pm; value &= env->msr_mask; if (((value >> MSR_IR) & 1) != msr_ir || ((value >> MSR_DR) & 1) != msr_dr) { /* Flush all tlb when changing translation mode */ tlb_flush(env, 1); env->interrupt_request |= CPU_INTERRUPT_EXITTB; } #if !defined (CONFIG_USER_ONLY) if (unlikely((env->flags & POWERPC_FLAG_TGPR) && ((value >> MSR_TGPR) & 1) != msr_tgpr)) { /* Swap temporary saved registers with GPRs */ swap_gpr_tgpr(env); } if (unlikely((value >> MSR_EP) & 1) != msr_ep) { /* Change the exception prefix on PowerPC 601 */ env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; } #endif #if defined (TARGET_PPC64) msr_sf = (value >> MSR_SF) & 1; msr_isf = (value >> MSR_ISF) & 1; msr_hv = (value >> MSR_HV) & 1; #endif msr_ucle = (value >> MSR_UCLE) & 1; msr_vr = (value >> MSR_VR) & 1; /* VR / SPE */ msr_ap = (value >> MSR_AP) & 1; msr_sa = (value >> MSR_SA) & 1; msr_key = (value >> MSR_KEY) & 1; msr_pow = (value >> MSR_POW) & 1; /* POW / WE */ msr_tgpr = (value >> MSR_TGPR) & 1; /* TGPR / CE */ msr_ile = (value >> MSR_ILE) & 1; msr_ee = (value >> MSR_EE) & 1; msr_pr = (value >> MSR_PR) & 1; msr_fp = (value >> MSR_FP) & 1; msr_me = (value >> MSR_ME) & 1; msr_fe0 = (value >> MSR_FE0) & 1; msr_se = (value >> MSR_SE) & 1; /* SE / DWE / UBLE */ msr_be = (value >> MSR_BE) & 1; /* BE / DE */ msr_fe1 = (value >> MSR_FE1) & 1; msr_al = (value >> MSR_AL) & 1; msr_ip = (value >> MSR_IP) & 1; msr_ir = (value >> MSR_IR) & 1; /* IR / IS */ msr_dr = (value >> MSR_DR) & 1; /* DR / DS */ msr_pe = (value >> MSR_PE) & 1; /* PE / EP */ msr_px = (value >> MSR_PX) & 1; /* PX / PMM */ msr_ri = (value >> MSR_RI) & 1; msr_le = (value >> MSR_LE) & 1; do_compute_hflags(env); enter_pm = 0; switch (env->excp_model) { case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: /* Don't handle SLEEP mode: we should disable all clocks... * No dynamic power-management. */ if (msr_pow == 1 && (env->spr[SPR_HID0] & 0x00C00000) != 0) enter_pm = 1; break; case POWERPC_EXCP_604: if (msr_pow == 1) enter_pm = 1; break; case POWERPC_EXCP_7x0: if (msr_pow == 1 && (env->spr[SPR_HID0] & 0x00E00000) != 0) enter_pm = 1; break; default: break; } return enter_pm; } | 16,117 |
1 | static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref) { AResampleContext *aresample = inlink->dst->priv; const int n_in = insamplesref->nb_samples; int64_t delay; int n_out = n_in * aresample->ratio + 32; AVFilterLink *const outlink = inlink->dst->outputs[0]; AVFrame *outsamplesref; int ret; delay = swr_get_delay(aresample->swr, outlink->sample_rate); if (delay > 0) n_out += FFMIN(delay, FFMAX(4096, n_out)); outsamplesref = ff_get_audio_buffer(outlink, n_out); if(!outsamplesref) return AVERROR(ENOMEM); av_frame_copy_props(outsamplesref, insamplesref); outsamplesref->format = outlink->format; outsamplesref->channels = outlink->channels; outsamplesref->channel_layout = outlink->channel_layout; outsamplesref->sample_rate = outlink->sample_rate; if(insamplesref->pts != AV_NOPTS_VALUE) { int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den); int64_t outpts= swr_next_pts(aresample->swr, inpts); aresample->next_pts = outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate); } else { outsamplesref->pts = AV_NOPTS_VALUE; } n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, (void *)insamplesref->extended_data, n_in); if (n_out <= 0) { av_frame_free(&outsamplesref); av_frame_free(&insamplesref); return 0; } aresample->more_data = outsamplesref->nb_samples == n_out; // Indicate that there is probably more data in our buffers outsamplesref->nb_samples = n_out; ret = ff_filter_frame(outlink, outsamplesref); av_frame_free(&insamplesref); return ret; } | 16,118 |
1 | static void mem_add(MemoryListener *listener, MemoryRegionSection *section) { AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener); MemoryRegionSection now = *section, remain = *section; if ((now.offset_within_address_space & ~TARGET_PAGE_MASK) || (now.size < TARGET_PAGE_SIZE)) { now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space) - now.offset_within_address_space, now.size); register_subpage(d, &now); remain.size -= now.size; remain.offset_within_address_space += now.size; remain.offset_within_region += now.size; } while (remain.size >= TARGET_PAGE_SIZE) { now = remain; if (remain.offset_within_region & ~TARGET_PAGE_MASK) { now.size = TARGET_PAGE_SIZE; register_subpage(d, &now); } else { now.size &= TARGET_PAGE_MASK; register_multipage(d, &now); } remain.size -= now.size; remain.offset_within_address_space += now.size; remain.offset_within_region += now.size; } now = remain; if (now.size) { register_subpage(d, &now); } } | 16,119 |
1 | static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AnsiContext *s = avctx->priv_data; uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; int ret, i, count; if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; if (!avctx->frame_number) { for (i=0; i<avctx->height; i++) memset(s->frame->data[0]+ i*s->frame->linesize[0], 0, avctx->width); memset(s->frame->data[1], 0, AVPALETTE_SIZE); } s->frame->pict_type = AV_PICTURE_TYPE_I; s->frame->palette_has_changed = 1; set_palette((uint32_t *)s->frame->data[1]); if (!s->first_frame) { erase_screen(avctx); s->first_frame = 1; } while(buf < buf_end) { switch(s->state) { case STATE_NORMAL: switch (buf[0]) { case 0x00: //NUL case 0x07: //BEL case 0x1A: //SUB /* ignore */ break; case 0x08: //BS s->x = FFMAX(s->x - 1, 0); break; case 0x09: //HT i = s->x / FONT_WIDTH; count = ((i + 8) & ~7) - i; for (i = 0; i < count; i++) draw_char(avctx, ' '); break; case 0x0A: //LF hscroll(avctx); case 0x0D: //CR s->x = 0; break; case 0x0C: //FF erase_screen(avctx); break; case 0x1B: //ESC s->state = STATE_ESCAPE; break; default: draw_char(avctx, buf[0]); } break; case STATE_ESCAPE: if (buf[0] == '[') { s->state = STATE_CODE; s->nb_args = 0; s->args[0] = -1; } else { s->state = STATE_NORMAL; draw_char(avctx, 0x1B); continue; } break; case STATE_CODE: switch(buf[0]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (s->nb_args < MAX_NB_ARGS) s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0'; break; case ';': s->nb_args++; if (s->nb_args < MAX_NB_ARGS) s->args[s->nb_args] = 0; break; case 'M': s->state = STATE_MUSIC_PREAMBLE; break; case '=': case '?': /* ignore */ break; default: if (s->nb_args > MAX_NB_ARGS) av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args); if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] >= 0) s->nb_args++; if ((ret = execute_code(avctx, buf[0])) < 0) return ret; s->state = STATE_NORMAL; } break; case STATE_MUSIC_PREAMBLE: if (buf[0] == 0x0E || buf[0] == 0x1B) s->state = STATE_NORMAL; /* ignore music data */ break; } buf++; } *got_frame = 1; if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; return buf_size; } | 16,120 |
1 | static void load_symbols(struct elfhdr *hdr, int fd) { unsigned int i, nsyms; struct elf_shdr sechdr, symtab, strtab; char *strings; struct syminfo *s; struct elf_sym *syms; lseek(fd, hdr->e_shoff, SEEK_SET); for (i = 0; i < hdr->e_shnum; i++) { if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) return; #ifdef BSWAP_NEEDED bswap_shdr(&sechdr); #endif if (sechdr.sh_type == SHT_SYMTAB) { symtab = sechdr; lseek(fd, hdr->e_shoff + sizeof(sechdr) * sechdr.sh_link, SEEK_SET); if (read(fd, &strtab, sizeof(strtab)) != sizeof(strtab)) return; #ifdef BSWAP_NEEDED bswap_shdr(&strtab); #endif goto found; } } return; /* Shouldn't happen... */ found: /* Now know where the strtab and symtab are. Snarf them. */ s = malloc(sizeof(*s)); syms = malloc(symtab.sh_size); if (!syms) return; s->disas_strtab = strings = malloc(strtab.sh_size); if (!s->disas_strtab) return; lseek(fd, symtab.sh_offset, SEEK_SET); if (read(fd, syms, symtab.sh_size) != symtab.sh_size) return; nsyms = symtab.sh_size / sizeof(struct elf_sym); i = 0; while (i < nsyms) { #ifdef BSWAP_NEEDED bswap_sym(syms + i); #endif // Throw away entries which we do not need. if (syms[i].st_shndx == SHN_UNDEF || syms[i].st_shndx >= SHN_LORESERVE || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { nsyms--; if (i < nsyms) { syms[i] = syms[nsyms]; } continue; } #if defined(TARGET_ARM) || defined (TARGET_MIPS) /* The bottom address bit marks a Thumb or MIPS16 symbol. */ syms[i].st_value &= ~(target_ulong)1; #endif i++; } syms = realloc(syms, nsyms * sizeof(*syms)); qsort(syms, nsyms, sizeof(*syms), symcmp); lseek(fd, strtab.sh_offset, SEEK_SET); if (read(fd, strings, strtab.sh_size) != strtab.sh_size) return; s->disas_num_syms = nsyms; #if ELF_CLASS == ELFCLASS32 s->disas_symtab.elf32 = syms; s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx; #else s->disas_symtab.elf64 = syms; s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx; #endif s->next = syminfos; syminfos = s; } | 16,121 |
1 | av_cold int ff_snow_common_init(AVCodecContext *avctx){ SnowContext *s = avctx->priv_data; int width, height; int i, j; s->avctx= avctx; s->max_ref_frames=1; //just make sure it's not an invalid value in case of no initial keyframe ff_me_cmp_init(&s->mecc, avctx); ff_hpeldsp_init(&s->hdsp, avctx->flags); ff_videodsp_init(&s->vdsp, 8); ff_dwt_init(&s->dwt); ff_h264qpel_init(&s->h264qpel, 8); #define mcf(dx,dy)\ s->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\ s->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\ s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\ s->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\ s->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\ s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4]; mcf( 0, 0) mcf( 4, 0) mcf( 8, 0) mcf(12, 0) mcf( 0, 4) mcf( 4, 4) mcf( 8, 4) mcf(12, 4) mcf( 0, 8) mcf( 4, 8) mcf( 8, 8) mcf(12, 8) mcf( 0,12) mcf( 4,12) mcf( 8,12) mcf(12,12) #define mcfh(dx,dy)\ s->hdsp.put_pixels_tab [0][dy/4+dx/8]=\ s->hdsp.put_no_rnd_pixels_tab[0][dy/4+dx/8]=\ mc_block_hpel ## dx ## dy ## 16;\ s->hdsp.put_pixels_tab [1][dy/4+dx/8]=\ s->hdsp.put_no_rnd_pixels_tab[1][dy/4+dx/8]=\ mc_block_hpel ## dx ## dy ## 8; mcfh(0, 0) mcfh(8, 0) mcfh(0, 8) mcfh(8, 8) init_qexp(); // dec += FFMAX(s->chroma_h_shift, s->chroma_v_shift); width= s->avctx->width; height= s->avctx->height; FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->spatial_idwt_buffer, width, height * sizeof(IDWTELEM), fail); FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->spatial_dwt_buffer, width, height * sizeof(DWTELEM), fail); //FIXME this does not belong here FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->temp_dwt_buffer, width, sizeof(DWTELEM), fail); FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->temp_idwt_buffer, width, sizeof(IDWTELEM), fail); FF_ALLOC_ARRAY_OR_GOTO(avctx, s->run_buffer, ((width + 1) >> 1), ((height + 1) >> 1) * sizeof(*s->run_buffer), fail); for(i=0; i<MAX_REF_FRAMES; i++) { for(j=0; j<MAX_REF_FRAMES; j++) ff_scale_mv_ref[i][j] = 256*(i+1)/(j+1); s->last_picture[i] = av_frame_alloc(); if (!s->last_picture[i]) goto fail; } s->mconly_picture = av_frame_alloc(); s->current_picture = av_frame_alloc(); if (!s->mconly_picture || !s->current_picture) goto fail; return 0; fail: return AVERROR(ENOMEM); } | 16,123 |
1 | static int hdev_open(BlockDriverState *bs, const char *filename, int flags) { BDRVRawState *s = bs->opaque; #if defined(__APPLE__) && defined(__MACH__) if (strstart(filename, "/dev/cdrom", NULL)) { kern_return_t kernResult; io_iterator_t mediaIterator; char bsdPath[ MAXPATHLEN ]; int fd; kernResult = FindEjectableCDMedia( &mediaIterator ); kernResult = GetBSDPath( mediaIterator, bsdPath, sizeof( bsdPath ) ); if ( bsdPath[ 0 ] != '\0' ) { strcat(bsdPath,"s0"); /* some CDs don't have a partition 0 */ fd = qemu_open(bsdPath, O_RDONLY | O_BINARY | O_LARGEFILE); if (fd < 0) { bsdPath[strlen(bsdPath)-1] = '1'; } else { qemu_close(fd); } filename = bsdPath; } if ( mediaIterator ) IOObjectRelease( mediaIterator ); } #endif s->type = FTYPE_FILE; #if defined(__linux__) { char resolved_path[ MAXPATHLEN ], *temp; temp = realpath(filename, resolved_path); if (temp && strstart(temp, "/dev/sg", NULL)) { bs->sg = 1; } } #endif return raw_open_common(bs, filename, flags, 0); } | 16,125 |
1 | void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc) { #ifdef KVM_CAP_MCE struct kvm_x86_mce mce = { .bank = bank, .status = status, .mcg_status = mcg_status, .addr = addr, .misc = misc, }; struct kvm_x86_mce_data data = { .env = cenv, .mce = &mce, }; run_on_cpu(cenv, kvm_do_inject_x86_mce, &data); #endif } | 16,127 |
1 | static int asf_read_marker(AVFormatContext *s, int64_t size) { AVIOContext *pb = s->pb; ASFContext *asf = s->priv_data; int i, count, name_len, ret; char name[1024]; avio_rl64(pb); // reserved 16 bytes avio_rl64(pb); // ... count = avio_rl32(pb); // markers count avio_rl16(pb); // reserved 2 bytes name_len = avio_rl16(pb); // name length for (i = 0; i < name_len; i++) avio_r8(pb); // skip the name for (i = 0; i < count; i++) { int64_t pres_time; int name_len; avio_rl64(pb); // offset, 8 bytes pres_time = avio_rl64(pb); // presentation time pres_time -= asf->hdr.preroll * 10000; avio_rl16(pb); // entry length avio_rl32(pb); // send time avio_rl32(pb); // flags name_len = avio_rl32(pb); // name length if ((ret = avio_get_str16le(pb, name_len * 2, name, sizeof(name))) < name_len) avio_skip(pb, name_len - ret); avpriv_new_chapter(s, i, (AVRational) { 1, 10000000 }, pres_time, AV_NOPTS_VALUE, name); } return 0; } | 16,128 |
1 | static void xhci_events_update(XHCIState *xhci, int v) { XHCIInterrupter *intr = &xhci->intr[v]; dma_addr_t erdp; unsigned int dp_idx; bool do_irq = 0; if (xhci->usbsts & USBSTS_HCH) { return; } erdp = xhci_addr64(intr->erdp_low, intr->erdp_high); if (erdp < intr->er_start || erdp >= (intr->er_start + TRB_SIZE*intr->er_size)) { DPRINTF("xhci: ERDP out of bounds: "DMA_ADDR_FMT"\n", erdp); DPRINTF("xhci: ER[%d] at "DMA_ADDR_FMT" len %d\n", v, intr->er_start, intr->er_size); xhci_die(xhci); return; } dp_idx = (erdp - intr->er_start) / TRB_SIZE; assert(dp_idx < intr->er_size); /* NEC didn't read section 4.9.4 of the spec (v1.0 p139 top Note) and thus * deadlocks when the ER is full. Hack it by holding off events until * the driver decides to free at least half of the ring */ if (intr->er_full) { int er_free = dp_idx - intr->er_ep_idx; if (er_free <= 0) { er_free += intr->er_size; } if (er_free < (intr->er_size/2)) { DPRINTF("xhci_events_update(): event ring still " "more than half full (hack)\n"); return; } } while (intr->ev_buffer_put != intr->ev_buffer_get) { assert(intr->er_full); if (((intr->er_ep_idx+1) % intr->er_size) == dp_idx) { DPRINTF("xhci_events_update(): event ring full again\n"); #ifndef ER_FULL_HACK XHCIEvent full = {ER_HOST_CONTROLLER, CC_EVENT_RING_FULL_ERROR}; xhci_write_event(xhci, &full, v); #endif do_irq = 1; break; } XHCIEvent *event = &intr->ev_buffer[intr->ev_buffer_get]; xhci_write_event(xhci, event, v); intr->ev_buffer_get++; do_irq = 1; if (intr->ev_buffer_get == EV_QUEUE) { intr->ev_buffer_get = 0; } } if (do_irq) { xhci_intr_raise(xhci, v); } if (intr->er_full && intr->ev_buffer_put == intr->ev_buffer_get) { DPRINTF("xhci_events_update(): event ring no longer full\n"); intr->er_full = 0; } } | 16,129 |
1 | static int process_line(URLContext *h, char *line, int line_count, int *new_location) { HTTPContext *s = h->priv_data; char *tag, *p, *end; /* end of header */ if (line[0] == '\0') return 0; p = line; if (line_count == 0) { while (!isspace(*p) && *p != '\0') p++; while (isspace(*p)) p++; s->http_code = strtol(p, &end, 10); av_dlog(NULL, "http_code=%d\n", s->http_code); /* error codes are 4xx and 5xx, but regard 401 as a success, so we * don't abort until all headers have been parsed. */ if (s->http_code >= 400 && s->http_code < 600 && s->http_code != 401) { end += strspn(end, SPACE_CHARS); av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n", s->http_code, end); return -1; } } else { while (*p != '\0' && *p != ':') p++; if (*p != ':') return 1; *p = '\0'; tag = line; p++; while (isspace(*p)) p++; if (!av_strcasecmp(tag, "Location")) { strcpy(s->location, p); *new_location = 1; } else if (!av_strcasecmp (tag, "Content-Length") && s->filesize == -1) { s->filesize = atoll(p); } else if (!av_strcasecmp (tag, "Content-Range")) { /* "bytes $from-$to/$document_size" */ const char *slash; if (!strncmp (p, "bytes ", 6)) { p += 6; s->off = atoll(p); if ((slash = strchr(p, '/')) && strlen(slash) > 0) s->filesize = atoll(slash+1); } h->is_streamed = 0; /* we _can_ in fact seek */ } else if (!av_strcasecmp(tag, "Accept-Ranges") && !strncmp(p, "bytes", 5)) { h->is_streamed = 0; } else if (!av_strcasecmp (tag, "Transfer-Encoding") && !av_strncasecmp(p, "chunked", 7)) { s->filesize = -1; s->chunksize = 0; } else if (!av_strcasecmp (tag, "WWW-Authenticate")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp (tag, "Authentication-Info")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp (tag, "Connection")) { if (!strcmp(p, "close")) s->willclose = 1; } } return 1; } | 16,130 |
1 | static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride) { const int index = size2index[log2h][log2w]; const int h = 1 << log2h; int code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table, BLOCK_TYPE_VLC_BITS, 1); uint16_t *start = (uint16_t *)f->last_picture.data[0]; uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w); av_assert2(code >= 0 && code <= 6); if (code == 0) { if (f->g.buffer_end - f->g.buffer < 1) { av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n"); return; } src += f->mv[bytestream2_get_byte(&f->g)]; if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } mcdc(dst, src, log2w, h, stride, 1, 0); } else if (code == 1) { log2h--; decode_p_block(f, dst, src, log2w, log2h, stride); decode_p_block(f, dst + (stride << log2h), src + (stride << log2h), log2w, log2h, stride); } else if (code == 2) { log2w--; decode_p_block(f, dst , src, log2w, log2h, stride); decode_p_block(f, dst + (1 << log2w), src + (1 << log2w), log2w, log2h, stride); } else if (code == 3 && f->version < 2) { mcdc(dst, src, log2w, h, stride, 1, 0); } else if (code == 4) { if (f->g.buffer_end - f->g.buffer < 1) { av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n"); return; } src += f->mv[bytestream2_get_byte(&f->g)]; if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } if (f->g2.buffer_end - f->g2.buffer < 1){ av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); return; } mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16(&f->g2)); } else if (code == 5) { if (f->g2.buffer_end - f->g2.buffer < 1) { av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); return; } mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16(&f->g2)); } else if (code == 6) { if (f->g2.buffer_end - f->g2.buffer < 2) { av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); return; } if (log2w) { dst[0] = bytestream2_get_le16(&f->g2); dst[1] = bytestream2_get_le16(&f->g2); } else { dst[0] = bytestream2_get_le16(&f->g2); dst[stride] = bytestream2_get_le16(&f->g2); } } } | 16,131 |
1 | void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ long i = width; while(i & 0x1F) { i--; b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; } asm volatile ( "jmp 2f \n\t" "1: \n\t" "mov %6, %%"REG_a" \n\t" "mov %4, %%"REG_S" \n\t" snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") "pcmpeqd %%xmm1, %%xmm1 \n\t" "psllw $15, %%xmm1 \n\t" "psrlw $14, %%xmm1 \n\t" "mov %5, %%"REG_a" \n\t" snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_load(REG_a,"xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_sub("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_store(REG_a,"xmm1","xmm3","xmm5","xmm7") "mov %3, %%"REG_c" \n\t" snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add(REG_c,"xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_store(REG_S,"xmm0","xmm2","xmm4","xmm6") "mov %2, %%"REG_a" \n\t" snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6") "pcmpeqd %%xmm1, %%xmm1 \n\t" "psllw $15, %%xmm1 \n\t" "psrlw $14, %%xmm1 \n\t" "mov %1, %%"REG_S" \n\t" snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_store(REG_c,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add(REG_S,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6") "2: \n\t" "sub $32, %%"REG_d" \n\t" "jge 1b \n\t" :"+d"(i) : "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5): "%"REG_a"","%"REG_S"","%"REG_c""); } | 16,133 |
1 | static int kvm_has_msr_star(CPUState *env) { static int has_msr_star; int ret; /* first time */ if (has_msr_star == 0) { struct kvm_msr_list msr_list, *kvm_msr_list; has_msr_star = -1; /* Obtain MSR list from KVM. These are the MSRs that we must * save/restore */ msr_list.nmsrs = 0; ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list); if (ret < 0) return 0; /* Old kernel modules had a bug and could write beyond the provided memory. Allocate at least a safe amount of 1K. */ kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) + msr_list.nmsrs * sizeof(msr_list.indices[0]))); kvm_msr_list->nmsrs = msr_list.nmsrs; ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); if (ret >= 0) { int i; for (i = 0; i < kvm_msr_list->nmsrs; i++) { if (kvm_msr_list->indices[i] == MSR_STAR) { has_msr_star = 1; break; } } } free(kvm_msr_list); } if (has_msr_star == 1) return 1; return 0; } | 16,134 |
1 | static CharDriverState *qemu_chr_open_stdio(ChardevStdio *opts) { CharDriverState *chr; if (is_daemonized()) { error_report("cannot use stdio with -daemonize"); return NULL; } old_fd0_flags = fcntl(0, F_GETFL); tcgetattr (0, &oldtty); qemu_set_nonblock(0); atexit(term_exit); chr = qemu_chr_open_fd(0, 1); chr->chr_close = qemu_chr_close_stdio; chr->chr_set_echo = qemu_chr_set_echo_stdio; if (opts->has_signal) { stdio_allow_signal = opts->signal; } qemu_chr_fe_set_echo(chr, false); return chr; } | 16,136 |
0 | void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) { CPUState *cs = CPU(x86_env_get_cpu(env)); uint32_t int_ctl; qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n", exit_code, exit_info_1, ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)), env->eip); if (env->hflags & HF_INHIBIT_IRQ_MASK) { stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK); env->hflags &= ~HF_INHIBIT_IRQ_MASK; } else { stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); } /* Save the VM state in the vmcb */ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es), &env->segs[R_ES]); svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), &env->segs[R_CS]); svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), &env->segs[R_SS]); svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base); stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); int_ctl = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); int_ctl |= env->v_tpr & V_TPR_MASK; if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { int_ctl |= V_IRQ_MASK; } stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK); /* Reload the host state from vm_hsave */ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); env->hflags &= ~HF_SVMI_MASK; env->intercept = 0; env->intercept_exceptions = 0; cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; env->tsc_offset = 0; env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base)); env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit)); env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base)); env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit)); cpu_x86_update_cr0(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK); cpu_x86_update_cr4(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.cr4))); cpu_x86_update_cr3(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.cr3))); /* we need to set the efer after the crs so the hidden flags get set properly */ cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.efer))); env->eflags = 0; cpu_load_eflags(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK | VM_MASK)); CC_OP = CC_OP_EFLAGS; svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es), R_ES); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); env->eip = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip)); env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rsp)); env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rax)); env->dr[6] = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.dr6)); env->dr[7] = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.dr7)); /* other setups */ cpu_x86_set_cpl(env, 0); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code); stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1); stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.event_inj))); stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err))); stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); env->hflags2 &= ~HF2_GIF_MASK; /* FIXME: Resets the current ASID register to zero (host ASID). */ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ /* Clears the TSC_OFFSET inside the processor. */ /* If the host is in PAE mode, the processor reloads the host's PDPEs from the page table indicated the host's CR3. If the PDPEs contain illegal state, the processor causes a shutdown. */ /* Disables all breakpoints in the host DR7 register. */ /* Checks the reloaded host state for consistency. */ /* If the host's rIP reloaded by #VMEXIT is outside the limit of the host's code segment or non-canonical (in the case of long mode), a #GP fault is delivered inside the host. */ /* remove any pending exception */ cs->exception_index = -1; env->error_code = 0; env->old_exception = -1; cpu_loop_exit(cs); } | 16,137
0 | static void eval_coefs(int *coefs, const int *refl) { int buffer[10]; int *b1 = buffer; int *b2 = coefs; int x, y; for (x=0; x < 10; x++) { b1[x] = refl[x] << 4; for (y=0; y < x; y++) b1[y] = ((refl[x] * b2[x-y-1]) >> 12) + b2[y]; FFSWAP(int *, b1, b2); } for (x=0; x < 10; x++) coefs[x] >>= 4; } | 16,139 |
0 | void kvmppc_hash64_free_pteg(uint64_t token) { struct kvm_get_htab_buf *htab_buf; htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf, hpte); g_free(htab_buf); return; } | 16,141 |
0 | static void kvm_s390_flic_realize(DeviceState *dev, Error **errp) { S390FLICState *fs = S390_FLIC_COMMON(dev); KVMS390FLICState *flic_state = KVM_S390_FLIC(dev); struct kvm_create_device cd = {0}; struct kvm_device_attr test_attr = {0}; int ret; Error *errp_local = NULL; KVM_S390_FLIC_GET_CLASS(dev)->parent_realize(dev, &errp_local); if (errp_local) { goto fail; } flic_state->fd = -1; if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { error_setg_errno(&errp_local, errno, "KVM is missing capability" " KVM_CAP_DEVICE_CTRL"); trace_flic_no_device_api(errno); goto fail; } cd.type = KVM_DEV_TYPE_FLIC; ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); if (ret < 0) { error_setg_errno(&errp_local, errno, "Creating the KVM device failed"); trace_flic_create_device(errno); goto fail; } flic_state->fd = cd.fd; /* Check clear_io_irq support */ test_attr.group = KVM_DEV_FLIC_CLEAR_IO_IRQ; flic_state->clear_io_supported = !ioctl(flic_state->fd, KVM_HAS_DEVICE_ATTR, test_attr); fs->ais_supported = false; return; fail: error_propagate(errp, errp_local); } | 16,142 |
0 | static void ide_cd_change_cb(void *opaque, bool load) { IDEState *s = opaque; uint64_t nb_sectors; s->tray_open = !load; bdrv_get_geometry(s->bs, &nb_sectors); s->nb_sectors = nb_sectors; /* * First indicate to the guest that a CD has been removed. That's * done on the next command the guest sends us. * * Then we set UNIT_ATTENTION, by which the guest will * detect a new CD in the drive. See ide_atapi_cmd() for details. */ s->cdrom_changed = 1; s->events.new_media = true; s->events.eject_request = false; ide_set_irq(s->bus); } | 16,143 |
0 | static int conditional_wait(DBDMA_channel *ch) { dbdma_cmd *current = &ch->current; uint16_t wait; uint16_t sel_mask, sel_value; uint32_t status; int cond; DBDMA_DPRINTF("conditional_wait\n"); wait = le16_to_cpu(current->command) & WAIT_MASK; switch(wait) { case WAIT_NEVER: /* don't wait */ return 0; case WAIT_ALWAYS: /* always wait */ return 1; } status = be32_to_cpu(ch->regs[DBDMA_STATUS]) & DEVSTAT; sel_mask = (be32_to_cpu(ch->regs[DBDMA_WAIT_SEL]) >> 16) & 0x0f; sel_value = be32_to_cpu(ch->regs[DBDMA_WAIT_SEL]) & 0x0f; cond = (status & sel_mask) == (sel_value & sel_mask); switch(wait) { case WAIT_IFSET: /* wait if condition bit is 1 */ if (cond) return 1; return 0; case WAIT_IFCLR: /* wait if condition bit is 0 */ if (!cond) return 1; return 0; } return 0; } | 16,144 |
0 | static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret) { IDEState *s = opaque; int data_offset, n; if (ret < 0) { ide_atapi_io_error(s, ret); goto eot; } if (s->io_buffer_size > 0) { /* * For a cdrom read sector command (s->lba != -1), * adjust the lba for the next s->io_buffer_size chunk * and dma the current chunk. * For a command != read (s->lba == -1), just transfer * the reply data. */ if (s->lba != -1) { if (s->cd_sector_size == 2352) { n = 1; cd_data_to_raw(s->io_buffer, s->lba); } else { n = s->io_buffer_size >> 11; } s->lba += n; } s->packet_transfer_size -= s->io_buffer_size; if (s->bus->dma->ops->rw_buf(s->bus->dma, 1) == 0) goto eot; } if (s->packet_transfer_size <= 0) { s->status = READY_STAT | SEEK_STAT; s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; ide_set_irq(s->bus); goto eot; } s->io_buffer_index = 0; if (s->cd_sector_size == 2352) { n = 1; s->io_buffer_size = s->cd_sector_size; data_offset = 16; } else { n = s->packet_transfer_size >> 11; if (n > (IDE_DMA_BUF_SECTORS / 4)) n = (IDE_DMA_BUF_SECTORS / 4); s->io_buffer_size = n * 2048; data_offset = 0; } #ifdef DEBUG_AIO printf("aio_read_cd: lba=%u n=%d\n", s->lba, n); #endif s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset); s->bus->dma->iov.iov_len = n * 4 * 512; qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1); s->bus->dma->aiocb = bdrv_aio_readv(s->bs, (int64_t)s->lba << 2, &s->bus->dma->qiov, n * 4, ide_atapi_cmd_read_dma_cb, s); return; eot: block_acct_done(bdrv_get_stats(s->bs), &s->acct); ide_set_inactive(s, false); } | 16,145 |
0 | static int nbd_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVNBDState *s = bs->opaque; char *export = NULL; int result, sock; /* Pop the config into our state object. Exit if invalid. */ result = nbd_config(s, options, &export); if (result != 0) { return result; } /* establish TCP connection, return error if it fails * TODO: Configurable retry-until-timeout behaviour. */ sock = nbd_establish_connection(bs); if (sock < 0) { return sock; } /* NBD handshake */ result = nbd_client_session_init(&s->client, bs, sock, export); g_free(export); return result; } | 16,146 |
0 | static int pl061_load(QEMUFile *f, void *opaque, int version_id) { pl061_state *s = (pl061_state *)opaque; if (version_id != 1) return -EINVAL; s->locked = qemu_get_be32(f); s->data = qemu_get_be32(f); s->old_data = qemu_get_be32(f); s->dir = qemu_get_be32(f); s->isense = qemu_get_be32(f); s->ibe = qemu_get_be32(f); s->iev = qemu_get_be32(f); s->im = qemu_get_be32(f); s->istate = qemu_get_be32(f); s->afsel = qemu_get_be32(f); s->dr2r = qemu_get_be32(f); s->dr4r = qemu_get_be32(f); s->dr8r = qemu_get_be32(f); s->odr = qemu_get_be32(f); s->pur = qemu_get_be32(f); s->pdr = qemu_get_be32(f); s->slr = qemu_get_be32(f); s->den = qemu_get_be32(f); s->cr = qemu_get_be32(f); s->float_high = qemu_get_be32(f); return 0; } | 16,148 |
0 | void pcie_host_mmcfg_map(PCIExpressHost *e, hwaddr addr, uint32_t size) { assert(!(size & (size - 1))); /* power of 2 */ assert(size >= PCIE_MMCFG_SIZE_MIN); assert(size <= PCIE_MMCFG_SIZE_MAX); e->size = size; memory_region_init_io(&e->mmio, OBJECT(e), &pcie_mmcfg_ops, e, "pcie-mmcfg", e->size); e->base_addr = addr; memory_region_add_subregion(get_system_memory(), e->base_addr, &e->mmio); } | 16,149 |
0 | const char *small_strptime(const char *p, const char *fmt, struct tm *dt) { int c, val; for(;;) { c = *fmt++; if (c == '\0') { return p; } else if (c == '%') { c = *fmt++; switch(c) { case 'H': val = date_get_num(&p, 0, 23, 2); if (val == -1) return NULL; dt->tm_hour = val; break; case 'M': val = date_get_num(&p, 0, 59, 2); if (val == -1) return NULL; dt->tm_min = val; break; case 'S': val = date_get_num(&p, 0, 59, 2); if (val == -1) return NULL; dt->tm_sec = val; break; case 'Y': val = date_get_num(&p, 0, 9999, 4); if (val == -1) return NULL; dt->tm_year = val - 1900; break; case 'm': val = date_get_num(&p, 1, 12, 2); if (val == -1) return NULL; dt->tm_mon = val - 1; break; case 'd': val = date_get_num(&p, 1, 31, 2); if (val == -1) return NULL; dt->tm_mday = val; break; case '%': goto match; default: return NULL; } } else { match: if (c != *p) return NULL; p++; } } return p; } | 16,150 |
0 | void ppc_set_irq (CPUState *env, int n_IRQ, int level) { if (level) { env->pending_interrupts |= 1 << n_IRQ; cpu_interrupt(env, CPU_INTERRUPT_HARD); } else { env->pending_interrupts &= ~(1 << n_IRQ); if (env->pending_interrupts == 0) cpu_reset_interrupt(env, CPU_INTERRUPT_HARD); } #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: %p n_IRQ %d level %d => pending %08x req %08x\n", __func__, env, n_IRQ, level, env->pending_interrupts, env->interrupt_request); } #endif } | 16,151 |
0 | static void fd_put_buffer(void *opaque, const uint8_t *buf, int64_t pos, int size) { QEMUFileFD *s = opaque; ssize_t len; do { len = write(s->fd, buf, size); } while (len == -1 && errno == EINTR); if (len == -1) len = -errno; /* When the fd becomes writable again, register a callback to do * a put notify */ if (len == -EAGAIN) qemu_set_fd_handler2(s->fd, NULL, NULL, fd_put_notify, s); } | 16,152 |
0 | void laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug) { struct qemu_laio_state *s = aio_ctx; assert(s->io_q.plugged > 0 || !unplug); if (unplug && --s->io_q.plugged > 0) { return; } if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { ioq_submit(s); } } | 16,153 |
0 | void acpi_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent, uint8_t s4_val) { ar->pm1.cnt.s4_val = s4_val; ar->wakeup.notify = acpi_notify_wakeup; qemu_register_wakeup_notifier(&ar->wakeup); memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent), &acpi_pm_cnt_ops, ar, "acpi-cnt", 2); memory_region_add_subregion(parent, 4, &ar->pm1.cnt.io); } | 16,154 |
0 | static void spapr_nmi(NMIState *n, int cpu_index, Error **errp) { CPUState *cs; CPU_FOREACH(cs) { async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL); } } | 16,156 |
0 | static void do_test_validate_qmp_introspect(TestInputVisitorData *data, const char *schema_json) { SchemaInfoList *schema = NULL; Visitor *v; v = validate_test_init_raw(data, schema_json); visit_type_SchemaInfoList(v, NULL, &schema, &error_abort); g_assert(schema); qapi_free_SchemaInfoList(schema); } | 16,157 |
0 | static int64_t coroutine_fn qcow_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file) { BDRVQcowState *s = bs->opaque; int index_in_cluster, n; uint64_t cluster_offset; qemu_co_mutex_lock(&s->lock); cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0); qemu_co_mutex_unlock(&s->lock); index_in_cluster = sector_num & (s->cluster_sectors - 1); n = s->cluster_sectors - index_in_cluster; if (n > nb_sectors) n = nb_sectors; *pnum = n; if (!cluster_offset) { return 0; } if ((cluster_offset & QCOW_OFLAG_COMPRESSED) || s->cipher) { return BDRV_BLOCK_DATA; } cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); *file = bs->file->bs; return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | cluster_offset; } | 16,158 |
0 | int v9fs_co_symlink(V9fsState *s, V9fsFidState *fidp, const char *oldpath, const char *newpath, gid_t gid) { int err; FsCred cred; cred_init(&cred); cred.fc_uid = fidp->uid; cred.fc_gid = gid; cred.fc_mode = 0777; v9fs_co_run_in_worker( { err = s->ops->symlink(&s->ctx, oldpath, newpath, &cred); if (err < 0) { err = -errno; } }); return err; } | 16,159 |
0 | static void cpu_x86_fill_host(x86_def_t *x86_cpu_def) { uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; x86_cpu_def->name = "host"; host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->level = eax; x86_cpu_def->vendor1 = ebx; x86_cpu_def->vendor2 = edx; x86_cpu_def->vendor3 = ecx; host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); x86_cpu_def->stepping = eax & 0x0F; x86_cpu_def->ext_features = ecx; x86_cpu_def->features = edx; if (kvm_enabled() && x86_cpu_def->level >= 7) { x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX); } else { x86_cpu_def->cpuid_7_0_ebx_features = 0; } host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->xlevel = eax; host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->ext2_features = edx; x86_cpu_def->ext3_features = ecx; cpu_x86_fill_model_id(x86_cpu_def->model_id); x86_cpu_def->vendor_override = 0; /* Call Centaur's CPUID instruction. */ if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 && x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 && x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) { host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx); if (eax >= 0xC0000001) { /* Support VIA max extended level */ x86_cpu_def->xlevel2 = eax; host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->ext4_features = edx; } } /* * Every SVM feature requires emulation support in KVM - so we can't just * read the host features here. KVM might even support SVM features not * available on the host hardware. Just set all bits and mask out the * unsupported ones later. */ x86_cpu_def->svm_features = -1; } | 16,160 |
0 | static inline void mix_3f_2r_to_stereo(AC3DecodeContext *ctx) { int i; float (*output)[256] = ctx->audio_block.block_output; for (i = 0; i < 256; i++) { output[1][i] += (output[2][i] + output[4][i]); output[2][i] += (output[3][i] + output[5][i]); } memset(output[3], 0, sizeof(output[3])); memset(output[4], 0, sizeof(output[4])); memset(output[5], 0, sizeof(output[5])); } | 16,161 |
0 | static int vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf, Error **errp) { int ret; char ct[128]; BDRVVmdkState *s = bs->opaque; if (vmdk_parse_description(buf, "createType", ct, sizeof(ct))) { error_setg(errp, "invalid VMDK image descriptor"); ret = -EINVAL; goto exit; } if (strcmp(ct, "monolithicFlat") && strcmp(ct, "vmfs") && strcmp(ct, "vmfsSparse") && strcmp(ct, "twoGbMaxExtentSparse") && strcmp(ct, "twoGbMaxExtentFlat")) { error_setg(errp, "Unsupported image type '%s'", ct); ret = -ENOTSUP; goto exit; } s->create_type = g_strdup(ct); s->desc_offset = 0; ret = vmdk_parse_extents(buf, bs, bs->file->exact_filename, errp); exit: return ret; } | 16,162 |
0 | static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc) { int addr_reg, data_reg, data_reg2, r0, r1, rbase, mem_index, s_bits, bswap; #ifdef CONFIG_SOFTMMU int r2; void *label1_ptr, *label2_ptr; #endif #if TARGET_LONG_BITS == 64 int addr_reg2; #endif data_reg = *args++; if (opc == 3) data_reg2 = *args++; else data_reg2 = 0; addr_reg = *args++; #if TARGET_LONG_BITS == 64 addr_reg2 = *args++; #endif mem_index = *args; s_bits = opc & 3; #ifdef CONFIG_SOFTMMU r0 = 3; r1 = 4; r2 = 0; rbase = 0; tcg_out32 (s, (RLWINM | RA (r0) | RS (addr_reg) | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) | MB (32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS)) | ME (31 - CPU_TLB_ENTRY_BITS) ) ); tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0)); tcg_out32 (s, (LWZU | RT (r1) | RA (r0) | offsetof (CPUState, tlb_table[mem_index][0].addr_read) ) ); tcg_out32 (s, (RLWINM | RA (r2) | RS (addr_reg) | SH (0) | MB ((32 - s_bits) & 31) | ME (31 - TARGET_PAGE_BITS) ) ); tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1)); #if TARGET_LONG_BITS == 64 tcg_out32 (s, LWZ | RT (r1) | RA (r0) | 4); tcg_out32 (s, CMP | BF (6) | RA (addr_reg2) | RB (r1)); tcg_out32 (s, CRAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ)); #endif label1_ptr = s->code_ptr; #ifdef FAST_PATH tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE); #endif /* slow path */ #if TARGET_LONG_BITS == 32 tcg_out_mov (s, 3, addr_reg); tcg_out_movi (s, TCG_TYPE_I32, 4, mem_index); #else tcg_out_mov (s, 3, addr_reg2); tcg_out_mov (s, 4, addr_reg); tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index); #endif tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1); switch (opc) { case 0|4: tcg_out32 (s, EXTSB | RA (data_reg) | RS (3)); break; case 1|4: tcg_out32 (s, EXTSH | RA (data_reg) | RS (3)); break; case 0: case 1: case 2: if (data_reg != 3) tcg_out_mov (s, data_reg, 3); break; case 3: if (data_reg == 3) { if (data_reg2 == 4) { tcg_out_mov (s, 0, 4); tcg_out_mov (s, 4, 3); tcg_out_mov (s, 3, 0); } else { tcg_out_mov (s, data_reg2, 3); tcg_out_mov (s, 3, 4); } } else { if (data_reg != 4) tcg_out_mov (s, data_reg, 4); if (data_reg2 != 3) tcg_out_mov (s, data_reg2, 3); } break; } label2_ptr = s->code_ptr; tcg_out32 (s, B); /* label1: fast path */ #ifdef FAST_PATH reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr); #endif /* r0 now contains &env->tlb_table[mem_index][index].addr_read */ tcg_out32 (s, (LWZ | RT (r0) | RA (r0) | (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend) - offsetof (CPUTLBEntry, addr_read)) )); /* r0 = env->tlb_table[mem_index][index].addend */ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg)); /* r0 = env->tlb_table[mem_index][index].addend + addr */ #else /* !CONFIG_SOFTMMU */ r0 = addr_reg; r1 = 3; rbase = GUEST_BASE ? 
TCG_GUEST_BASE_REG : 0; #endif #ifdef TARGET_WORDS_BIGENDIAN bswap = 0; #else bswap = 1; #endif switch (opc) { default: case 0: tcg_out32 (s, LBZX | TAB (data_reg, rbase, r0)); break; case 0|4: tcg_out32 (s, LBZX | TAB (data_reg, rbase, r0)); tcg_out32 (s, EXTSB | RA (data_reg) | RS (data_reg)); break; case 1: if (bswap) tcg_out32 (s, LHBRX | TAB (data_reg, rbase, r0)); else tcg_out32 (s, LHZX | TAB (data_reg, rbase, r0)); break; case 1|4: if (bswap) { tcg_out32 (s, LHBRX | TAB (data_reg, rbase, r0)); tcg_out32 (s, EXTSH | RA (data_reg) | RS (data_reg)); } else tcg_out32 (s, LHAX | TAB (data_reg, rbase, r0)); break; case 2: if (bswap) tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0)); else tcg_out32 (s, LWZX | TAB (data_reg, rbase, r0)); break; case 3: if (bswap) { tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4); tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0)); tcg_out32 (s, LWBRX | TAB (data_reg2, rbase, r1)); } else { #ifdef CONFIG_USE_GUEST_BASE tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4); tcg_out32 (s, LWZX | TAB (data_reg2, rbase, r0)); tcg_out32 (s, LWZX | TAB (data_reg, rbase, r1)); #else if (r0 == data_reg2) { tcg_out32 (s, LWZ | RT (0) | RA (r0)); tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4); tcg_out_mov (s, data_reg2, 0); } else { tcg_out32 (s, LWZ | RT (data_reg2) | RA (r0)); tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4); } #endif } break; } #ifdef CONFIG_SOFTMMU reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr); #endif } | 16,165 |
0 | int32_t helper_fstoi(CPUSPARCState *env, float32 src) { int32_t ret; clear_float_exceptions(env); ret = float32_to_int32_round_to_zero(src, &env->fp_status); check_ieee_exceptions(env); return ret; } | 16,166 |
0 | static void version(void) { printf("qemu-" TARGET_ARCH " version " QEMU_VERSION QEMU_PKGVERSION ", Copyright (c) 2003-2008 Fabrice Bellard\n"); } | 16,167 |
0 | static void raw_aio_flush_io_queue(BlockDriverState *bs) { #ifdef CONFIG_LINUX_AIO BDRVRawState *s = bs->opaque; if (s->use_aio) { laio_io_unplug(bs, s->aio_ctx, false); } #endif } | 16,169 |
1 | static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) { #ifdef HAVE_MMX asm volatile( "mov %3, %%"REG_a" \n\t" "movq "MANGLE(w1111)", %%mm5 \n\t" "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t" "pxor %%mm7, %%mm7 \n\t" "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t" "add %%"REG_d", %%"REG_d" \n\t" ASMALIGN(4) "1: \n\t" PREFETCH" 64(%0, %%"REG_d") \n\t" #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) "movq (%0, %%"REG_d"), %%mm0 \n\t" "movq 6(%0, %%"REG_d"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "psrlq $24, %%mm0 \n\t" "psrlq $24, %%mm2 \n\t" PAVGB(%%mm1, %%mm0) PAVGB(%%mm3, %%mm2) "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" #else "movd (%0, %%"REG_d"), %%mm0 \n\t" "movd 3(%0, %%"REG_d"), %%mm2 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "paddw %%mm2, %%mm0 \n\t" "movd 6(%0, %%"REG_d"), %%mm4 \n\t" "movd 9(%0, %%"REG_d"), %%mm2 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "paddw %%mm4, %%mm2 \n\t" "psrlw $1, %%mm0 \n\t" "psrlw $1, %%mm2 \n\t" #endif "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" "pmaddwd %%mm0, %%mm1 \n\t" "pmaddwd %%mm2, %%mm3 \n\t" "pmaddwd %%mm6, %%mm0 \n\t" "pmaddwd %%mm6, %%mm2 \n\t" #ifndef FAST_BGR2YV12 "psrad $8, %%mm0 \n\t" "psrad $8, %%mm1 \n\t" "psrad $8, %%mm2 \n\t" "psrad $8, %%mm3 \n\t" #endif "packssdw %%mm2, %%mm0 \n\t" "packssdw %%mm3, %%mm1 \n\t" "pmaddwd %%mm5, %%mm0 \n\t" "pmaddwd %%mm5, %%mm1 \n\t" "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 "psraw $7, %%mm0 \n\t" #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) "movq 12(%0, %%"REG_d"), %%mm4 \n\t" "movq 18(%0, %%"REG_d"), %%mm2 \n\t" "movq %%mm4, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "psrlq $24, %%mm4 \n\t" "psrlq $24, %%mm2 \n\t" PAVGB(%%mm1, %%mm4) PAVGB(%%mm3, %%mm2) "punpcklbw %%mm7, %%mm4 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" #else "movd 12(%0, %%"REG_d"), %%mm4 \n\t" "movd 15(%0, %%"REG_d"), %%mm2 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "paddw %%mm2, %%mm4 \n\t" "movd 18(%0, %%"REG_d"), %%mm5 \n\t" "movd 21(%0, %%"REG_d"), %%mm2 \n\t" "punpcklbw %%mm7, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "paddw %%mm5, %%mm2 \n\t" "movq "MANGLE(w1111)", %%mm5 \n\t" "psrlw $2, %%mm4 \n\t" "psrlw $2, %%mm2 \n\t" #endif "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" "pmaddwd %%mm4, %%mm1 \n\t" "pmaddwd %%mm2, %%mm3 \n\t" "pmaddwd %%mm6, %%mm4 \n\t" "pmaddwd %%mm6, %%mm2 \n\t" #ifndef FAST_BGR2YV12 "psrad $8, %%mm4 \n\t" "psrad $8, %%mm1 \n\t" "psrad $8, %%mm2 \n\t" "psrad $8, %%mm3 \n\t" #endif "packssdw %%mm2, %%mm4 \n\t" "packssdw %%mm3, %%mm1 \n\t" "pmaddwd %%mm5, %%mm4 \n\t" "pmaddwd %%mm5, %%mm1 \n\t" "add $24, %%"REG_d" \n\t" "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2 "psraw $7, %%mm4 \n\t" "movq %%mm0, %%mm1 \n\t" "punpckldq %%mm4, %%mm0 \n\t" "punpckhdq %%mm4, %%mm1 \n\t" "packsswb %%mm1, %%mm0 \n\t" "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t" "movd %%mm0, (%1, %%"REG_a") \n\t" "punpckhdq %%mm0, %%mm0 \n\t" "movd %%mm0, (%2, %%"REG_a") \n\t" "add $4, %%"REG_a" \n\t" " js 1b \n\t" : : "r" (src1+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width) : "%"REG_a, "%"REG_d ); #else int i; for(i=0; i<width; i++) { int b= src1[6*i + 0] + src1[6*i + 3]; int g= src1[6*i + 1] + src1[6*i + 4]; int r= src1[6*i + 2] + src1[6*i + 5]; dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128; dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128; } #endif assert(src1 == src2); } | 16,170 |
1 | static void test_ivshmem_server(void) { IVState state1, state2, *s1, *s2; ServerThread thread; IvshmemServer server; int ret, vm1, vm2; int nvectors = 2; guint64 end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND; memset(tmpshmem, 0x42, TMPSHMSIZE); ret = ivshmem_server_init(&server, tmpserver, tmpshm, TMPSHMSIZE, nvectors, g_test_verbose()); g_assert_cmpint(ret, ==, 0); ret = ivshmem_server_start(&server); g_assert_cmpint(ret, ==, 0); setup_vm_with_server(&state1, nvectors); s1 = &state1; setup_vm_with_server(&state2, nvectors); s2 = &state2; g_assert_cmpuint(in_reg(s1, IVPOSITION), ==, 0xffffffff); g_assert_cmpuint(in_reg(s2, IVPOSITION), ==, 0xffffffff); g_assert_cmpuint(qtest_readb(s1->qtest, (uintptr_t)s1->mem_base), ==, 0x00); thread.server = &server; ret = pipe(thread.pipe); g_assert_cmpint(ret, ==, 0); thread.thread = g_thread_new("ivshmem-server", server_thread, &thread); g_assert(thread.thread != NULL); /* waiting until mapping is done */ while (g_get_monotonic_time() < end_time) { g_usleep(1000); if (qtest_readb(s1->qtest, (uintptr_t)s1->mem_base) == 0x42 && qtest_readb(s2->qtest, (uintptr_t)s2->mem_base) == 0x42) { break; } } /* check got different VM ids */ vm1 = in_reg(s1, IVPOSITION); vm2 = in_reg(s2, IVPOSITION); g_assert_cmpuint(vm1, !=, vm2); global_qtest = s1->qtest; ret = qpci_msix_table_size(s1->dev); g_assert_cmpuint(ret, ==, nvectors); /* ping vm2 -> vm1 */ ret = qpci_msix_pending(s1->dev, 0); g_assert_cmpuint(ret, ==, 0); out_reg(s2, DOORBELL, vm1 << 16); do { g_usleep(10000); ret = qpci_msix_pending(s1->dev, 0); } while (ret == 0 && g_get_monotonic_time() < end_time); g_assert_cmpuint(ret, !=, 0); /* ping vm1 -> vm2 */ global_qtest = s2->qtest; ret = qpci_msix_pending(s2->dev, 0); g_assert_cmpuint(ret, ==, 0); out_reg(s1, DOORBELL, vm2 << 16); do { g_usleep(10000); ret = qpci_msix_pending(s2->dev, 0); } while (ret == 0 && g_get_monotonic_time() < end_time); g_assert_cmpuint(ret, !=, 0); qtest_quit(s2->qtest); qtest_quit(s1->qtest); if (qemu_write_full(thread.pipe[1], "q", 1) != 1) { g_error("qemu_write_full: %s", g_strerror(errno)); } g_thread_join(thread.thread); ivshmem_server_close(&server); close(thread.pipe[1]); close(thread.pipe[0]); } | 16,171 |
1 | static int escape124_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { int buf_size = avpkt->size; Escape124Context *s = avctx->priv_data; AVFrame *frame = data; GetBitContext gb; unsigned frame_flags, frame_size; unsigned i; unsigned superblock_index, cb_index = 1, superblock_col_index = 0, superblocks_per_row = avctx->width / 8, skip = -1; uint16_t* old_frame_data, *new_frame_data; unsigned old_stride, new_stride; int ret; if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0) return ret; // This call also guards the potential depth reads for the // codebook unpacking. if (get_bits_left(&gb) < 64) return -1; frame_flags = get_bits_long(&gb, 32); frame_size = get_bits_long(&gb, 32); // Leave last frame unchanged // FIXME: Is this necessary? I haven't seen it in any real samples if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) { if (!s->frame->data[0]) av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n"); *got_frame = 1; if ((ret = av_frame_ref(frame, s->frame)) < 0) return ret; return frame_size; for (i = 0; i < 3; i++) { if (frame_flags & (1 << (17 + i))) { unsigned cb_depth, cb_size; if (i == 2) { // This codebook can be cut off at places other than // powers of 2, leaving some of the entries undefined. cb_size = get_bits_long(&gb, 20); if (!cb_size) { av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n"); cb_depth = av_log2(cb_size - 1) + 1; } else { cb_depth = get_bits(&gb, 4); if (i == 0) { // This is the most basic codebook: pow(2,depth) entries // for a depth-length key cb_size = 1 << cb_depth; } else { // This codebook varies per superblock // FIXME: I don't think this handles integer overflow // properly cb_size = s->num_superblocks << cb_depth; av_freep(&s->codebooks[i].blocks); s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size); if (!s->codebooks[i].blocks) return -1; if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; new_frame_data = (uint16_t*)frame->data[0]; new_stride = frame->linesize[0] / 2; old_frame_data = (uint16_t*)s->frame->data[0]; old_stride = s->frame->linesize[0] / 2; for (superblock_index = 0; superblock_index < s->num_superblocks; superblock_index++) { MacroBlock mb; SuperBlock sb; unsigned multi_mask = 0; if (skip == -1) { // Note that this call will make us skip the rest of the blocks // if the frame prematurely ends skip = decode_skip_count(&gb); if (skip) { copy_superblock(new_frame_data, new_stride, old_frame_data, old_stride); } else { copy_superblock(sb.pixels, 8, old_frame_data, old_stride); while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) { unsigned mask; mb = decode_macroblock(s, &gb, &cb_index, superblock_index); mask = get_bits(&gb, 16); multi_mask |= mask; for (i = 0; i < 16; i++) { if (mask & mask_matrix[i]) { insert_mb_into_sb(&sb, mb, i); if (!get_bits1(&gb)) { unsigned inv_mask = get_bits(&gb, 4); for (i = 0; i < 4; i++) { if (inv_mask & (1 << i)) { multi_mask ^= 0xF << i*4; } else { multi_mask ^= get_bits(&gb, 4) << i*4; for (i = 0; i < 16; i++) { if (multi_mask & mask_matrix[i]) { mb = decode_macroblock(s, &gb, &cb_index, superblock_index); insert_mb_into_sb(&sb, mb, i); } else if (frame_flags & (1 << 16)) { while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) { mb = decode_macroblock(s, &gb, &cb_index, superblock_index); insert_mb_into_sb(&sb, mb, get_bits(&gb, 4)); copy_superblock(new_frame_data, new_stride, sb.pixels, 8); superblock_col_index++; new_frame_data += 8; if (old_frame_data) old_frame_data += 8; if (superblock_col_index == 
superblocks_per_row) { new_frame_data += new_stride * 8 - superblocks_per_row * 8; if (old_frame_data) old_frame_data += old_stride * 8 - superblocks_per_row * 8; superblock_col_index = 0; skip--; av_log(avctx, AV_LOG_DEBUG, "Escape sizes: %i, %i, %i\n", frame_size, buf_size, get_bits_count(&gb) / 8); av_frame_unref(s->frame); if ((ret = av_frame_ref(s->frame, frame)) < 0) return ret; *got_frame = 1; return frame_size; | 16,172 |
1 | DeviceState *qdev_create(BusState *bus, const char *name) { DeviceState *dev; dev = qdev_try_create(bus, name); if (!dev) { hw_error("Unknown device '%s' for bus '%s'\n", name, bus->info->name); } return dev; } | 16,173 |
1 | static void quantize_and_encode_band_cost_SQUAD_mips(struct AACEncContext *s, PutBitContext *pb, const float *in, float *out, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, const float ROUNDING) { const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; int i; int qc1, qc2, qc3, qc4; uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1]; uint16_t *p_codes = (uint16_t *)ff_aac_spectral_codes[cb-1]; float *p_vec = (float *)ff_aac_codebook_vectors[cb-1]; abs_pow34_v(s->scoefs, in, size); scaled = s->scoefs; for (i = 0; i < size; i += 4) { int curidx; int *in_int = (int *)&in[i]; int t0, t1, t2, t3, t4, t5, t6, t7; const float *vec; qc1 = scaled[i ] * Q34 + ROUND_STANDARD; qc2 = scaled[i+1] * Q34 + ROUND_STANDARD; qc3 = scaled[i+2] * Q34 + ROUND_STANDARD; qc4 = scaled[i+3] * Q34 + ROUND_STANDARD; __asm__ volatile ( ".set push \n\t" ".set noreorder \n\t" "slt %[qc1], $zero, %[qc1] \n\t" "slt %[qc2], $zero, %[qc2] \n\t" "slt %[qc3], $zero, %[qc3] \n\t" "slt %[qc4], $zero, %[qc4] \n\t" "lw %[t0], 0(%[in_int]) \n\t" "lw %[t1], 4(%[in_int]) \n\t" "lw %[t2], 8(%[in_int]) \n\t" "lw %[t3], 12(%[in_int]) \n\t" "srl %[t0], %[t0], 31 \n\t" "srl %[t1], %[t1], 31 \n\t" "srl %[t2], %[t2], 31 \n\t" "srl %[t3], %[t3], 31 \n\t" "subu %[t4], $zero, %[qc1] \n\t" "subu %[t5], $zero, %[qc2] \n\t" "subu %[t6], $zero, %[qc3] \n\t" "subu %[t7], $zero, %[qc4] \n\t" "movn %[qc1], %[t4], %[t0] \n\t" "movn %[qc2], %[t5], %[t1] \n\t" "movn %[qc3], %[t6], %[t2] \n\t" "movn %[qc4], %[t7], %[t3] \n\t" ".set pop \n\t" : [qc1]"+r"(qc1), [qc2]"+r"(qc2), [qc3]"+r"(qc3), [qc4]"+r"(qc4), [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3), [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6), [t7]"=&r"(t7) : [in_int]"r"(in_int) : "memory" ); curidx = qc1; curidx *= 3; curidx += qc2; curidx *= 3; curidx += qc3; curidx *= 3; curidx += qc4; curidx += 40; put_bits(pb, p_bits[curidx], p_codes[curidx]); if (out) { vec = &p_vec[curidx*4]; out[i+0] = vec[0] * IQ; out[i+1] = vec[1] * IQ; out[i+2] = vec[2] * IQ; out[i+3] = vec[3] * IQ; } } } | 16,174 |
1 | static void vhost_begin(MemoryListener *listener) { } | 16,175 |
1 | void ff_tls_deinit(void) { #if CONFIG_TLS_OPENSSL_PROTOCOL ff_openssl_deinit(); #endif #if CONFIG_TLS_GNUTLS_PROTOCOL ff_gnutls_deinit(); #endif } | 16,176 |
1 | static void xbr2x(AVFrame * input, AVFrame * output, const uint32_t * r2y) { int x,y; int next_line = output->linesize[0]>>2; for (y = 0; y < input->height; y++) { uint32_t pprev; uint32_t pprev2; uint32_t * E = (uint32_t *)(output->data[0] + y * output->linesize[0] * 2); /* middle. Offset of -8 is given */ uint32_t * sa2 = (uint32_t *)(input->data[0] + y * input->linesize[0] - 8); /* up one */ uint32_t * sa1 = sa2 - (input->linesize[0]>>2); /* up two */ uint32_t * sa0 = sa1 - (input->linesize[0]>>2); /* down one */ uint32_t * sa3 = sa2 + (input->linesize[0]>>2); /* down two */ uint32_t * sa4 = sa3 + (input->linesize[0]>>2); if (y <= 1) { sa0 = sa1; if (y == 0) { sa0 = sa1 = sa2; } } if (y >= input->height - 2) { sa4 = sa3; if (y == input->height - 1) { sa4 = sa3 = sa2; } } pprev = pprev2 = 2; for (x = 0; x < input->width; x++) { uint32_t B1 = sa0[2]; uint32_t PB = sa1[2]; uint32_t PE = sa2[2]; uint32_t PH = sa3[2]; uint32_t H5 = sa4[2]; uint32_t A1 = sa0[pprev]; uint32_t PA = sa1[pprev]; uint32_t PD = sa2[pprev]; uint32_t PG = sa3[pprev]; uint32_t G5 = sa4[pprev]; uint32_t A0 = sa1[pprev2]; uint32_t D0 = sa2[pprev2]; uint32_t G0 = sa3[pprev2]; uint32_t C1 = 0; uint32_t PC = 0; uint32_t PF = 0; uint32_t PI = 0; uint32_t I5 = 0; uint32_t C4 = 0; uint32_t F4 = 0; uint32_t I4 = 0; if (x >= input->width - 2) { if (x == input->width - 1) { C1 = sa0[2]; PC = sa1[2]; PF = sa2[2]; PI = sa3[2]; I5 = sa4[2]; C4 = sa1[2]; F4 = sa2[2]; I4 = sa3[2]; } else { C1 = sa0[3]; PC = sa1[3]; PF = sa2[3]; PI = sa3[3]; I5 = sa4[3]; C4 = sa1[3]; F4 = sa2[3]; I4 = sa3[3]; } } else { C1 = sa0[3]; PC = sa1[3]; PF = sa2[3]; PI = sa3[3]; I5 = sa4[3]; C4 = sa1[4]; F4 = sa2[4]; I4 = sa3[4]; } E[0] = E[1] = E[next_line] = E[next_line + 1] = PE; // 0, 1, 2, 3 FILT2(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, 0, 1, next_line, next_line+1); FILT2(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, next_line, 0, next_line+1, 1); FILT2(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, next_line+1, next_line, 1, 0); FILT2(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, 1, next_line+1, 0, next_line); sa0 += 1; sa1 += 1; sa2 += 1; sa3 += 1; sa4 += 1; E += 2; if (pprev2){ pprev2--; pprev = 1; } } } } | 16,178 |
1 | static inline int32_t mipsdsp_sat_add_i32(int32_t a, int32_t b, CPUMIPSState *env) { int32_t tempI; tempI = a + b; if (MIPSDSP_OVERFLOW(a, b, tempI, 0x80000000)) { if (a > 0) { tempI = 0x7FFFFFFF; } else { tempI = 0x80000000; } set_DSPControl_overflow_flag(1, 20, env); } return tempI; } | 16,179 |
1 | CharDriverState *uart_hci_init(qemu_irq wakeup) { struct csrhci_s *s = (struct csrhci_s *) g_malloc0(sizeof(struct csrhci_s)); s->chr.opaque = s; s->chr.chr_write = csrhci_write; s->chr.chr_ioctl = csrhci_ioctl; s->hci = qemu_next_hci(); s->hci->opaque = s; s->hci->evt_recv = csrhci_out_hci_packet_event; s->hci->acl_recv = csrhci_out_hci_packet_acl; s->out_tm = qemu_new_timer_ns(vm_clock, csrhci_out_tick, s); s->pins = qemu_allocate_irqs(csrhci_pins, s, __csrhci_pins); csrhci_reset(s); return &s->chr; } | 16,180 |