CVE ID (string, 13–43 chars, nullable) | CVE Page (string, 45–48 chars, nullable) | CWE ID (string, 90 classes) | codeLink (string, 46–139 chars) | commit_id (string, 6–81 chars) | commit_message (string, 3–13.3k chars, nullable) | func_after (string, 14–241k chars) | func_before (string, 14–241k chars) | lang (string, 3 classes) | project (string, 309 classes) | vul (int8, 0 or 1)
---|---|---|---|---|---|---|---|---|---|---|
CVE-2018-6096
|
https://www.cvedetails.com/cve/CVE-2018-6096/
| null |
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
|
36f801fdbec07d116a6f4f07bb363f10897d6a51
|
If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <[email protected]>
Reviewed-by: Philip Jägenstedt <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#533790}
|
bool RenderFrameHostImpl::OnMessageReceived(const IPC::Message &msg) {
if (!render_frame_created_)
return false;
ScopedActiveURL scoped_active_url(this);
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(RenderFrameHostImpl, msg)
IPC_MESSAGE_HANDLER(FrameHostMsg_RenderProcessGone, OnRenderProcessGone)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
if (handled)
return true;
if (delegate_->OnMessageReceived(this, msg))
return true;
RenderFrameProxyHost* proxy =
frame_tree_node_->render_manager()->GetProxyToParent();
if (proxy && proxy->cross_process_frame_connector() &&
proxy->cross_process_frame_connector()->OnMessageReceived(msg))
return true;
handled = true;
IPC_BEGIN_MESSAGE_MAP(RenderFrameHostImpl, msg)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidAddMessageToConsole,
OnDidAddMessageToConsole)
IPC_MESSAGE_HANDLER(FrameHostMsg_Detach, OnDetach)
IPC_MESSAGE_HANDLER(FrameHostMsg_FrameFocused, OnFrameFocused)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidStartProvisionalLoad,
OnDidStartProvisionalLoad)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidFailProvisionalLoadWithError,
OnDidFailProvisionalLoadWithError)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidFailLoadWithError,
OnDidFailLoadWithError)
IPC_MESSAGE_HANDLER(FrameHostMsg_UpdateState, OnUpdateState)
IPC_MESSAGE_HANDLER(FrameHostMsg_OpenURL, OnOpenURL)
IPC_MESSAGE_HANDLER(FrameHostMsg_DocumentOnLoadCompleted,
OnDocumentOnLoadCompleted)
IPC_MESSAGE_HANDLER(FrameHostMsg_BeforeUnload_ACK, OnBeforeUnloadACK)
IPC_MESSAGE_HANDLER(FrameHostMsg_SwapOut_ACK, OnSwapOutACK)
IPC_MESSAGE_HANDLER(FrameHostMsg_ContextMenu, OnContextMenu)
IPC_MESSAGE_HANDLER(FrameHostMsg_JavaScriptExecuteResponse,
OnJavaScriptExecuteResponse)
IPC_MESSAGE_HANDLER(FrameHostMsg_VisualStateResponse,
OnVisualStateResponse)
IPC_MESSAGE_HANDLER_DELAY_REPLY(FrameHostMsg_RunJavaScriptDialog,
OnRunJavaScriptDialog)
IPC_MESSAGE_HANDLER_DELAY_REPLY(FrameHostMsg_RunBeforeUnloadConfirm,
OnRunBeforeUnloadConfirm)
IPC_MESSAGE_HANDLER(FrameHostMsg_RunFileChooser, OnRunFileChooser)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidAccessInitialDocument,
OnDidAccessInitialDocument)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeOpener, OnDidChangeOpener)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidAddContentSecurityPolicies,
OnDidAddContentSecurityPolicies)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeFramePolicy,
OnDidChangeFramePolicy)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeFrameOwnerProperties,
OnDidChangeFrameOwnerProperties)
IPC_MESSAGE_HANDLER(FrameHostMsg_UpdateTitle, OnUpdateTitle)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidBlockFramebust, OnDidBlockFramebust)
IPC_MESSAGE_HANDLER(FrameHostMsg_AbortNavigation, OnAbortNavigation)
IPC_MESSAGE_HANDLER(FrameHostMsg_DispatchLoad, OnDispatchLoad)
IPC_MESSAGE_HANDLER(FrameHostMsg_ForwardResourceTimingToParent,
OnForwardResourceTimingToParent)
IPC_MESSAGE_HANDLER(FrameHostMsg_TextSurroundingSelectionResponse,
OnTextSurroundingSelectionResponse)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_Events, OnAccessibilityEvents)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_LocationChanges,
OnAccessibilityLocationChanges)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_FindInPageResult,
OnAccessibilityFindInPageResult)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_ChildFrameHitTestResult,
OnAccessibilityChildFrameHitTestResult)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_SnapshotResponse,
OnAccessibilitySnapshotResponse)
IPC_MESSAGE_HANDLER(FrameHostMsg_ToggleFullscreen, OnToggleFullscreen)
IPC_MESSAGE_HANDLER(FrameHostMsg_SuddenTerminationDisablerChanged,
OnSuddenTerminationDisablerChanged)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidStartLoading, OnDidStartLoading)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidStopLoading, OnDidStopLoading)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeLoadProgress,
OnDidChangeLoadProgress)
IPC_MESSAGE_HANDLER(FrameHostMsg_SerializeAsMHTMLResponse,
OnSerializeAsMHTMLResponse)
IPC_MESSAGE_HANDLER(FrameHostMsg_SelectionChanged, OnSelectionChanged)
IPC_MESSAGE_HANDLER(FrameHostMsg_FocusedNodeChanged, OnFocusedNodeChanged)
IPC_MESSAGE_HANDLER(FrameHostMsg_SetHasReceivedUserGesture,
OnSetHasReceivedUserGesture)
IPC_MESSAGE_HANDLER(FrameHostMsg_SetHasReceivedUserGestureBeforeNavigation,
OnSetHasReceivedUserGestureBeforeNavigation)
IPC_MESSAGE_HANDLER(FrameHostMsg_ScrollRectToVisibleInParentFrame,
OnScrollRectToVisibleInParentFrame)
IPC_MESSAGE_HANDLER(FrameHostMsg_FrameDidCallFocus, OnFrameDidCallFocus)
#if BUILDFLAG(USE_EXTERNAL_POPUP_MENU)
IPC_MESSAGE_HANDLER(FrameHostMsg_ShowPopup, OnShowPopup)
IPC_MESSAGE_HANDLER(FrameHostMsg_HidePopup, OnHidePopup)
#endif
IPC_MESSAGE_HANDLER(FrameHostMsg_RequestOverlayRoutingToken,
OnRequestOverlayRoutingToken)
IPC_MESSAGE_HANDLER(FrameHostMsg_ShowCreatedWindow, OnShowCreatedWindow)
IPC_MESSAGE_HANDLER(FrameHostMsg_StreamHandleConsumed,
OnStreamHandleConsumed)
IPC_END_MESSAGE_MAP()
return handled;
}
|
bool RenderFrameHostImpl::OnMessageReceived(const IPC::Message &msg) {
if (!render_frame_created_)
return false;
ScopedActiveURL scoped_active_url(this);
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(RenderFrameHostImpl, msg)
IPC_MESSAGE_HANDLER(FrameHostMsg_RenderProcessGone, OnRenderProcessGone)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
if (handled)
return true;
if (delegate_->OnMessageReceived(this, msg))
return true;
RenderFrameProxyHost* proxy =
frame_tree_node_->render_manager()->GetProxyToParent();
if (proxy && proxy->cross_process_frame_connector() &&
proxy->cross_process_frame_connector()->OnMessageReceived(msg))
return true;
handled = true;
IPC_BEGIN_MESSAGE_MAP(RenderFrameHostImpl, msg)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidAddMessageToConsole,
OnDidAddMessageToConsole)
IPC_MESSAGE_HANDLER(FrameHostMsg_Detach, OnDetach)
IPC_MESSAGE_HANDLER(FrameHostMsg_FrameFocused, OnFrameFocused)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidStartProvisionalLoad,
OnDidStartProvisionalLoad)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidFailProvisionalLoadWithError,
OnDidFailProvisionalLoadWithError)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidFailLoadWithError,
OnDidFailLoadWithError)
IPC_MESSAGE_HANDLER(FrameHostMsg_UpdateState, OnUpdateState)
IPC_MESSAGE_HANDLER(FrameHostMsg_OpenURL, OnOpenURL)
IPC_MESSAGE_HANDLER(FrameHostMsg_DocumentOnLoadCompleted,
OnDocumentOnLoadCompleted)
IPC_MESSAGE_HANDLER(FrameHostMsg_BeforeUnload_ACK, OnBeforeUnloadACK)
IPC_MESSAGE_HANDLER(FrameHostMsg_SwapOut_ACK, OnSwapOutACK)
IPC_MESSAGE_HANDLER(FrameHostMsg_ContextMenu, OnContextMenu)
IPC_MESSAGE_HANDLER(FrameHostMsg_JavaScriptExecuteResponse,
OnJavaScriptExecuteResponse)
IPC_MESSAGE_HANDLER(FrameHostMsg_VisualStateResponse,
OnVisualStateResponse)
IPC_MESSAGE_HANDLER_DELAY_REPLY(FrameHostMsg_RunJavaScriptDialog,
OnRunJavaScriptDialog)
IPC_MESSAGE_HANDLER_DELAY_REPLY(FrameHostMsg_RunBeforeUnloadConfirm,
OnRunBeforeUnloadConfirm)
IPC_MESSAGE_HANDLER(FrameHostMsg_RunFileChooser, OnRunFileChooser)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidAccessInitialDocument,
OnDidAccessInitialDocument)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeOpener, OnDidChangeOpener)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidAddContentSecurityPolicies,
OnDidAddContentSecurityPolicies)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeFramePolicy,
OnDidChangeFramePolicy)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeFrameOwnerProperties,
OnDidChangeFrameOwnerProperties)
IPC_MESSAGE_HANDLER(FrameHostMsg_UpdateTitle, OnUpdateTitle)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidBlockFramebust, OnDidBlockFramebust)
IPC_MESSAGE_HANDLER(FrameHostMsg_AbortNavigation, OnAbortNavigation)
IPC_MESSAGE_HANDLER(FrameHostMsg_DispatchLoad, OnDispatchLoad)
IPC_MESSAGE_HANDLER(FrameHostMsg_ForwardResourceTimingToParent,
OnForwardResourceTimingToParent)
IPC_MESSAGE_HANDLER(FrameHostMsg_TextSurroundingSelectionResponse,
OnTextSurroundingSelectionResponse)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_Events, OnAccessibilityEvents)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_LocationChanges,
OnAccessibilityLocationChanges)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_FindInPageResult,
OnAccessibilityFindInPageResult)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_ChildFrameHitTestResult,
OnAccessibilityChildFrameHitTestResult)
IPC_MESSAGE_HANDLER(AccessibilityHostMsg_SnapshotResponse,
OnAccessibilitySnapshotResponse)
IPC_MESSAGE_HANDLER(FrameHostMsg_ToggleFullscreen, OnToggleFullscreen)
IPC_MESSAGE_HANDLER(FrameHostMsg_SuddenTerminationDisablerChanged,
OnSuddenTerminationDisablerChanged)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidStartLoading, OnDidStartLoading)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidStopLoading, OnDidStopLoading)
IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeLoadProgress,
OnDidChangeLoadProgress)
IPC_MESSAGE_HANDLER(FrameHostMsg_SerializeAsMHTMLResponse,
OnSerializeAsMHTMLResponse)
IPC_MESSAGE_HANDLER(FrameHostMsg_SelectionChanged, OnSelectionChanged)
IPC_MESSAGE_HANDLER(FrameHostMsg_FocusedNodeChanged, OnFocusedNodeChanged)
IPC_MESSAGE_HANDLER(FrameHostMsg_SetHasReceivedUserGesture,
OnSetHasReceivedUserGesture)
IPC_MESSAGE_HANDLER(FrameHostMsg_SetHasReceivedUserGestureBeforeNavigation,
OnSetHasReceivedUserGestureBeforeNavigation)
IPC_MESSAGE_HANDLER(FrameHostMsg_ScrollRectToVisibleInParentFrame,
OnScrollRectToVisibleInParentFrame)
#if BUILDFLAG(USE_EXTERNAL_POPUP_MENU)
IPC_MESSAGE_HANDLER(FrameHostMsg_ShowPopup, OnShowPopup)
IPC_MESSAGE_HANDLER(FrameHostMsg_HidePopup, OnHidePopup)
#endif
IPC_MESSAGE_HANDLER(FrameHostMsg_RequestOverlayRoutingToken,
OnRequestOverlayRoutingToken)
IPC_MESSAGE_HANDLER(FrameHostMsg_ShowCreatedWindow, OnShowCreatedWindow)
IPC_MESSAGE_HANDLER(FrameHostMsg_StreamHandleConsumed,
OnStreamHandleConsumed)
IPC_END_MESSAGE_MAP()
return handled;
}
|
C
|
Chrome
| 1 |
CVE-2013-2927
|
https://www.cvedetails.com/cve/CVE-2013-2927/
|
CWE-399
|
https://github.com/chromium/chromium/commit/4d77eed905ce1d00361282e8822a2a3be61d25c0
|
4d77eed905ce1d00361282e8822a2a3be61d25c0
|
Fix a crash in HTMLFormElement::prepareForSubmission.
BUG=297478
TEST=automated with ASAN.
Review URL: https://chromiumcodereview.appspot.com/24910003
git-svn-id: svn://svn.chromium.org/blink/trunk@158428 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
String HTMLFormElement::name() const
{
return getNameAttribute();
}
|
String HTMLFormElement::name() const
{
return getNameAttribute();
}
|
C
|
Chrome
| 0 |
CVE-2019-7308
|
https://www.cvedetails.com/cve/CVE-2019-7308/
|
CWE-189
|
https://github.com/torvalds/linux/commit/d3bd7413e0ca40b60cf60d4003246d067cafdeda
|
d3bd7413e0ca40b60cf60d4003246d067cafdeda
|
bpf: fix sanitation of alu op with pointer / scalar type from different paths
While 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer
arithmetic") took care of rejecting alu op on pointer when e.g. pointer
came from two different map values with different map properties such as
value size, Jann reported that a case was not covered yet when a given
alu op is used in both "ptr_reg += reg" and "numeric_reg += reg" from
different branches where we would incorrectly try to sanitize based
on the pointer's limit. Catch this corner case and reject the program
instead.
Fixes: 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer arithmetic")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
kfree(state->refs);
kfree(state->stack);
kfree(state);
}
|
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
kfree(state->refs);
kfree(state->stack);
kfree(state);
}
|
C
|
linux
| 0 |
CVE-2016-10746
|
https://www.cvedetails.com/cve/CVE-2016-10746/
|
CWE-254
|
https://github.com/libvirt/libvirt/commit/506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]>
|
virConnectDomainXMLToNative(virConnectPtr conn,
const char *nativeFormat,
const char *domainXml,
unsigned int flags)
{
VIR_DEBUG("conn=%p, format=%s, xml=%s, flags=%x",
conn, NULLSTR(nativeFormat), NULLSTR(domainXml), flags);
virResetLastError();
virCheckConnectReturn(conn, NULL);
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(nativeFormat, error);
virCheckNonNullArgGoto(domainXml, error);
if (conn->driver->connectDomainXMLToNative) {
char *ret;
ret = conn->driver->connectDomainXMLToNative(conn,
nativeFormat,
domainXml,
flags);
if (!ret)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(conn);
return NULL;
}
|
virConnectDomainXMLToNative(virConnectPtr conn,
const char *nativeFormat,
const char *domainXml,
unsigned int flags)
{
VIR_DEBUG("conn=%p, format=%s, xml=%s, flags=%x",
conn, NULLSTR(nativeFormat), NULLSTR(domainXml), flags);
virResetLastError();
virCheckConnectReturn(conn, NULL);
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(nativeFormat, error);
virCheckNonNullArgGoto(domainXml, error);
if (conn->driver->connectDomainXMLToNative) {
char *ret;
ret = conn->driver->connectDomainXMLToNative(conn,
nativeFormat,
domainXml,
flags);
if (!ret)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(conn);
return NULL;
}
|
C
|
libvirt
| 0 |
CVE-2012-0957
|
https://www.cvedetails.com/cve/CVE-2012-0957/
|
CWE-16
|
https://github.com/torvalds/linux/commit/2702b1526c7278c4d65d78de209a465d4de2885e
|
2702b1526c7278c4d65d78de209a465d4de2885e
|
kernel/sys.c: fix stack memory content leak via UNAME26
Calling uname() with the UNAME26 personality set allows a leak of kernel
stack contents. This fixes it by defensively calculating the length of
copy_to_user() call, making the len argument unsigned, and initializing
the stack buffer to zero (now technically unneeded, but hey, overkill).
CVE-2012-0957
Reported-by: PaX Team <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: PaX Team <[email protected]>
Cc: Brad Spengler <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
syscore_shutdown();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
machine_halt();
}
|
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
syscore_shutdown();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
machine_halt();
}
|
C
|
linux
| 0 |
CVE-2013-7421
|
https://www.cvedetails.com/cve/CVE-2013-7421/
|
CWE-264
|
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
}
|
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
}
|
C
|
linux
| 0 |
CVE-2013-0886
|
https://www.cvedetails.com/cve/CVE-2013-0886/
| null |
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
|
18d67244984a574ba2dd8779faabc0e3e34f4b76
|
Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderWidgetHostViewAura::WasHidden() {
if (host_->is_hidden())
return;
host_->WasHidden();
released_front_lock_ = NULL;
#if defined(OS_WIN)
aura::RootWindow* root_window = window_->GetRootWindow();
if (root_window) {
HWND parent = root_window->GetAcceleratedWidget();
LPARAM lparam = reinterpret_cast<LPARAM>(this);
EnumChildWindows(parent, HideWindowsCallback, lparam);
}
#endif
}
|
void RenderWidgetHostViewAura::WasHidden() {
if (host_->is_hidden())
return;
host_->WasHidden();
released_front_lock_ = NULL;
if (ShouldReleaseFrontSurface() &&
host_->is_accelerated_compositing_active()) {
current_surface_ = 0;
UpdateExternalTexture();
}
AdjustSurfaceProtection();
#if defined(OS_WIN)
aura::RootWindow* root_window = window_->GetRootWindow();
if (root_window) {
HWND parent = root_window->GetAcceleratedWidget();
LPARAM lparam = reinterpret_cast<LPARAM>(this);
EnumChildWindows(parent, HideWindowsCallback, lparam);
}
#endif
}
|
C
|
Chrome
| 1 |
CVE-2019-5827
|
https://www.cvedetails.com/cve/CVE-2019-5827/
|
CWE-190
|
https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4
|
517ac71c9ee27f856f9becde8abea7d1604af9d4
|
sqlite: backport bugfixes for dbfuzz2
Bug: 952406
Change-Id: Icbec429742048d6674828726c96d8e265c41b595
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152
Reviewed-by: Chris Mumford <[email protected]>
Commit-Queue: Darwin Huang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#651030}
|
static u32 countLookasideSlots(LookasideSlot *p){
u32 cnt = 0;
while( p ){
p = p->pNext;
cnt++;
}
return cnt;
}
|
static u32 countLookasideSlots(LookasideSlot *p){
u32 cnt = 0;
while( p ){
p = p->pNext;
cnt++;
}
return cnt;
}
|
C
|
Chrome
| 0 |
CVE-2019-13638
|
https://www.cvedetails.com/cve/CVE-2019-13638/
|
CWE-78
|
https://git.savannah.gnu.org/cgit/patch.git/commit/?id=3fcd042d26d70856e826a42b5f93dc4854d80bf0
|
3fcd042d26d70856e826a42b5f93dc4854d80bf0
| null |
pch_c_function (void)
{
return p_c_function;
}
|
pch_c_function (void)
{
return p_c_function;
}
|
C
|
savannah
| 0 |
CVE-2014-1700
|
https://www.cvedetails.com/cve/CVE-2014-1700/
|
CWE-399
|
https://github.com/chromium/chromium/commit/318530d771586b39056c0da7b8bdad03469a0dc4
|
318530d771586b39056c0da7b8bdad03469a0dc4
|
Move smart deploy to tristate.
BUG=
Review URL: https://codereview.chromium.org/1149383006
Cr-Commit-Position: refs/heads/master@{#333058}
|
MockEventBlocker() {}
|
MockEventBlocker() {}
|
C
|
Chrome
| 0 |
CVE-2016-2070
|
https://www.cvedetails.com/cve/CVE-2016-2070/
|
CWE-189
|
https://github.com/torvalds/linux/commit/8b8a321ff72c785ed5e8b4cf6eda20b35d427390
|
8b8a321ff72c785ed5e8b4cf6eda20b35d427390
|
tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <[email protected]>
Signed-off-by: Yuchung Cheng <[email protected]>
Signed-off-by: Neal Cardwell <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static bool tcp_try_undo_partial(struct sock *sk, const int acked,
const int prior_unsacked, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->undo_marker && tcp_packet_delayed(tp)) {
/* Plain luck! Hole if filled with delayed
* packet, rather than with a retransmit.
*/
tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
/* We are getting evidence that the reordering degree is higher
* than we realized. If there are no retransmits out then we
* can undo. Otherwise we clock out new packets but do not
* mark more packets lost or retransmit more.
*/
if (tp->retrans_out) {
tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
return true;
}
if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
DBGUNDO(sk, "partial recovery");
tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
tcp_try_keep_open(sk);
return true;
}
return false;
}
|
static bool tcp_try_undo_partial(struct sock *sk, const int acked,
const int prior_unsacked, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->undo_marker && tcp_packet_delayed(tp)) {
/* Plain luck! Hole if filled with delayed
* packet, rather than with a retransmit.
*/
tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
/* We are getting evidence that the reordering degree is higher
* than we realized. If there are no retransmits out then we
* can undo. Otherwise we clock out new packets but do not
* mark more packets lost or retransmit more.
*/
if (tp->retrans_out) {
tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
return true;
}
if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
DBGUNDO(sk, "partial recovery");
tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
tcp_try_keep_open(sk);
return true;
}
return false;
}
|
C
|
linux
| 0 |
CVE-2013-6420
|
https://www.cvedetails.com/cve/CVE-2013-6420/
|
CWE-119
|
https://git.php.net/?p=php-src.git;a=commit;h=c1224573c773b6845e83505f717fbf820fc18415
|
c1224573c773b6845e83505f717fbf820fc18415
| null |
PHP_FUNCTION(openssl_x509_export)
{
X509 * cert;
zval ** zcert, *zout;
zend_bool notext = 1;
BIO * bio_out;
long certresource;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zz|b", &zcert, &zout, ¬ext) == FAILURE) {
return;
}
RETVAL_FALSE;
cert = php_openssl_x509_from_zval(zcert, 0, &certresource TSRMLS_CC);
if (cert == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "cannot get cert from parameter 1");
return;
}
bio_out = BIO_new(BIO_s_mem());
if (!notext) {
X509_print(bio_out, cert);
}
if (PEM_write_bio_X509(bio_out, cert)) {
BUF_MEM *bio_buf;
zval_dtor(zout);
BIO_get_mem_ptr(bio_out, &bio_buf);
ZVAL_STRINGL(zout, bio_buf->data, bio_buf->length, 1);
RETVAL_TRUE;
}
if (certresource == -1 && cert) {
X509_free(cert);
}
BIO_free(bio_out);
}
|
PHP_FUNCTION(openssl_x509_export)
{
X509 * cert;
zval ** zcert, *zout;
zend_bool notext = 1;
BIO * bio_out;
long certresource;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zz|b", &zcert, &zout, ¬ext) == FAILURE) {
return;
}
RETVAL_FALSE;
cert = php_openssl_x509_from_zval(zcert, 0, &certresource TSRMLS_CC);
if (cert == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "cannot get cert from parameter 1");
return;
}
bio_out = BIO_new(BIO_s_mem());
if (!notext) {
X509_print(bio_out, cert);
}
if (PEM_write_bio_X509(bio_out, cert)) {
BUF_MEM *bio_buf;
zval_dtor(zout);
BIO_get_mem_ptr(bio_out, &bio_buf);
ZVAL_STRINGL(zout, bio_buf->data, bio_buf->length, 1);
RETVAL_TRUE;
}
if (certresource == -1 && cert) {
X509_free(cert);
}
BIO_free(bio_out);
}
|
C
|
php
| 0 |
CVE-2012-2133
|
https://www.cvedetails.com/cve/CVE-2012-2133/
|
CWE-399
|
https://github.com/torvalds/linux/commit/90481622d75715bfcb68501280a917dbfe516029
|
90481622d75715bfcb68501280a917dbfe516029
|
hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, had the hugepages store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <[email protected]>
Signed-off-by: David Gibson <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Paul Mackerras <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
size_t len, loff_t *ppos)
{
struct hstate *h = hstate_file(filp);
struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
unsigned long index = *ppos >> huge_page_shift(h);
unsigned long offset = *ppos & ~huge_page_mask(h);
unsigned long end_index;
loff_t isize;
ssize_t retval = 0;
/* validate length */
if (len == 0)
goto out;
for (;;) {
struct page *page;
unsigned long nr, ret;
int ra;
/* nr is the maximum number of bytes to copy from this page */
nr = huge_page_size(h);
isize = i_size_read(inode);
if (!isize)
goto out;
end_index = (isize - 1) >> huge_page_shift(h);
if (index >= end_index) {
if (index > end_index)
goto out;
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
if (nr <= offset)
goto out;
}
nr = nr - offset;
/* Find the page */
page = find_lock_page(mapping, index);
if (unlikely(page == NULL)) {
/*
* We have a HOLE, zero out the user-buffer for the
* length of the hole or request.
*/
ret = len < nr ? len : nr;
if (clear_user(buf, ret))
ra = -EFAULT;
else
ra = 0;
} else {
unlock_page(page);
/*
* We have the page, copy it to user space buffer.
*/
ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
ret = ra;
page_cache_release(page);
}
if (ra < 0) {
if (retval == 0)
retval = ra;
goto out;
}
offset += ret;
retval += ret;
len -= ret;
index += offset >> huge_page_shift(h);
offset &= ~huge_page_mask(h);
/* short read or no more work */
if ((ret != nr) || (len == 0))
break;
}
out:
*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
return retval;
}
|
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
size_t len, loff_t *ppos)
{
struct hstate *h = hstate_file(filp);
struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
unsigned long index = *ppos >> huge_page_shift(h);
unsigned long offset = *ppos & ~huge_page_mask(h);
unsigned long end_index;
loff_t isize;
ssize_t retval = 0;
/* validate length */
if (len == 0)
goto out;
for (;;) {
struct page *page;
unsigned long nr, ret;
int ra;
/* nr is the maximum number of bytes to copy from this page */
nr = huge_page_size(h);
isize = i_size_read(inode);
if (!isize)
goto out;
end_index = (isize - 1) >> huge_page_shift(h);
if (index >= end_index) {
if (index > end_index)
goto out;
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
if (nr <= offset)
goto out;
}
nr = nr - offset;
/* Find the page */
page = find_lock_page(mapping, index);
if (unlikely(page == NULL)) {
/*
* We have a HOLE, zero out the user-buffer for the
* length of the hole or request.
*/
ret = len < nr ? len : nr;
if (clear_user(buf, ret))
ra = -EFAULT;
else
ra = 0;
} else {
unlock_page(page);
/*
* We have the page, copy it to user space buffer.
*/
ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
ret = ra;
page_cache_release(page);
}
if (ra < 0) {
if (retval == 0)
retval = ra;
goto out;
}
offset += ret;
retval += ret;
len -= ret;
index += offset >> huge_page_shift(h);
offset &= ~huge_page_mask(h);
/* short read or no more work */
if ((ret != nr) || (len == 0))
break;
}
out:
*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
return retval;
}
|
C
|
linux
| 0 |
CVE-2016-3899
|
https://www.cvedetails.com/cve/CVE-2016-3899/
|
CWE-284
|
https://android.googlesource.com/platform/frameworks/av/+/97837bb6cbac21ea679843a0037779d3834bed64
|
97837bb6cbac21ea679843a0037779d3834bed64
|
OMXCodec: check IMemory::pointer() before using allocation
Bug: 29421811
Change-Id: I0a73ba12bae4122f1d89fc92e5ea4f6a96cd1ed1
|
void OMXCodec::setVideoInputFormat(
const char *mime, const sp<MetaData>& meta) {
int32_t width, height, frameRate, bitRate, stride, sliceHeight;
bool success = meta->findInt32(kKeyWidth, &width);
success = success && meta->findInt32(kKeyHeight, &height);
success = success && meta->findInt32(kKeyFrameRate, &frameRate);
success = success && meta->findInt32(kKeyBitRate, &bitRate);
success = success && meta->findInt32(kKeyStride, &stride);
success = success && meta->findInt32(kKeySliceHeight, &sliceHeight);
CHECK(success);
CHECK(stride != 0);
OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
compressionFormat = OMX_VIDEO_CodingAVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
compressionFormat = OMX_VIDEO_CodingHEVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG4;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
compressionFormat = OMX_VIDEO_CodingH263;
} else {
ALOGE("Not a supported video mime type: %s", mime);
CHECK(!"Should not be here. Not a supported video mime type.");
}
OMX_COLOR_FORMATTYPE colorFormat;
CHECK_EQ((status_t)OK, findTargetColorFormat(meta, &colorFormat));
status_t err;
OMX_PARAM_PORTDEFINITIONTYPE def;
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
CHECK_EQ(setVideoPortFormatType(
kPortIndexInput, OMX_VIDEO_CodingUnused,
colorFormat), (status_t)OK);
InitOMXParams(&def);
def.nPortIndex = kPortIndexInput;
err = mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
def.nBufferSize = getFrameSize(colorFormat,
stride > 0? stride: -stride, sliceHeight);
CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
video_def->nFrameWidth = width;
video_def->nFrameHeight = height;
video_def->nStride = stride;
video_def->nSliceHeight = sliceHeight;
video_def->xFramerate = (frameRate << 16); // Q16 format
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
video_def->eColorFormat = colorFormat;
err = mOMX->setParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
CHECK_EQ(setVideoPortFormatType(
kPortIndexOutput, compressionFormat, OMX_COLOR_FormatUnused),
(status_t)OK);
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
err = mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
video_def->nFrameWidth = width;
video_def->nFrameHeight = height;
video_def->xFramerate = 0; // No need for output port
video_def->nBitrate = bitRate; // Q16 format
video_def->eCompressionFormat = compressionFormat;
video_def->eColorFormat = OMX_COLOR_FormatUnused;
if (mQuirks & kRequiresLargerEncoderOutputBuffer) {
def.nBufferSize = ((def.nBufferSize * 3) >> 1);
}
err = mOMX->setParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
switch (compressionFormat) {
case OMX_VIDEO_CodingMPEG4:
{
CHECK_EQ(setupMPEG4EncoderParameters(meta), (status_t)OK);
break;
}
case OMX_VIDEO_CodingH263:
CHECK_EQ(setupH263EncoderParameters(meta), (status_t)OK);
break;
case OMX_VIDEO_CodingAVC:
{
CHECK_EQ(setupAVCEncoderParameters(meta), (status_t)OK);
break;
}
default:
CHECK(!"Support for this compressionFormat to be implemented.");
break;
}
}
|
void OMXCodec::setVideoInputFormat(
const char *mime, const sp<MetaData>& meta) {
int32_t width, height, frameRate, bitRate, stride, sliceHeight;
bool success = meta->findInt32(kKeyWidth, &width);
success = success && meta->findInt32(kKeyHeight, &height);
success = success && meta->findInt32(kKeyFrameRate, &frameRate);
success = success && meta->findInt32(kKeyBitRate, &bitRate);
success = success && meta->findInt32(kKeyStride, &stride);
success = success && meta->findInt32(kKeySliceHeight, &sliceHeight);
CHECK(success);
CHECK(stride != 0);
OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
compressionFormat = OMX_VIDEO_CodingAVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
compressionFormat = OMX_VIDEO_CodingHEVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG4;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
compressionFormat = OMX_VIDEO_CodingH263;
} else {
ALOGE("Not a supported video mime type: %s", mime);
CHECK(!"Should not be here. Not a supported video mime type.");
}
OMX_COLOR_FORMATTYPE colorFormat;
CHECK_EQ((status_t)OK, findTargetColorFormat(meta, &colorFormat));
status_t err;
OMX_PARAM_PORTDEFINITIONTYPE def;
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
CHECK_EQ(setVideoPortFormatType(
kPortIndexInput, OMX_VIDEO_CodingUnused,
colorFormat), (status_t)OK);
InitOMXParams(&def);
def.nPortIndex = kPortIndexInput;
err = mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
def.nBufferSize = getFrameSize(colorFormat,
stride > 0? stride: -stride, sliceHeight);
CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
video_def->nFrameWidth = width;
video_def->nFrameHeight = height;
video_def->nStride = stride;
video_def->nSliceHeight = sliceHeight;
video_def->xFramerate = (frameRate << 16); // Q16 format
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
video_def->eColorFormat = colorFormat;
err = mOMX->setParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
CHECK_EQ(setVideoPortFormatType(
kPortIndexOutput, compressionFormat, OMX_COLOR_FormatUnused),
(status_t)OK);
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
err = mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
video_def->nFrameWidth = width;
video_def->nFrameHeight = height;
video_def->xFramerate = 0; // No need for output port
video_def->nBitrate = bitRate; // Q16 format
video_def->eCompressionFormat = compressionFormat;
video_def->eColorFormat = OMX_COLOR_FormatUnused;
if (mQuirks & kRequiresLargerEncoderOutputBuffer) {
def.nBufferSize = ((def.nBufferSize * 3) >> 1);
}
err = mOMX->setParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
switch (compressionFormat) {
case OMX_VIDEO_CodingMPEG4:
{
CHECK_EQ(setupMPEG4EncoderParameters(meta), (status_t)OK);
break;
}
case OMX_VIDEO_CodingH263:
CHECK_EQ(setupH263EncoderParameters(meta), (status_t)OK);
break;
case OMX_VIDEO_CodingAVC:
{
CHECK_EQ(setupAVCEncoderParameters(meta), (status_t)OK);
break;
}
default:
CHECK(!"Support for this compressionFormat to be implemented.");
break;
}
}
|
C
|
Android
| 0 |
CVE-2017-11144
|
https://www.cvedetails.com/cve/CVE-2017-11144/
|
CWE-754
|
https://git.php.net/?p=php-src.git;a=commit;h=73cabfedf519298e1a11192699f44d53c529315e
|
73cabfedf519298e1a11192699f44d53c529315e
| null |
PHP_FUNCTION(openssl_pkey_get_details)
{
zval *key;
EVP_PKEY *pkey;
BIO *out;
unsigned int pbio_len;
char *pbio;
zend_long ktype;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &key) == FAILURE) {
return;
}
if ((pkey = (EVP_PKEY *)zend_fetch_resource(Z_RES_P(key), "OpenSSL key", le_key)) == NULL) {
RETURN_FALSE;
}
out = BIO_new(BIO_s_mem());
PEM_write_bio_PUBKEY(out, pkey);
pbio_len = BIO_get_mem_data(out, &pbio);
array_init(return_value);
add_assoc_long(return_value, "bits", EVP_PKEY_bits(pkey));
add_assoc_stringl(return_value, "key", pbio, pbio_len);
/*TODO: Use the real values once the openssl constants are used
* See the enum at the top of this file
*/
switch (EVP_PKEY_base_id(pkey)) {
case EVP_PKEY_RSA:
case EVP_PKEY_RSA2:
{
RSA *rsa = EVP_PKEY_get0_RSA(pkey);
ktype = OPENSSL_KEYTYPE_RSA;
if (rsa != NULL) {
zval z_rsa;
const BIGNUM *n, *e, *d, *p, *q, *dmp1, *dmq1, *iqmp;
RSA_get0_key(rsa, &n, &e, &d);
RSA_get0_factors(rsa, &p, &q);
RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp);
array_init(&z_rsa);
OPENSSL_PKEY_GET_BN(z_rsa, n);
OPENSSL_PKEY_GET_BN(z_rsa, e);
OPENSSL_PKEY_GET_BN(z_rsa, d);
OPENSSL_PKEY_GET_BN(z_rsa, p);
OPENSSL_PKEY_GET_BN(z_rsa, q);
OPENSSL_PKEY_GET_BN(z_rsa, dmp1);
OPENSSL_PKEY_GET_BN(z_rsa, dmq1);
OPENSSL_PKEY_GET_BN(z_rsa, iqmp);
add_assoc_zval(return_value, "rsa", &z_rsa);
}
}
break;
case EVP_PKEY_DSA:
case EVP_PKEY_DSA2:
case EVP_PKEY_DSA3:
case EVP_PKEY_DSA4:
{
DSA *dsa = EVP_PKEY_get0_DSA(pkey);
ktype = OPENSSL_KEYTYPE_DSA;
if (dsa != NULL) {
zval z_dsa;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DSA_get0_pqg(dsa, &p, &q, &g);
DSA_get0_key(dsa, &pub_key, &priv_key);
array_init(&z_dsa);
OPENSSL_PKEY_GET_BN(z_dsa, p);
OPENSSL_PKEY_GET_BN(z_dsa, q);
OPENSSL_PKEY_GET_BN(z_dsa, g);
OPENSSL_PKEY_GET_BN(z_dsa, priv_key);
OPENSSL_PKEY_GET_BN(z_dsa, pub_key);
add_assoc_zval(return_value, "dsa", &z_dsa);
}
}
break;
case EVP_PKEY_DH:
{
DH *dh = EVP_PKEY_get0_DH(pkey);
ktype = OPENSSL_KEYTYPE_DH;
if (dh != NULL) {
zval z_dh;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DH_get0_pqg(dh, &p, &q, &g);
DH_get0_key(dh, &pub_key, &priv_key);
array_init(&z_dh);
OPENSSL_PKEY_GET_BN(z_dh, p);
OPENSSL_PKEY_GET_BN(z_dh, g);
OPENSSL_PKEY_GET_BN(z_dh, priv_key);
OPENSSL_PKEY_GET_BN(z_dh, pub_key);
add_assoc_zval(return_value, "dh", &z_dh);
}
}
break;
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
ktype = OPENSSL_KEYTYPE_EC;
if (EVP_PKEY_get0_EC_KEY(pkey) != NULL) {
zval ec;
const EC_GROUP *ec_group;
int nid;
char *crv_sn;
ASN1_OBJECT *obj;
char oir_buf[80];
ec_group = EC_KEY_get0_group(EVP_PKEY_get1_EC_KEY(pkey));
nid = EC_GROUP_get_curve_name(ec_group);
if (nid == NID_undef) {
break;
}
array_init(&ec);
crv_sn = (char*) OBJ_nid2sn(nid);
if (crv_sn != NULL) {
add_assoc_string(&ec, "curve_name", crv_sn);
}
obj = OBJ_nid2obj(nid);
if (obj != NULL) {
int oir_len = OBJ_obj2txt(oir_buf, sizeof(oir_buf), obj, 1);
add_assoc_stringl(&ec, "curve_oid", (char*)oir_buf, oir_len);
ASN1_OBJECT_free(obj);
}
add_assoc_zval(return_value, "ec", &ec);
}
break;
#endif
default:
ktype = -1;
break;
}
add_assoc_long(return_value, "type", ktype);
BIO_free(out);
}
|
PHP_FUNCTION(openssl_pkey_get_details)
{
zval *key;
EVP_PKEY *pkey;
BIO *out;
unsigned int pbio_len;
char *pbio;
zend_long ktype;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &key) == FAILURE) {
return;
}
if ((pkey = (EVP_PKEY *)zend_fetch_resource(Z_RES_P(key), "OpenSSL key", le_key)) == NULL) {
RETURN_FALSE;
}
out = BIO_new(BIO_s_mem());
PEM_write_bio_PUBKEY(out, pkey);
pbio_len = BIO_get_mem_data(out, &pbio);
array_init(return_value);
add_assoc_long(return_value, "bits", EVP_PKEY_bits(pkey));
add_assoc_stringl(return_value, "key", pbio, pbio_len);
/*TODO: Use the real values once the openssl constants are used
* See the enum at the top of this file
*/
switch (EVP_PKEY_base_id(pkey)) {
case EVP_PKEY_RSA:
case EVP_PKEY_RSA2:
{
RSA *rsa = EVP_PKEY_get0_RSA(pkey);
ktype = OPENSSL_KEYTYPE_RSA;
if (rsa != NULL) {
zval z_rsa;
const BIGNUM *n, *e, *d, *p, *q, *dmp1, *dmq1, *iqmp;
RSA_get0_key(rsa, &n, &e, &d);
RSA_get0_factors(rsa, &p, &q);
RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp);
array_init(&z_rsa);
OPENSSL_PKEY_GET_BN(z_rsa, n);
OPENSSL_PKEY_GET_BN(z_rsa, e);
OPENSSL_PKEY_GET_BN(z_rsa, d);
OPENSSL_PKEY_GET_BN(z_rsa, p);
OPENSSL_PKEY_GET_BN(z_rsa, q);
OPENSSL_PKEY_GET_BN(z_rsa, dmp1);
OPENSSL_PKEY_GET_BN(z_rsa, dmq1);
OPENSSL_PKEY_GET_BN(z_rsa, iqmp);
add_assoc_zval(return_value, "rsa", &z_rsa);
}
}
break;
case EVP_PKEY_DSA:
case EVP_PKEY_DSA2:
case EVP_PKEY_DSA3:
case EVP_PKEY_DSA4:
{
DSA *dsa = EVP_PKEY_get0_DSA(pkey);
ktype = OPENSSL_KEYTYPE_DSA;
if (dsa != NULL) {
zval z_dsa;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DSA_get0_pqg(dsa, &p, &q, &g);
DSA_get0_key(dsa, &pub_key, &priv_key);
array_init(&z_dsa);
OPENSSL_PKEY_GET_BN(z_dsa, p);
OPENSSL_PKEY_GET_BN(z_dsa, q);
OPENSSL_PKEY_GET_BN(z_dsa, g);
OPENSSL_PKEY_GET_BN(z_dsa, priv_key);
OPENSSL_PKEY_GET_BN(z_dsa, pub_key);
add_assoc_zval(return_value, "dsa", &z_dsa);
}
}
break;
case EVP_PKEY_DH:
{
DH *dh = EVP_PKEY_get0_DH(pkey);
ktype = OPENSSL_KEYTYPE_DH;
if (dh != NULL) {
zval z_dh;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DH_get0_pqg(dh, &p, &q, &g);
DH_get0_key(dh, &pub_key, &priv_key);
array_init(&z_dh);
OPENSSL_PKEY_GET_BN(z_dh, p);
OPENSSL_PKEY_GET_BN(z_dh, g);
OPENSSL_PKEY_GET_BN(z_dh, priv_key);
OPENSSL_PKEY_GET_BN(z_dh, pub_key);
add_assoc_zval(return_value, "dh", &z_dh);
}
}
break;
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
ktype = OPENSSL_KEYTYPE_EC;
if (EVP_PKEY_get0_EC_KEY(pkey) != NULL) {
zval ec;
const EC_GROUP *ec_group;
int nid;
char *crv_sn;
ASN1_OBJECT *obj;
char oir_buf[80];
ec_group = EC_KEY_get0_group(EVP_PKEY_get1_EC_KEY(pkey));
nid = EC_GROUP_get_curve_name(ec_group);
if (nid == NID_undef) {
break;
}
array_init(&ec);
crv_sn = (char*) OBJ_nid2sn(nid);
if (crv_sn != NULL) {
add_assoc_string(&ec, "curve_name", crv_sn);
}
obj = OBJ_nid2obj(nid);
if (obj != NULL) {
int oir_len = OBJ_obj2txt(oir_buf, sizeof(oir_buf), obj, 1);
add_assoc_stringl(&ec, "curve_oid", (char*)oir_buf, oir_len);
ASN1_OBJECT_free(obj);
}
add_assoc_zval(return_value, "ec", &ec);
}
break;
#endif
default:
ktype = -1;
break;
}
add_assoc_long(return_value, "type", ktype);
BIO_free(out);
}
|
C
|
php
| 0 |
CVE-2011-0716
|
https://www.cvedetails.com/cve/CVE-2011-0716/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6b0d6a9b4296fa16a28d10d416db7a770fc03287
|
6b0d6a9b4296fa16a28d10d416db7a770fc03287
|
bridge: Fix mglist corruption that leads to memory corruption
The list mp->mglist is used to indicate whether a multicast group
is active on the bridge interface itself as opposed to one of the
constituent interfaces in the bridge.
Unfortunately the operation that adds the mp->mglist node to the
list neglected to check whether it has already been added. This
leads to list corruption in the form of nodes pointing to itself.
Normally this would be quite obvious as it would cause an infinite
loop when walking the list. However, as this list is never actually
walked (which means that we don't really need it, I'll get rid of
it in a subsequent patch), this instead is hidden until we perform
a delete operation on the affected nodes.
As the same node may now be pointed to by more than one node, the
delete operations can then cause modification of freed memory.
This was observed in practice to cause corruption in 512-byte slabs,
most commonly leading to crashes in jbd2.
Thanks to Josef Bacik for pointing me in the right direction.
Reported-by: Ian Page Hands <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void br_multicast_stop(struct net_bridge *br)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct hlist_node *p, *n;
u32 ver;
int i;
del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->multicast_querier_timer);
del_timer_sync(&br->multicast_query_timer);
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
if (!mdb)
goto out;
br->mdb = NULL;
ver = mdb->ver;
for (i = 0; i < mdb->max; i++) {
hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
del_timer(&mp->query_timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
}
}
if (mdb->old) {
spin_unlock_bh(&br->multicast_lock);
rcu_barrier_bh();
spin_lock_bh(&br->multicast_lock);
WARN_ON(mdb->old);
}
mdb->old = mdb;
call_rcu_bh(&mdb->rcu, br_mdb_free);
out:
spin_unlock_bh(&br->multicast_lock);
}
|
void br_multicast_stop(struct net_bridge *br)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct hlist_node *p, *n;
u32 ver;
int i;
del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->multicast_querier_timer);
del_timer_sync(&br->multicast_query_timer);
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
if (!mdb)
goto out;
br->mdb = NULL;
ver = mdb->ver;
for (i = 0; i < mdb->max; i++) {
hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
del_timer(&mp->query_timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
}
}
if (mdb->old) {
spin_unlock_bh(&br->multicast_lock);
rcu_barrier_bh();
spin_lock_bh(&br->multicast_lock);
WARN_ON(mdb->old);
}
mdb->old = mdb;
call_rcu_bh(&mdb->rcu, br_mdb_free);
out:
spin_unlock_bh(&br->multicast_lock);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
|
a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
|
Introduce background.scripts feature for extension manifests.
This optimizes for the common use case where background pages
just include a reference to one or more script files and no
additional HTML.
BUG=107791
Review URL: http://codereview.chromium.org/9150008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@117110 0039d316-1c4b-4281-b951-d872f2087c98
|
~CrashNotificationDelegate() {
}
|
~CrashNotificationDelegate() {
}
|
C
|
Chrome
| 0 |
CVE-2011-1428
|
https://www.cvedetails.com/cve/CVE-2011-1428/
|
CWE-20
|
https://git.savannah.gnu.org/gitweb/?p=weechat.git;a=commit;h=c265cad1c95b84abfd4e8d861f25926ef13b5d91
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
| null |
hook_timer_time_to_next (struct timeval *tv_timeout)
{
struct t_hook *ptr_hook;
int found;
struct timeval tv_now;
long diff_usec;
hook_timer_check_system_clock ();
found = 0;
tv_timeout->tv_sec = 0;
tv_timeout->tv_usec = 0;
for (ptr_hook = weechat_hooks[HOOK_TYPE_TIMER]; ptr_hook;
ptr_hook = ptr_hook->next_hook)
{
if (!ptr_hook->deleted
&& (!found
|| (util_timeval_cmp (&HOOK_TIMER(ptr_hook, next_exec), tv_timeout) < 0)))
{
found = 1;
tv_timeout->tv_sec = HOOK_TIMER(ptr_hook, next_exec).tv_sec;
tv_timeout->tv_usec = HOOK_TIMER(ptr_hook, next_exec).tv_usec;
}
}
/* no timeout found, return 2 seconds by default */
if (!found)
{
tv_timeout->tv_sec = 2;
tv_timeout->tv_usec = 0;
return;
}
gettimeofday (&tv_now, NULL);
/* next timeout is past date! */
if (util_timeval_cmp (tv_timeout, &tv_now) < 0)
{
tv_timeout->tv_sec = 0;
tv_timeout->tv_usec = 0;
return;
}
tv_timeout->tv_sec = tv_timeout->tv_sec - tv_now.tv_sec;
diff_usec = tv_timeout->tv_usec - tv_now.tv_usec;
if (diff_usec >= 0)
tv_timeout->tv_usec = diff_usec;
else
{
tv_timeout->tv_sec--;
tv_timeout->tv_usec = 1000000 + diff_usec;
}
/*
* to detect clock skew, we ensure there's a call to timers every
* 2 seconds max
*/
if (tv_timeout->tv_sec > 2)
{
tv_timeout->tv_sec = 2;
tv_timeout->tv_usec = 0;
}
}
|
hook_timer_time_to_next (struct timeval *tv_timeout)
{
struct t_hook *ptr_hook;
int found;
struct timeval tv_now;
long diff_usec;
hook_timer_check_system_clock ();
found = 0;
tv_timeout->tv_sec = 0;
tv_timeout->tv_usec = 0;
for (ptr_hook = weechat_hooks[HOOK_TYPE_TIMER]; ptr_hook;
ptr_hook = ptr_hook->next_hook)
{
if (!ptr_hook->deleted
&& (!found
|| (util_timeval_cmp (&HOOK_TIMER(ptr_hook, next_exec), tv_timeout) < 0)))
{
found = 1;
tv_timeout->tv_sec = HOOK_TIMER(ptr_hook, next_exec).tv_sec;
tv_timeout->tv_usec = HOOK_TIMER(ptr_hook, next_exec).tv_usec;
}
}
/* no timeout found, return 2 seconds by default */
if (!found)
{
tv_timeout->tv_sec = 2;
tv_timeout->tv_usec = 0;
return;
}
gettimeofday (&tv_now, NULL);
/* next timeout is past date! */
if (util_timeval_cmp (tv_timeout, &tv_now) < 0)
{
tv_timeout->tv_sec = 0;
tv_timeout->tv_usec = 0;
return;
}
tv_timeout->tv_sec = tv_timeout->tv_sec - tv_now.tv_sec;
diff_usec = tv_timeout->tv_usec - tv_now.tv_usec;
if (diff_usec >= 0)
tv_timeout->tv_usec = diff_usec;
else
{
tv_timeout->tv_sec--;
tv_timeout->tv_usec = 1000000 + diff_usec;
}
/*
* to detect clock skew, we ensure there's a call to timers every
* 2 seconds max
*/
if (tv_timeout->tv_sec > 2)
{
tv_timeout->tv_sec = 2;
tv_timeout->tv_usec = 0;
}
}
|
C
|
savannah
| 0 |
CVE-2015-9289
|
https://www.cvedetails.com/cve/CVE-2015-9289/
|
CWE-119
|
https://github.com/torvalds/linux/commit/1fa2337a315a2448c5434f41e00d56b01a22283c
|
1fa2337a315a2448c5434f41e00d56b01a22283c
|
[media] cx24116: fix a buffer overflow when checking userspace params
The maximum size for a DiSEqC command is 6, according to the
userspace API. However, the code allows to write up much more values:
drivers/media/dvb-frontends/cx24116.c:983 cx24116_send_diseqc_msg() error: buffer overflow 'd->msg' 6 <= 23
Cc: [email protected]
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
{
/*
* It is safe to discard "params" here, as the DVB core will sync
* fe->dtv_property_cache with fepriv->parameters_in, where the
* DVBv3 params are stored. The only practical usage for it indicate
* that re-tuning is needed, e. g. (fepriv->state & FESTATE_RETUNE) is
* true.
*/
*delay = HZ / 5;
if (re_tune) {
int ret = cx24116_set_frontend(fe);
if (ret)
return ret;
}
return cx24116_read_status(fe, status);
}
|
static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
{
/*
* It is safe to discard "params" here, as the DVB core will sync
* fe->dtv_property_cache with fepriv->parameters_in, where the
* DVBv3 params are stored. The only practical usage for it indicate
* that re-tuning is needed, e. g. (fepriv->state & FESTATE_RETUNE) is
* true.
*/
*delay = HZ / 5;
if (re_tune) {
int ret = cx24116_set_frontend(fe);
if (ret)
return ret;
}
return cx24116_read_status(fe, status);
}
|
C
|
linux
| 0 |
CVE-2011-1428
|
https://www.cvedetails.com/cve/CVE-2011-1428/
|
CWE-20
|
https://git.savannah.gnu.org/gitweb/?p=weechat.git;a=commit;h=c265cad1c95b84abfd4e8d861f25926ef13b5d91
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
| null |
irc_server_apply_command_line_options (struct t_irc_server *server,
int argc, char **argv)
{
int i, index_option;
char *pos, *option_name, *ptr_value, *value_boolean[2] = { "off", "on" };
for (i = 0; i < argc; i++)
{
if (argv[i][0] == '-')
{
pos = strchr (argv[i], '=');
if (pos)
{
option_name = weechat_strndup (argv[i] + 1, pos - argv[i] - 1);
ptr_value = pos + 1;
}
else
{
option_name = strdup (argv[i] + 1);
ptr_value = value_boolean[1];
}
if (option_name)
{
index_option = irc_server_search_option (option_name);
if (index_option < 0)
{
/* look if option is negative, like "-noxxx" */
if (weechat_strncasecmp (argv[i], "-no", 3) == 0)
{
free (option_name);
option_name = strdup (argv[i] + 3);
index_option = irc_server_search_option (option_name);
ptr_value = value_boolean[0];
}
}
if (index_option >= 0)
{
weechat_config_option_set (server->options[index_option],
ptr_value, 1);
}
free (option_name);
}
}
}
}
|
irc_server_apply_command_line_options (struct t_irc_server *server,
int argc, char **argv)
{
int i, index_option;
char *pos, *option_name, *ptr_value, *value_boolean[2] = { "off", "on" };
for (i = 0; i < argc; i++)
{
if (argv[i][0] == '-')
{
pos = strchr (argv[i], '=');
if (pos)
{
option_name = weechat_strndup (argv[i] + 1, pos - argv[i] - 1);
ptr_value = pos + 1;
}
else
{
option_name = strdup (argv[i] + 1);
ptr_value = value_boolean[1];
}
if (option_name)
{
index_option = irc_server_search_option (option_name);
if (index_option < 0)
{
/* look if option is negative, like "-noxxx" */
if (weechat_strncasecmp (argv[i], "-no", 3) == 0)
{
free (option_name);
option_name = strdup (argv[i] + 3);
index_option = irc_server_search_option (option_name);
ptr_value = value_boolean[0];
}
}
if (index_option >= 0)
{
weechat_config_option_set (server->options[index_option],
ptr_value, 1);
}
free (option_name);
}
}
}
}
|
C
|
savannah
| 0 |
CVE-2014-9903
|
https://www.cvedetails.com/cve/CVE-2014-9903/
|
CWE-200
|
https://github.com/torvalds/linux/commit/4efbc454ba68def5ef285b26ebfcfdb605b52755
|
4efbc454ba68def5ef285b26ebfcfdb605b52755
|
sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
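The leak described above comes down to copying a fixed, oversized length to userspace instead of the length the structure actually owns. Below is a minimal userspace model of that pattern; `fake_sched_attr` and `copy_out` are stand-ins introduced for illustration, not the kernel's types or its copy_to_user().

```c
/* Illustrative userspace model of the leak described above: copy only the
 * bytes that were actually populated (attr.size), not a whole page off the
 * stack.  Names are hypothetical; this is not the kernel implementation. */
#include <stdio.h>
#include <string.h>

struct fake_sched_attr {
    unsigned int size;      /* defaults to sizeof(struct fake_sched_attr) */
    int policy;
    long priority;
};

/* Model of copy_to_user(): dst plays the role of the userspace buffer. */
static int copy_out(void *dst, const void *src, size_t n, size_t dst_cap)
{
    if (n > dst_cap)
        return -1;          /* would overrun the user buffer */
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    char userbuf[256] = { 0 };
    struct fake_sched_attr attr = { .size = sizeof(attr), .policy = 0, .priority = 5 };

    /* Buggy pattern: copying a fixed large size leaks stack bytes past attr:
     *     copy_out(userbuf, &attr, 4096, sizeof(userbuf)); */

    /* Fixed pattern: copy only attr.size bytes, which the struct actually owns. */
    if (copy_out(userbuf, &attr, attr.size, sizeof(userbuf)) == 0)
        printf("copied %u bytes\n", attr.size);
    return 0;
}
```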
|
static bool find_numa_distance(int distance)
{
int i;
if (distance == node_distance(0, 0))
return true;
for (i = 0; i < sched_domains_numa_levels; i++) {
if (sched_domains_numa_distance[i] == distance)
return true;
}
return false;
}
|
static bool find_numa_distance(int distance)
{
int i;
if (distance == node_distance(0, 0))
return true;
for (i = 0; i < sched_domains_numa_levels; i++) {
if (sched_domains_numa_distance[i] == distance)
return true;
}
return false;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a1ce1b69e269a7e61ea0bf0691b90be0cbe9b4c5
|
a1ce1b69e269a7e61ea0bf0691b90be0cbe9b4c5
|
2009-05-04 Kai Brüning <[email protected]>
Reviewed by Eric Seidel.
https://bugs.webkit.org/show_bug.cgi?id=24883
24883: Bad success test in parseXMLDocumentFragment in XMLTokenizerLibxml2.cpp
Fixed test whether all the chunk has been processed to correctly count utf8 bytes.
Test: fast/innerHTML/innerHTML-nbsp.xhtml
* dom/XMLTokenizerLibxml2.cpp:
(WebCore::parseXMLDocumentFragment):
git-svn-id: svn://svn.chromium.org/blink/trunk@43195 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void warningHandler(void* closure, const char* message, ...)
{
va_list args;
va_start(args, message);
getTokenizer(closure)->error(XMLTokenizer::warning, message, args);
va_end(args);
}
|
static void warningHandler(void* closure, const char* message, ...)
{
va_list args;
va_start(args, message);
getTokenizer(closure)->error(XMLTokenizer::warning, message, args);
va_end(args);
}
|
C
|
Chrome
| 0 |
CVE-2019-12818
|
https://www.cvedetails.com/cve/CVE-2019-12818/
|
CWE-476
|
https://github.com/torvalds/linux/commit/58bdd544e2933a21a51eecf17c3f5f94038261b5
|
58bdd544e2933a21a51eecf17c3f5f94038261b5
|
net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
KASAN report this:
BUG: KASAN: null-ptr-deref in nfc_llcp_build_gb+0x37f/0x540 [nfc]
Read of size 3 at addr 0000000000000000 by task syz-executor.0/5401
CPU: 0 PID: 5401 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
kasan_report+0x171/0x18d mm/kasan/report.c:321
memcpy+0x1f/0x50 mm/kasan/common.c:130
nfc_llcp_build_gb+0x37f/0x540 [nfc]
nfc_llcp_register_device+0x6eb/0xb50 [nfc]
nfc_register_device+0x50/0x1d0 [nfc]
nfcsim_device_new+0x394/0x67d [nfcsim]
? 0xffffffffc1080000
nfcsim_init+0x6b/0x1000 [nfcsim]
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f9cb79dcc58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000280 RDI: 0000000000000003
RBP: 00007f9cb79dcc70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9cb79dd6bc
R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004
nfc_llcp_build_tlv will return NULL on fails, caller should check it,
otherwise will trigger a NULL dereference.
Reported-by: Hulk Robot <[email protected]>
Fixes: eda21f16a5ed ("NFC: Set MIU and RW values from CONNECT and CC LLCP frames")
Fixes: d646960f7986 ("NFC: Initial LLCP support")
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
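The message says nfc_llcp_build_tlv() can return NULL and callers must check it before copying. Here is a small, self-contained sketch of that caller-side rule, using a hypothetical build_tlv()/append_tlv() pair rather than the real NFC code.

```c
/* Sketch of the caller-side pattern implied by the fix: treat a NULL return
 * from the TLV builder as an error instead of passing it to memcpy(). */
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static unsigned char *build_tlv(unsigned char type, const unsigned char *value,
                                unsigned char len, size_t *tlv_len)
{
    unsigned char *tlv = malloc((size_t)len + 2);
    if (!tlv)
        return NULL;                 /* allocation failure: caller must check */
    tlv[0] = type;
    tlv[1] = len;
    memcpy(tlv + 2, value, len);
    *tlv_len = (size_t)len + 2;
    return tlv;
}

int append_tlv(unsigned char *gb, size_t cap, size_t *off,
               unsigned char type, const unsigned char *value, unsigned char len)
{
    size_t tlv_len;
    unsigned char *tlv = build_tlv(type, value, len, &tlv_len);

    if (!tlv)                        /* the missing check behind the CVE */
        return -ENOMEM;
    if (*off + tlv_len > cap) {
        free(tlv);
        return -ENOSPC;
    }
    memcpy(gb + *off, tlv, tlv_len);
    *off += tlv_len;
    free(tlv);
    return 0;
}
```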
|
static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb)
{
local->rx_pending = skb;
del_timer(&local->link_timer);
schedule_work(&local->rx_work);
}
|
static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb)
{
local->rx_pending = skb;
del_timer(&local->link_timer);
schedule_work(&local->rx_work);
}
|
C
|
linux
| 0 |
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log messages were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
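The notes above mention new safe integer add/multiply helpers and a max_samples cap. A hedged sketch of what such a checked multiply plus cap can look like follows; safe_size_mul and the limits used here are illustrative, not JasPer's actual API.

```c
/* Checked size_t multiply in the spirit of the "safe integer add/multiply
 * functions" mentioned above, combined with a sample-count cap. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool safe_size_mul(size_t a, size_t b, size_t *result)
{
    if (b != 0 && a > SIZE_MAX / b)
        return false;            /* a * b would overflow size_t */
    *result = a * b;
    return true;
}

int main(void)
{
    size_t samples;
    size_t width = 50000, height = 50000;
    size_t max_samples = (size_t)64 * 1024 * 1024;   /* illustrative cap */

    if (!safe_size_mul(width, height, &samples) || samples > max_samples) {
        fprintf(stderr, "image too large, refusing to decode\n");
        return 1;
    }
    printf("%zu samples\n", samples);
    return 0;
}
```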
|
jas_iccprof_t *jas_iccprof_createfrombuf(jas_uchar *buf, int len)
{
jas_stream_t *in;
jas_iccprof_t *prof;
if (!(in = jas_stream_memopen(JAS_CAST(char *, buf), len)))
goto error;
if (!(prof = jas_iccprof_load(in)))
goto error;
jas_stream_close(in);
return prof;
error:
if (in)
jas_stream_close(in);
return 0;
}
|
jas_iccprof_t *jas_iccprof_createfrombuf(uchar *buf, int len)
{
jas_stream_t *in;
jas_iccprof_t *prof;
if (!(in = jas_stream_memopen(JAS_CAST(char *, buf), len)))
goto error;
if (!(prof = jas_iccprof_load(in)))
goto error;
jas_stream_close(in);
return prof;
error:
if (in)
jas_stream_close(in);
return 0;
}
|
C
|
jasper
| 1 |
CVE-2017-14106
|
https://www.cvedetails.com/cve/CVE-2017-14106/
|
CWE-369
|
https://github.com/torvalds/linux/commit/499350a5a6e7512d9ed369ed63a4244b6536f4f8
|
499350a5a6e7512d9ed369ed63a4244b6536f4f8
|
tcp: initialize rcv_mss to TCP_MIN_MSS instead of 0
When tcp_disconnect() is called, inet_csk_delack_init() sets
icsk->icsk_ack.rcv_mss to 0.
This could potentially cause tcp_recvmsg() => tcp_cleanup_rbuf() =>
__tcp_select_window() call path to have division by 0 issue.
So this patch initializes rcv_mss to TCP_MIN_MSS instead of 0.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Wei Wang <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Neal Cardwell <[email protected]>
Signed-off-by: Yuchung Cheng <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
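The fix amounts to seeding the receive-MSS estimate with a non-zero floor so later window arithmetic cannot divide by zero. A userspace model of that idea follows; TCP_MIN_MSS_MODEL and the structures are stand-ins, not the kernel's definitions.

```c
/* Model of the fix: initialize the receive-MSS estimate to a sane minimum
 * instead of 0 so window math never divides by zero after a disconnect. */
#include <stdio.h>

#define TCP_MIN_MSS_MODEL 88

struct conn_state {
    unsigned int rcv_mss;
};

static void reset_conn(struct conn_state *c)
{
    /* Buggy pattern: c->rcv_mss = 0;  -- later space / rcv_mss would trap. */
    c->rcv_mss = TCP_MIN_MSS_MODEL;
}

static unsigned int select_window(const struct conn_state *c, unsigned int space)
{
    /* Divisor is guaranteed non-zero because reset_conn() floors it. */
    return (space / c->rcv_mss) * c->rcv_mss;
}

int main(void)
{
    struct conn_state c;
    reset_conn(&c);
    printf("window = %u\n", select_window(&c, 65535));
    return 0;
}
```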
|
static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
{
struct tcp_repair_window opt;
if (!tp->repair)
return -EPERM;
if (len != sizeof(opt))
return -EINVAL;
if (copy_from_user(&opt, optbuf, sizeof(opt)))
return -EFAULT;
if (opt.max_window < opt.snd_wnd)
return -EINVAL;
if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
return -EINVAL;
if (after(opt.rcv_wup, tp->rcv_nxt))
return -EINVAL;
tp->snd_wl1 = opt.snd_wl1;
tp->snd_wnd = opt.snd_wnd;
tp->max_window = opt.max_window;
tp->rcv_wnd = opt.rcv_wnd;
tp->rcv_wup = opt.rcv_wup;
return 0;
}
|
static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
{
struct tcp_repair_window opt;
if (!tp->repair)
return -EPERM;
if (len != sizeof(opt))
return -EINVAL;
if (copy_from_user(&opt, optbuf, sizeof(opt)))
return -EFAULT;
if (opt.max_window < opt.snd_wnd)
return -EINVAL;
if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
return -EINVAL;
if (after(opt.rcv_wup, tp->rcv_nxt))
return -EINVAL;
tp->snd_wl1 = opt.snd_wl1;
tp->snd_wnd = opt.snd_wnd;
tp->max_window = opt.max_window;
tp->rcv_wnd = opt.rcv_wnd;
tp->rcv_wup = opt.rcv_wup;
return 0;
}
|
C
|
linux
| 0 |
CVE-2016-3835
|
https://www.cvedetails.com/cve/CVE-2016-3835/
|
CWE-200
|
https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95
|
7558d03e6498e970b761aa44fff6b2c659202d95
|
DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers
Heap pointers do not point to user virtual addresses in case
of secure session.
Set them to NULL and add checks to avoid accessing them
Bug: 28815329
Bug: 28920116
Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
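The message describes leaving heap pointers NULL for secure sessions and guarding every access to them. A minimal C sketch of that pattern is below, with an illustrative buffer_info struct rather than the driver's real types.

```c
/* Sketch of the pattern the message describes: for secure sessions the heap
 * pointers have no user-virtual mapping, so they stay NULL and every access
 * is guarded.  Structure and field names are illustrative only. */
#include <stddef.h>
#include <string.h>
#include <stdbool.h>

struct buffer_info {
    bool   secure_session;
    void  *heap_ptr;        /* NULL when secure: nothing valid to dereference */
    size_t len;
};

static int copy_extradata(struct buffer_info *buf, const void *src, size_t n)
{
    if (buf->secure_session || buf->heap_ptr == NULL)
        return -1;          /* refuse instead of dereferencing */
    if (n > buf->len)
        return -1;
    memcpy(buf->heap_ptr, src, n);
    return 0;
}
```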
|
bool venc_dev::venc_set_config(void *configData, OMX_INDEXTYPE index)
{
DEBUG_PRINT_LOW("Inside venc_set_config");
switch ((int)index) {
case OMX_IndexConfigVideoBitrate:
{
OMX_VIDEO_CONFIG_BITRATETYPE *bit_rate = (OMX_VIDEO_CONFIG_BITRATETYPE *)
configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoBitrate");
if (bit_rate->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (venc_set_target_bitrate(bit_rate->nEncodeBitrate, 1) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Target Bit rate failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoBitrate");
}
break;
}
case OMX_IndexConfigVideoFramerate:
{
OMX_CONFIG_FRAMERATETYPE *frame_rate = (OMX_CONFIG_FRAMERATETYPE *)
configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoFramerate");
if (frame_rate->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (venc_set_encode_framerate(frame_rate->xEncodeFramerate, 1) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Encode Framerate failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoFramerate");
}
break;
}
case QOMX_IndexConfigVideoIntraperiod:
{
DEBUG_PRINT_LOW("venc_set_param:QOMX_IndexConfigVideoIntraperiod");
QOMX_VIDEO_INTRAPERIODTYPE *intraperiod =
(QOMX_VIDEO_INTRAPERIODTYPE *)configData;
if (intraperiod->nPortIndex == (OMX_U32) PORT_INDEX_OUT) {
if (venc_set_intra_period(intraperiod->nPFrames, intraperiod->nBFrames) == false) {
DEBUG_PRINT_ERROR("ERROR: Request for setting intra period failed");
return false;
}
}
break;
}
case OMX_IndexConfigVideoIntraVOPRefresh:
{
OMX_CONFIG_INTRAREFRESHVOPTYPE *intra_vop_refresh = (OMX_CONFIG_INTRAREFRESHVOPTYPE *)
configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoIntraVOPRefresh");
if (intra_vop_refresh->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (venc_set_intra_vop_refresh(intra_vop_refresh->IntraRefreshVOP) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Encode Framerate failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoFramerate");
}
break;
}
case OMX_IndexConfigCommonRotate:
{
OMX_CONFIG_ROTATIONTYPE *config_rotation =
reinterpret_cast<OMX_CONFIG_ROTATIONTYPE*>(configData);
OMX_U32 nFrameWidth;
if (!config_rotation) {
return false;
}
if (true == deinterlace_enabled) {
DEBUG_PRINT_ERROR("ERROR: Rotation is not supported with deinterlacing");
return false;
}
DEBUG_PRINT_HIGH("venc_set_config: updating the new Dims");
nFrameWidth = m_sVenc_cfg.dvs_width;
m_sVenc_cfg.dvs_width = m_sVenc_cfg.dvs_height;
m_sVenc_cfg.dvs_height = nFrameWidth;
if(venc_set_vpe_rotation(config_rotation->nRotation) == false) {
DEBUG_PRINT_ERROR("ERROR: Dimension Change for Rotation failed");
return false;
}
break;
}
case OMX_IndexConfigVideoAVCIntraPeriod:
{
OMX_VIDEO_CONFIG_AVCINTRAPERIOD *avc_iperiod = (OMX_VIDEO_CONFIG_AVCINTRAPERIOD*) configData;
DEBUG_PRINT_LOW("venc_set_param: OMX_IndexConfigVideoAVCIntraPeriod");
if (venc_set_idr_period(avc_iperiod->nPFrames, avc_iperiod->nIDRPeriod)
== false) {
DEBUG_PRINT_ERROR("ERROR: Setting "
"OMX_IndexConfigVideoAVCIntraPeriod failed");
return false;
}
break;
}
case OMX_IndexConfigCommonDeinterlace:
{
OMX_VIDEO_CONFIG_DEINTERLACE *deinterlace = (OMX_VIDEO_CONFIG_DEINTERLACE *) configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigCommonDeinterlace");
if(deinterlace->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (m_sVenc_cfg.dvs_width == m_sVenc_cfg.input_height &&
m_sVenc_cfg.dvs_height == m_sVenc_cfg.input_width)
{
DEBUG_PRINT_ERROR("ERROR: Deinterlace not supported with rotation");
return false;
}
if(venc_set_deinterlace(deinterlace->nEnable) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Deinterlace failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigCommonDeinterlace");
}
break;
}
case OMX_IndexConfigVideoVp8ReferenceFrame:
{
OMX_VIDEO_VP8REFERENCEFRAMETYPE* vp8refframe = (OMX_VIDEO_VP8REFERENCEFRAMETYPE*) configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoVp8ReferenceFrame");
if ((vp8refframe->nPortIndex == (OMX_U32)PORT_INDEX_IN) &&
(vp8refframe->bUseGoldenFrame)) {
if(venc_set_useltr(0x1) == false) {
DEBUG_PRINT_ERROR("ERROR: use goldenframe failed");
return false;
}
} else if((vp8refframe->nPortIndex == (OMX_U32)PORT_INDEX_IN) &&
(vp8refframe->bGoldenFrameRefresh)) {
if(venc_set_markltr(0x1) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting goldenframe failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoVp8ReferenceFrame");
}
break;
}
case OMX_QcomIndexConfigVideoLTRUse:
{
OMX_QCOM_VIDEO_CONFIG_LTRUSE_TYPE* pParam = (OMX_QCOM_VIDEO_CONFIG_LTRUSE_TYPE*)configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_QcomIndexConfigVideoLTRUse");
if (pParam->nPortIndex == (OMX_U32)PORT_INDEX_IN) {
if (venc_set_useltr(pParam->nID) == false) {
DEBUG_PRINT_ERROR("ERROR: Use LTR failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_QcomIndexConfigVideoLTRUse");
}
break;
}
case OMX_QcomIndexConfigVideoLTRMark:
{
OMX_QCOM_VIDEO_CONFIG_LTRMARK_TYPE* pParam = (OMX_QCOM_VIDEO_CONFIG_LTRMARK_TYPE*)configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_QcomIndexConfigVideoLTRMark");
if (pParam->nPortIndex == (OMX_U32)PORT_INDEX_IN) {
if (venc_set_markltr(pParam->nID) == false) {
DEBUG_PRINT_ERROR("ERROR: Mark LTR failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_QcomIndexConfigVideoLTRMark");
}
break;
}
case OMX_QcomIndexConfigPerfLevel:
{
OMX_QCOM_VIDEO_CONFIG_PERF_LEVEL *perf =
(OMX_QCOM_VIDEO_CONFIG_PERF_LEVEL *)configData;
DEBUG_PRINT_LOW("Set perf level: %d", perf->ePerfLevel);
if (!venc_set_perf_level(perf->ePerfLevel)) {
DEBUG_PRINT_ERROR("ERROR: Failed to set perf level to %d", perf->ePerfLevel);
return false;
} else {
performance_level.perflevel = (unsigned int) perf->ePerfLevel;
}
break;
}
case OMX_QcomIndexConfigVideoVencPerfMode:
{
QOMX_EXTNINDEX_VIDEO_PERFMODE *pParam = (QOMX_EXTNINDEX_VIDEO_PERFMODE *) configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_QcomIndexConfigVideoVencPerfMode");
if (venc_set_perf_mode(pParam->nPerfMode) == false) {
DEBUG_PRINT_ERROR("Failed to set V4L2_CID_MPEG_VIDC_VIDEO_PERF_MODE");
return false;
}
break;
}
case OMX_IndexConfigPriority:
{
OMX_PARAM_U32TYPE *priority = (OMX_PARAM_U32TYPE *)configData;
DEBUG_PRINT_LOW("Set_config: priority %u",priority->nU32);
if (!venc_set_session_priority(priority->nU32)) {
DEBUG_PRINT_ERROR("Failed to set priority");
return false;
}
break;
}
case OMX_IndexConfigOperatingRate:
{
OMX_PARAM_U32TYPE *rate = (OMX_PARAM_U32TYPE *)configData;
DEBUG_PRINT_LOW("Set_config: operating rate %d", rate->nU32);
if (!venc_set_operatingrate(rate->nU32)) {
DEBUG_PRINT_ERROR("Failed to set operating rate");
return false;
}
break;
}
default:
DEBUG_PRINT_ERROR("Unsupported config index = %u", index);
break;
}
return true;
}
|
bool venc_dev::venc_set_config(void *configData, OMX_INDEXTYPE index)
{
DEBUG_PRINT_LOW("Inside venc_set_config");
switch ((int)index) {
case OMX_IndexConfigVideoBitrate:
{
OMX_VIDEO_CONFIG_BITRATETYPE *bit_rate = (OMX_VIDEO_CONFIG_BITRATETYPE *)
configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoBitrate");
if (bit_rate->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (venc_set_target_bitrate(bit_rate->nEncodeBitrate, 1) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Target Bit rate failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoBitrate");
}
break;
}
case OMX_IndexConfigVideoFramerate:
{
OMX_CONFIG_FRAMERATETYPE *frame_rate = (OMX_CONFIG_FRAMERATETYPE *)
configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoFramerate");
if (frame_rate->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (venc_set_encode_framerate(frame_rate->xEncodeFramerate, 1) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Encode Framerate failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoFramerate");
}
break;
}
case QOMX_IndexConfigVideoIntraperiod:
{
DEBUG_PRINT_LOW("venc_set_param:QOMX_IndexConfigVideoIntraperiod");
QOMX_VIDEO_INTRAPERIODTYPE *intraperiod =
(QOMX_VIDEO_INTRAPERIODTYPE *)configData;
if (intraperiod->nPortIndex == (OMX_U32) PORT_INDEX_OUT) {
if (venc_set_intra_period(intraperiod->nPFrames, intraperiod->nBFrames) == false) {
DEBUG_PRINT_ERROR("ERROR: Request for setting intra period failed");
return false;
}
}
break;
}
case OMX_IndexConfigVideoIntraVOPRefresh:
{
OMX_CONFIG_INTRAREFRESHVOPTYPE *intra_vop_refresh = (OMX_CONFIG_INTRAREFRESHVOPTYPE *)
configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoIntraVOPRefresh");
if (intra_vop_refresh->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (venc_set_intra_vop_refresh(intra_vop_refresh->IntraRefreshVOP) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Encode Framerate failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoFramerate");
}
break;
}
case OMX_IndexConfigCommonRotate:
{
OMX_CONFIG_ROTATIONTYPE *config_rotation =
reinterpret_cast<OMX_CONFIG_ROTATIONTYPE*>(configData);
OMX_U32 nFrameWidth;
if (!config_rotation) {
return false;
}
if (true == deinterlace_enabled) {
DEBUG_PRINT_ERROR("ERROR: Rotation is not supported with deinterlacing");
return false;
}
DEBUG_PRINT_HIGH("venc_set_config: updating the new Dims");
nFrameWidth = m_sVenc_cfg.dvs_width;
m_sVenc_cfg.dvs_width = m_sVenc_cfg.dvs_height;
m_sVenc_cfg.dvs_height = nFrameWidth;
if(venc_set_vpe_rotation(config_rotation->nRotation) == false) {
DEBUG_PRINT_ERROR("ERROR: Dimension Change for Rotation failed");
return false;
}
break;
}
case OMX_IndexConfigVideoAVCIntraPeriod:
{
OMX_VIDEO_CONFIG_AVCINTRAPERIOD *avc_iperiod = (OMX_VIDEO_CONFIG_AVCINTRAPERIOD*) configData;
DEBUG_PRINT_LOW("venc_set_param: OMX_IndexConfigVideoAVCIntraPeriod");
if (venc_set_idr_period(avc_iperiod->nPFrames, avc_iperiod->nIDRPeriod)
== false) {
DEBUG_PRINT_ERROR("ERROR: Setting "
"OMX_IndexConfigVideoAVCIntraPeriod failed");
return false;
}
break;
}
case OMX_IndexConfigCommonDeinterlace:
{
OMX_VIDEO_CONFIG_DEINTERLACE *deinterlace = (OMX_VIDEO_CONFIG_DEINTERLACE *) configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigCommonDeinterlace");
if(deinterlace->nPortIndex == (OMX_U32)PORT_INDEX_OUT) {
if (m_sVenc_cfg.dvs_width == m_sVenc_cfg.input_height &&
m_sVenc_cfg.dvs_height == m_sVenc_cfg.input_width)
{
DEBUG_PRINT_ERROR("ERROR: Deinterlace not supported with rotation");
return false;
}
if(venc_set_deinterlace(deinterlace->nEnable) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting Deinterlace failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigCommonDeinterlace");
}
break;
}
case OMX_IndexConfigVideoVp8ReferenceFrame:
{
OMX_VIDEO_VP8REFERENCEFRAMETYPE* vp8refframe = (OMX_VIDEO_VP8REFERENCEFRAMETYPE*) configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_IndexConfigVideoVp8ReferenceFrame");
if ((vp8refframe->nPortIndex == (OMX_U32)PORT_INDEX_IN) &&
(vp8refframe->bUseGoldenFrame)) {
if(venc_set_useltr(0x1) == false) {
DEBUG_PRINT_ERROR("ERROR: use goldenframe failed");
return false;
}
} else if((vp8refframe->nPortIndex == (OMX_U32)PORT_INDEX_IN) &&
(vp8refframe->bGoldenFrameRefresh)) {
if(venc_set_markltr(0x1) == false) {
DEBUG_PRINT_ERROR("ERROR: Setting goldenframe failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_IndexConfigVideoVp8ReferenceFrame");
}
break;
}
case OMX_QcomIndexConfigVideoLTRUse:
{
OMX_QCOM_VIDEO_CONFIG_LTRUSE_TYPE* pParam = (OMX_QCOM_VIDEO_CONFIG_LTRUSE_TYPE*)configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_QcomIndexConfigVideoLTRUse");
if (pParam->nPortIndex == (OMX_U32)PORT_INDEX_IN) {
if (venc_set_useltr(pParam->nID) == false) {
DEBUG_PRINT_ERROR("ERROR: Use LTR failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_QcomIndexConfigVideoLTRUse");
}
break;
}
case OMX_QcomIndexConfigVideoLTRMark:
{
OMX_QCOM_VIDEO_CONFIG_LTRMARK_TYPE* pParam = (OMX_QCOM_VIDEO_CONFIG_LTRMARK_TYPE*)configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_QcomIndexConfigVideoLTRMark");
if (pParam->nPortIndex == (OMX_U32)PORT_INDEX_IN) {
if (venc_set_markltr(pParam->nID) == false) {
DEBUG_PRINT_ERROR("ERROR: Mark LTR failed");
return false;
}
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid Port Index for OMX_QcomIndexConfigVideoLTRMark");
}
break;
}
case OMX_QcomIndexConfigPerfLevel:
{
OMX_QCOM_VIDEO_CONFIG_PERF_LEVEL *perf =
(OMX_QCOM_VIDEO_CONFIG_PERF_LEVEL *)configData;
DEBUG_PRINT_LOW("Set perf level: %d", perf->ePerfLevel);
if (!venc_set_perf_level(perf->ePerfLevel)) {
DEBUG_PRINT_ERROR("ERROR: Failed to set perf level to %d", perf->ePerfLevel);
return false;
} else {
performance_level.perflevel = (unsigned int) perf->ePerfLevel;
}
break;
}
case OMX_QcomIndexConfigVideoVencPerfMode:
{
QOMX_EXTNINDEX_VIDEO_PERFMODE *pParam = (QOMX_EXTNINDEX_VIDEO_PERFMODE *) configData;
DEBUG_PRINT_LOW("venc_set_config: OMX_QcomIndexConfigVideoVencPerfMode");
if (venc_set_perf_mode(pParam->nPerfMode) == false) {
DEBUG_PRINT_ERROR("Failed to set V4L2_CID_MPEG_VIDC_VIDEO_PERF_MODE");
return false;
}
break;
}
case OMX_IndexConfigPriority:
{
OMX_PARAM_U32TYPE *priority = (OMX_PARAM_U32TYPE *)configData;
DEBUG_PRINT_LOW("Set_config: priority %u",priority->nU32);
if (!venc_set_session_priority(priority->nU32)) {
DEBUG_PRINT_ERROR("Failed to set priority");
return false;
}
break;
}
case OMX_IndexConfigOperatingRate:
{
OMX_PARAM_U32TYPE *rate = (OMX_PARAM_U32TYPE *)configData;
DEBUG_PRINT_LOW("Set_config: operating rate %d", rate->nU32);
if (!venc_set_operatingrate(rate->nU32)) {
DEBUG_PRINT_ERROR("Failed to set operating rate");
return false;
}
break;
}
default:
DEBUG_PRINT_ERROR("Unsupported config index = %u", index);
break;
}
return true;
}
|
C
|
Android
| 0 |
CVE-2019-12951
|
https://www.cvedetails.com/cve/CVE-2019-12951/
|
CWE-119
|
https://github.com/cesanta/mongoose/commit/b3e0f780c34cea88f057a62213c012aa88fe2deb
|
b3e0f780c34cea88f057a62213c012aa88fe2deb
|
Fix heap-based overflow in parse_mqtt
PUBLISHED_FROM=3306592896298597fff5269634df0c1a1555113b
|
MG_INTERNAL void altbuf_reset(struct altbuf *ab) {
mbuf_free(&ab->m);
ab->len = 0;
}
|
MG_INTERNAL void altbuf_reset(struct altbuf *ab) {
mbuf_free(&ab->m);
ab->len = 0;
}
|
C
|
mongoose
| 0 |
CVE-2014-1743
|
https://www.cvedetails.com/cve/CVE-2014-1743/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653}
|
explicit BrowserViewRendererUserData(BrowserViewRenderer* ptr) : bvr_(ptr) {}
|
explicit BrowserViewRendererUserData(BrowserViewRenderer* ptr) : bvr_(ptr) {}
|
C
|
Chrome
| 0 |
CVE-2013-2902
|
https://www.cvedetails.com/cve/CVE-2013-2902/
|
CWE-399
|
https://github.com/chromium/chromium/commit/87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
|
87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
|
Removed pinch viewport scroll offset distribution
The associated change in Blink makes the pinch viewport a proper
ScrollableArea meaning the normal path for synchronizing layer scroll
offsets is used.
This is a 2 sided patch, the other CL:
https://codereview.chromium.org/199253002/
BUG=349941
Review URL: https://codereview.chromium.org/210543002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260105 0039d316-1c4b-4281-b951-d872f2087c98
|
void Layer::SetBounds(const gfx::Size& size) {
DCHECK(IsPropertyChangeAllowed());
if (bounds() == size)
return;
bounds_ = size;
SetNeedsCommit();
}
|
void Layer::SetBounds(const gfx::Size& size) {
DCHECK(IsPropertyChangeAllowed());
if (bounds() == size)
return;
bounds_ = size;
SetNeedsCommit();
}
|
C
|
Chrome
| 0 |
CVE-2016-1691
|
https://www.cvedetails.com/cve/CVE-2016-1691/
|
CWE-119
|
https://github.com/chromium/chromium/commit/e3aa8a56706c4abe208934d5c294f7b594b8b693
|
e3aa8a56706c4abe208934d5c294f7b594b8b693
|
Enforce the WebUsbAllowDevicesForUrls policy
This change modifies UsbChooserContext to use the UsbAllowDevicesForUrls
class to consider devices allowed by the WebUsbAllowDevicesForUrls
policy. The WebUsbAllowDevicesForUrls policy overrides the other WebUSB
policies. Unit tests are also added to ensure that the policy is being
enforced correctly.
The design document for this feature is found at:
https://docs.google.com/document/d/1MPvsrWiVD_jAC8ELyk8njFpy6j1thfVU5aWT3TCWE8w
Bug: 854329
Change-Id: I5f82e662ca9dc544da5918eae766b5535a31296b
Reviewed-on: https://chromium-review.googlesource.com/c/1259289
Commit-Queue: Ovidio Henriquez <[email protected]>
Reviewed-by: Reilly Grant <[email protected]>
Reviewed-by: Julian Pastarmov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#597926}
|
void CheckURLIsBlockedInWebContents(content::WebContents* web_contents,
const GURL& url) {
EXPECT_EQ(url, web_contents->GetURL());
base::string16 blocked_page_title;
if (url.has_host()) {
blocked_page_title = base::UTF8ToUTF16(url.host());
} else {
blocked_page_title = base::UTF8ToUTF16(url.spec());
}
EXPECT_EQ(blocked_page_title, web_contents->GetTitle());
bool result = false;
EXPECT_TRUE(content::ExecuteScriptAndExtractBool(
web_contents,
"var textContent = document.body.textContent;"
"var hasError = textContent.indexOf('ERR_BLOCKED_BY_ADMINISTRATOR') >= 0;"
"domAutomationController.send(hasError);",
&result));
EXPECT_TRUE(result);
}
|
void CheckURLIsBlockedInWebContents(content::WebContents* web_contents,
const GURL& url) {
EXPECT_EQ(url, web_contents->GetURL());
base::string16 blocked_page_title;
if (url.has_host()) {
blocked_page_title = base::UTF8ToUTF16(url.host());
} else {
blocked_page_title = base::UTF8ToUTF16(url.spec());
}
EXPECT_EQ(blocked_page_title, web_contents->GetTitle());
bool result = false;
EXPECT_TRUE(content::ExecuteScriptAndExtractBool(
web_contents,
"var textContent = document.body.textContent;"
"var hasError = textContent.indexOf('ERR_BLOCKED_BY_ADMINISTRATOR') >= 0;"
"domAutomationController.send(hasError);",
&result));
EXPECT_TRUE(result);
}
|
C
|
Chrome
| 0 |
CVE-2019-5755
|
https://www.cvedetails.com/cve/CVE-2019-5755/
|
CWE-189
|
https://github.com/chromium/chromium/commit/971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
|
void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!SessionExists(session_id))
return;
DCHECK_EQ(primary_session_id_, session_id);
if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
delegate_listener->OnAudioStart(session_id);
if (SpeechRecognitionEventListener* listener = GetListener(session_id))
listener->OnAudioStart(session_id);
}
|
void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!SessionExists(session_id))
return;
DCHECK_EQ(primary_session_id_, session_id);
if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
delegate_listener->OnAudioStart(session_id);
if (SpeechRecognitionEventListener* listener = GetListener(session_id))
listener->OnAudioStart(session_id);
}
|
C
|
Chrome
| 0 |
CVE-2013-2017
|
https://www.cvedetails.com/cve/CVE-2013-2017/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6ec82562ffc6f297d0de36d65776cff8e5704867
|
6ec82562ffc6f297d0de36d65776cff8e5704867
|
veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
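The rule stated above is an ownership rule: once a buffer is handed to a helper that may free it on the congestion path, the caller must treat it as consumed and never free it again. A small standalone model follows; forward_pkt is a stand-in for dev_forward_skb(), not the real API.

```c
/* Ownership sketch: the forwarding helper consumes its argument on every
 * path, so the caller must not free the same buffer a second time. */
#include <stdlib.h>

struct pkt { char data[64]; };

/* Stand-in for dev_forward_skb(): consumes 'p' whether or not it drops it. */
static int forward_pkt(struct pkt *p, int congested)
{
    if (congested) {
        free(p);            /* dropped: buffer is freed here */
        return -1;
    }
    /* ... would normally queue for delivery; modelled as consumed too ... */
    free(p);
    return 0;
}

static void xmit(struct pkt *p, int congested)
{
    if (!p)
        return;
    forward_pkt(p, congested);
    /* Buggy pattern: free(p);  -- double free, forward_pkt already consumed it. */
}

int main(void)
{
    xmit(malloc(sizeof(struct pkt)), 1);
    xmit(malloc(sizeof(struct pkt)), 0);
    return 0;
}
```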
|
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
WARN(1, "%s selects TX queue %d, but "
"real number of TX queues is %d\n",
dev->name, queue_index,
dev->real_num_tx_queues);
}
return 0;
}
return queue_index;
}
|
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
WARN(1, "%s selects TX queue %d, but "
"real number of TX queues is %d\n",
dev->name, queue_index,
dev->real_num_tx_queues);
}
return 0;
}
return queue_index;
}
|
C
|
linux
| 0 |
CVE-2016-1613
|
https://www.cvedetails.com/cve/CVE-2016-1613/
| null |
https://github.com/chromium/chromium/commit/7394cf6f43d7a86630d3eb1c728fd63c621b5530
|
7394cf6f43d7a86630d3eb1c728fd63c621b5530
|
Connect the LocalDB to TabManager.
Bug: 773382
Change-Id: Iec8fe5226ee175105d51f300f30b4865478ac099
Reviewed-on: https://chromium-review.googlesource.com/1118611
Commit-Queue: Sébastien Marchand <[email protected]>
Reviewed-by: François Doray <[email protected]>
Cr-Commit-Position: refs/heads/master@{#572871}
|
base::ProcessHandle TestLifecycleUnit::GetProcessHandle() const {
return process_handle_;
}
|
base::ProcessHandle TestLifecycleUnit::GetProcessHandle() const {
return process_handle_;
}
|
C
|
Chrome
| 0 |
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log messages were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
static int jpc_cod_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_cod_t *cod = &ms->parms.cod;
assert(cod->numlyrs > 0 && cod->compparms.numdlvls <= 32);
assert(cod->compparms.numdlvls == cod->compparms.numrlvls - 1);
if (jpc_putuint8(out, cod->compparms.csty) ||
jpc_putuint8(out, cod->prg) ||
jpc_putuint16(out, cod->numlyrs) ||
jpc_putuint8(out, cod->mctrans)) {
return -1;
}
if (jpc_cox_putcompparms(ms, cstate, out,
(cod->csty & JPC_COX_PRT) != 0, &cod->compparms)) {
return -1;
}
return 0;
}
|
static int jpc_cod_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_cod_t *cod = &ms->parms.cod;
assert(cod->numlyrs > 0 && cod->compparms.numdlvls <= 32);
assert(cod->compparms.numdlvls == cod->compparms.numrlvls - 1);
if (jpc_putuint8(out, cod->compparms.csty) ||
jpc_putuint8(out, cod->prg) ||
jpc_putuint16(out, cod->numlyrs) ||
jpc_putuint8(out, cod->mctrans)) {
return -1;
}
if (jpc_cox_putcompparms(ms, cstate, out,
(cod->csty & JPC_COX_PRT) != 0, &cod->compparms)) {
return -1;
}
return 0;
}
|
C
|
jasper
| 0 |
CVE-2015-3215
|
https://www.cvedetails.com/cve/CVE-2015-3215/
|
CWE-20
|
https://github.com/YanVugenfirer/kvm-guest-drivers-windows/commit/fbfa4d1083ea84c5429992ca3e996d7d4fbc8238
|
fbfa4d1083ea84c5429992ca3e996d7d4fbc8238
|
NetKVM: BZ#1169718: More rigoruous testing of incoming packet
Signed-off-by: Joseph Hindin <[email protected]>
|
static VOID ParaNdis_DeviceFiltersUpdateAddresses(PARANDIS_ADAPTER *pContext)
{
u32 u32UniCastEntries = 0;
pContext->CXPath.SendControlMessage(VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
&u32UniCastEntries,
sizeof(u32UniCastEntries),
&pContext->MulticastData,
sizeof(pContext->MulticastData.nofMulticastEntries) + pContext->MulticastData.nofMulticastEntries * ETH_LENGTH_OF_ADDRESS,
2);
}
|
static VOID ParaNdis_DeviceFiltersUpdateAddresses(PARANDIS_ADAPTER *pContext)
{
u32 u32UniCastEntries = 0;
pContext->CXPath.SendControlMessage(VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
&u32UniCastEntries,
sizeof(u32UniCastEntries),
&pContext->MulticastData,
sizeof(pContext->MulticastData.nofMulticastEntries) + pContext->MulticastData.nofMulticastEntries * ETH_LENGTH_OF_ADDRESS,
2);
}
|
C
|
kvm-guest-drivers-windows
| 0 |
CVE-2017-5077
|
https://www.cvedetails.com/cve/CVE-2017-5077/
|
CWE-125
|
https://github.com/chromium/chromium/commit/fec26ff33bf372476a70326f3669a35f34a9d474
|
fec26ff33bf372476a70326f3669a35f34a9d474
|
Origins should be represented as url::Origin (not as GURL).
As pointed out in //docs/security/origin-vs-url.md, origins should be
represented as url::Origin (not as GURL). This CL applies this
guideline to predictor-related code and changes the type of the
following fields from GURL to url::Origin:
- OriginRequestSummary::origin
- PreconnectedRequestStats::origin
- PreconnectRequest::origin
The old code did not depend on any non-origin parts of GURL
(like path and/or query). Therefore, this CL has no intended
behavior change.
Bug: 973885
Change-Id: Idd14590b4834cb9d50c74ed747b595fe1a4ba357
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1895167
Commit-Queue: Łukasz Anforowicz <[email protected]>
Reviewed-by: Alex Ilin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#716311}
|
bool ProxyFound(const GURL& url) {
return base::Contains(successful_proxy_lookups_, url.GetOrigin());
}
|
bool ProxyFound(const GURL& url) {
return base::Contains(successful_proxy_lookups_, url.GetOrigin());
}
|
C
|
Chrome
| 0 |
CVE-2018-20685
|
https://www.cvedetails.com/cve/CVE-2018-20685/
|
CWE-706
|
https://github.com/openssh/openssh-portable/commit/6010c0303a422a9c5fa8860c061bf7105eb7f8b2
|
6010c0303a422a9c5fa8860c061bf7105eb7f8b2
|
upstream: disallow empty incoming filename or ones that refer to the
current directory; based on report/patch from Harry Sintonen
OpenBSD-Commit-ID: f27651b30eaee2df49540ab68d030865c04f6de9
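A sketch of the validation the commit describes, under the assumption that a server-supplied name must be a plain single-component filename; this is not the exact upstream check.

```c
/* Reject an empty name, ".", "..", or anything containing a path separator
 * before using a filename received from the remote side. */
#include <stdbool.h>
#include <string.h>

static bool filename_ok(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return false;                        /* empty name */
    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
        return false;                        /* refers to current/parent dir */
    if (strchr(name, '/') != NULL)
        return false;                        /* embedded path component */
    return true;
}
```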
|
lostconn(int signo)
{
if (!iamremote)
(void)write(STDERR_FILENO, "lost connection\n", 16);
if (signo)
_exit(1);
else
exit(1);
}
|
lostconn(int signo)
{
if (!iamremote)
(void)write(STDERR_FILENO, "lost connection\n", 16);
if (signo)
_exit(1);
else
exit(1);
}
|
C
|
openssh-portable
| 0 |
CVE-2016-4809
|
https://www.cvedetails.com/cve/CVE-2016-4809/
|
CWE-20
|
https://github.com/libarchive/libarchive/commit/fd7e0c02
|
fd7e0c02
|
Reject cpio symlinks that exceed 1MB
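A hedged sketch of the kind of bound the title describes: refuse a header-claimed symlink size above 1 MB before allocating or reading it. SYMLINK_LIMIT_MODEL and read_symlink_target are illustrative names, not libarchive's API.

```c
/* Bound an attacker-controlled length from the archive header before using
 * it to size an allocation. */
#include <stdint.h>
#include <stdlib.h>

#define SYMLINK_LIMIT_MODEL (1024 * 1024)

static char *read_symlink_target(int64_t claimed_size,
                                 int (*read_bytes)(char *, size_t))
{
    char *buf;

    if (claimed_size < 0 || claimed_size > SYMLINK_LIMIT_MODEL)
        return NULL;                       /* reject absurd header value */
    buf = malloc((size_t)claimed_size + 1);
    if (buf == NULL)
        return NULL;
    if (read_bytes(buf, (size_t)claimed_size) != 0) {
        free(buf);
        return NULL;
    }
    buf[claimed_size] = '\0';
    return buf;
}
```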
|
atol16(const char *p, unsigned char_cnt)
{
int64_t l;
int digit;
l = 0;
while (char_cnt-- > 0) {
if (*p >= 'a' && *p <= 'f')
digit = *p - 'a' + 10;
else if (*p >= 'A' && *p <= 'F')
digit = *p - 'A' + 10;
else if (*p >= '0' && *p <= '9')
digit = *p - '0';
else
return (l);
p++;
l <<= 4;
l |= digit;
}
return (l);
}
|
atol16(const char *p, unsigned char_cnt)
{
int64_t l;
int digit;
l = 0;
while (char_cnt-- > 0) {
if (*p >= 'a' && *p <= 'f')
digit = *p - 'a' + 10;
else if (*p >= 'A' && *p <= 'F')
digit = *p - 'A' + 10;
else if (*p >= '0' && *p <= '9')
digit = *p - '0';
else
return (l);
p++;
l <<= 4;
l |= digit;
}
return (l);
}
|
C
|
libarchive
| 0 |
CVE-2014-3515
|
https://www.cvedetails.com/cve/CVE-2014-3515/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=88223c5245e9b470e1e6362bfd96829562ffe6ab
|
88223c5245e9b470e1e6362bfd96829562ffe6ab
| null |
static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */
if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) {
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
return intern->std.properties;
} else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) {
spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC);
return spl_array_get_hash_table(other, check_std_props TSRMLS_CC);
} else if ((intern->ar_flags & ((check_std_props ? SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) {
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
return intern->std.properties;
} else {
return HASH_OF(intern->array);
}
} /* }}} */
|
static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */
if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) {
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
return intern->std.properties;
} else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) {
spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC);
return spl_array_get_hash_table(other, check_std_props TSRMLS_CC);
} else if ((intern->ar_flags & ((check_std_props ? SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) {
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
return intern->std.properties;
} else {
return HASH_OF(intern->array);
}
} /* }}} */
|
C
|
php
| 0 |
CVE-2015-1300
|
https://www.cvedetails.com/cve/CVE-2015-1300/
|
CWE-254
|
https://github.com/chromium/chromium/commit/9c391ac04f9ac478c8b0e43b359c2b43a6c892ab
|
9c391ac04f9ac478c8b0e43b359c2b43a6c892ab
|
Use pdf compositor service for printing when OOPIF is enabled
When OOPIF is enabled (by site-per-process flag or
top-document-isolation feature), use the pdf compositor service for
converting PaintRecord to PDF on renderers.
In the future, this will make compositing PDF from multiple renderers
possible.
[email protected]
BUG=455764
Change-Id: I3c28f03f4358e4228239fe1a33384f85e7716e8f
Reviewed-on: https://chromium-review.googlesource.com/699765
Commit-Queue: Wei Li <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#511616}
|
int HeadlessWebContentsImpl::GetMainFrameTreeNodeId() const {
return web_contents()->GetMainFrame()->GetFrameTreeNodeId();
}
|
int HeadlessWebContentsImpl::GetMainFrameTreeNodeId() const {
return web_contents()->GetMainFrame()->GetFrameTreeNodeId();
}
|
C
|
Chrome
| 0 |
CVE-2017-17857
|
https://www.cvedetails.com/cve/CVE-2017-17857/
|
CWE-119
|
https://github.com/torvalds/linux/commit/ea25f914dc164c8d56b36147ecc86bc65f83c469
|
ea25f914dc164c8d56b36147ecc86bc65f83c469
|
bpf: fix missing error return in check_stack_boundary()
Prevent indirect stack accesses at non-constant addresses, which would
permit reading and corrupting spilled pointers.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
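The fix enforces that an indirect stack access is only permitted when the offset register holds a single known constant, and that the rejection is actually returned to the caller. A userspace model of that rule follows; reg_state and check_stack_access are simplified stand-ins for the verifier's structures.

```c
/* Only allow a stack access when the offset is a known constant inside the
 * valid range, and make sure the error is returned, not just computed. */
#include <stdbool.h>
#include <errno.h>

struct reg_state {
    bool known_const;   /* value is exactly 'value', not a range */
    long value;
};

static int check_stack_access(const struct reg_state *reg, long stack_size)
{
    if (!reg->known_const)
        return -EACCES;          /* non-constant address: reject */
    if (reg->value >= 0 || reg->value < -stack_size)
        return -EACCES;          /* must stay inside [-stack_size, -1] */
    return 0;
}
```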
|
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
memcpy(insn + i, &nop, sizeof(nop));
}
}
|
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
memcpy(insn + i, &nop, sizeof(nop));
}
}
|
C
|
linux
| 0 |
CVE-2019-5824
|
https://www.cvedetails.com/cve/CVE-2019-5824/
|
CWE-119
|
https://github.com/chromium/chromium/commit/cfb022640b5eec337b06f88a485487dc92ca1ac1
|
cfb022640b5eec337b06f88a485487dc92ca1ac1
|
[MediaStream] Pass request ID parameters in the right order for OpenDevice()
Prior to this CL, requester_id and page_request_id parameters were
passed in incorrect order from MediaStreamDispatcherHost to
MediaStreamManager for the OpenDevice() operation, which could lead to
errors.
Bug: 948564
Change-Id: Iadcf3fe26adaac50564102138ce212269cf32d62
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1569113
Reviewed-by: Marina Ciocea <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#651255}
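The underlying hazard is two adjacent parameters of the same integer type that are easy to transpose at a call site. A generic C illustration (not Chromium code) of one way to make the call site name each ID explicitly:

```c
/* Passing a small struct forces the caller to name each ID, so the two
 * values cannot be silently swapped the way two bare ints can. */
#include <stdio.h>

struct request_ids {
    int requester_id;
    int page_request_id;
};

static void open_device(struct request_ids ids, const char *device_id)
{
    printf("requester=%d page_request=%d device=%s\n",
           ids.requester_id, ids.page_request_id, device_id);
}

int main(void)
{
    open_device((struct request_ids){ .requester_id = 7,
                                      .page_request_id = 42 }, "cam0");
    return 0;
}
```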
|
void MediaStreamDispatcherHost::SetCapturingLinkSecured(
int32_t session_id,
blink::MediaStreamType type,
bool is_secure) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
media_stream_manager_->SetCapturingLinkSecured(render_process_id_, session_id,
type, is_secure);
}
|
void MediaStreamDispatcherHost::SetCapturingLinkSecured(
int32_t session_id,
blink::MediaStreamType type,
bool is_secure) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
media_stream_manager_->SetCapturingLinkSecured(render_process_id_, session_id,
type, is_secure);
}
|
C
|
Chrome
| 0 |
CVE-2013-2885
|
https://www.cvedetails.com/cve/CVE-2013-2885/
|
CWE-399
|
https://github.com/chromium/chromium/commit/79cfdeb5fbe79fa2604d37fba467f371cb436bc3
|
79cfdeb5fbe79fa2604d37fba467f371cb436bc3
|
Fix reentrance of BaseMultipleFieldsDateAndTimeInputType::destroyShadowSubtree.
destroyShadowSubtree could dispatch 'blur' event unexpectedly because
element()->focused() had incorrect information. We make sure it has
correct information by checking if the UA shadow root contains the
focused element.
BUG=257353
Review URL: https://chromiumcodereview.appspot.com/19067004
git-svn-id: svn://svn.chromium.org/blink/trunk@154086 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
bool BaseMultipleFieldsDateAndTimeInputType::hasBadInput() const
{
DateTimeEditElement* edit = dateTimeEditElement();
return element()->value().isEmpty() && edit && edit->anyEditableFieldsHaveValues();
}
|
bool BaseMultipleFieldsDateAndTimeInputType::hasBadInput() const
{
DateTimeEditElement* edit = dateTimeEditElement();
return element()->value().isEmpty() && edit && edit->anyEditableFieldsHaveValues();
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/27c68f543e5eba779902447445dfb05ec3f5bf75
|
27c68f543e5eba779902447445dfb05ec3f5bf75
|
Revert of Add accelerated VP9 decode infrastructure and an implementation for VA-API. (patchset #7 id:260001 of https://codereview.chromium.org/1318863003/ )
Reason for revert:
I think this patch broke compile step for Chromium Linux ChromeOS MSan Builder.
First failing build:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder/builds/8310
All recent builds:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder?numbuilds=200
Sorry for the revert. I'll re-revert if I'm wrong.
Cheers,
Tommy
Original issue's description:
> Add accelerated VP9 decode infrastructure and an implementation for VA-API.
>
> - Add a hardware/platform-independent VP9Decoder class and related
> infrastructure, implementing AcceleratedVideoDecoder interface. VP9Decoder
> performs the initial stages of the decode process, which are to be done
> on host/in software, such as stream parsing and reference frame management.
>
> - Add a VP9Accelerator interface, used by the VP9Decoder to offload the
> remaining stages of the decode process to hardware. VP9Accelerator
> implementations are platform-specific.
>
> - Add the first implementation of VP9Accelerator - VaapiVP9Accelerator - and
> integrate it with VaapiVideoDecodeAccelerator, for devices which provide
> hardware VP9 acceleration through VA-API. Hook it up to the new
> infrastructure and VP9Decoder.
>
> - Extend Vp9Parser to provide functionality required by VP9Decoder and
> VP9Accelerator, including superframe parsing, handling of loop filter
> and segmentation initialization, state persistence across frames and
> resetting when needed. Also add code calculating segmentation dequants
> and loop filter levels.
>
> - Update vp9_parser_unittest to the new Vp9Parser interface and flow.
>
> TEST=vp9_parser_unittest,vda_unittest,Chrome VP9 playback
> BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
> [email protected]
>
> Committed: https://crrev.com/e3cc0a661b8abfdc74f569940949bc1f336ece40
> Cr-Commit-Position: refs/heads/master@{#349312}
[email protected],[email protected],[email protected],[email protected],[email protected]
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
Review URL: https://codereview.chromium.org/1357513002
Cr-Commit-Position: refs/heads/master@{#349443}
|
static void DestroyVAImage(VADisplay va_display, VAImage image) {
if (image.image_id != VA_INVALID_ID)
vaDestroyImage(va_display, image.image_id);
}
|
static void DestroyVAImage(VADisplay va_display, VAImage image) {
if (image.image_id != VA_INVALID_ID)
vaDestroyImage(va_display, image.image_id);
}
|
C
|
Chrome
| 0 |
CVE-2016-0835
|
https://www.cvedetails.com/cve/CVE-2016-0835/
|
CWE-119
|
https://android.googlesource.com/platform/external/libmpeg2/+/ba604d336b40fd4bde1622f64d67135bdbd61301
|
ba604d336b40fd4bde1622f64d67135bdbd61301
|
Fix for handling streams which resulted in negative num_mbs_left
Bug: 26070014
Change-Id: Id9f063a2c72a802d991b92abaf00ec687db5bb0f
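A hedged sketch of the guard implied by the message: treat a slice that would drive the remaining-macroblock count negative as a corrupt stream. decode_slice_model is an illustrative stand-in, not the decoder's function.

```c
/* Stop decoding when a malformed stream would push the remaining-macroblock
 * counter below zero instead of continuing with a negative count. */
#include <errno.h>

static int decode_slice_model(int *num_mbs_left, int mbs_in_slice)
{
    if (mbs_in_slice <= 0 || mbs_in_slice > *num_mbs_left)
        return -EINVAL;        /* corrupt header: would drive the count < 0 */
    *num_mbs_left -= mbs_in_slice;
    return 0;
}
```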
|
IMPEG2D_ERROR_CODES_T impeg2d_process_video_header(dec_state_t *ps_dec)
{
stream_t *ps_stream;
ps_stream = &ps_dec->s_bit_stream;
IMPEG2D_ERROR_CODES_T e_error;
impeg2d_next_code(ps_dec, SEQUENCE_HEADER_CODE);
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
e_error = impeg2d_dec_seq_hdr(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
return e_error;
}
}
else
{
return IMPEG2D_BITSTREAM_BUFF_EXCEEDED_ERR;
}
if (impeg2d_bit_stream_nxt(ps_stream,START_CODE_LEN) == EXTENSION_START_CODE)
{
/* MPEG2 Decoder */
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
e_error = impeg2d_dec_seq_ext(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
return e_error;
}
}
else
{
return IMPEG2D_BITSTREAM_BUFF_EXCEEDED_ERR;
}
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
e_error = impeg2d_dec_seq_ext_data(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
return e_error;
}
}
return impeg2d_init_video_state(ps_dec,MPEG_2_VIDEO);
}
else
{
/* MPEG1 Decoder */
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
impeg2d_flush_ext_and_user_data(ps_dec);
}
return impeg2d_init_video_state(ps_dec,MPEG_1_VIDEO);
}
}
|
IMPEG2D_ERROR_CODES_T impeg2d_process_video_header(dec_state_t *ps_dec)
{
stream_t *ps_stream;
ps_stream = &ps_dec->s_bit_stream;
IMPEG2D_ERROR_CODES_T e_error;
impeg2d_next_code(ps_dec, SEQUENCE_HEADER_CODE);
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
e_error = impeg2d_dec_seq_hdr(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
return e_error;
}
}
else
{
return IMPEG2D_BITSTREAM_BUFF_EXCEEDED_ERR;
}
if (impeg2d_bit_stream_nxt(ps_stream,START_CODE_LEN) == EXTENSION_START_CODE)
{
/* MPEG2 Decoder */
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
e_error = impeg2d_dec_seq_ext(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
return e_error;
}
}
else
{
return IMPEG2D_BITSTREAM_BUFF_EXCEEDED_ERR;
}
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
e_error = impeg2d_dec_seq_ext_data(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
return e_error;
}
}
return impeg2d_init_video_state(ps_dec,MPEG_2_VIDEO);
}
else
{
/* MPEG1 Decoder */
if(ps_dec->s_bit_stream.u4_offset < ps_dec->s_bit_stream.u4_max_offset)
{
impeg2d_flush_ext_and_user_data(ps_dec);
}
return impeg2d_init_video_state(ps_dec,MPEG_1_VIDEO);
}
}
|
C
|
Android
| 0 |
CVE-2015-3212
|
https://www.cvedetails.com/cve/CVE-2015-3212/
|
CWE-362
|
https://github.com/torvalds/linux/commit/2d45a02d0166caf2627fe91897c6ffc3b19514c4
|
2d45a02d0166caf2627fe91897c6ffc3b19514c4
|
sctp: fix ASCONF list handling
->auto_asconf_splist is per namespace and mangled by functions like
sctp_setsockopt_auto_asconf() which doesn't guarantee any serialization.
Also, the call to inet_sk_copy_descendant() was backing up
->auto_asconf_list through the copy but was not honoring
->do_auto_asconf, which could lead to list corruption if it was
different between both sockets.
This commit thus fixes the list handling by using ->addr_wq_lock
spinlock to protect the list. A special handling is done upon socket
creation and destruction for that. Error handling on sctp_init_sock()
will never return an error after having initialized asconf, so
sctp_destroy_sock() can be called without addrq_wq_lock. The lock now
will be take on sctp_close_sock(), before locking the socket, so we
don't do it in inverse order compared to sctp_addr_wq_timeout_handler().
Instead of taking the lock on sctp_sock_migrate() for copying and
restoring the list values, it's preferred to avoid rewriting it by
implementing sctp_copy_descendant().
Issue was found with a test application that kept flipping sysctl
default_auto_asconf on and off, but one could trigger it by issuing
simultaneous setsockopt() calls on multiple sockets or by
creating/destroying sockets fast enough. This is only triggerable
locally.
Fixes: 9f7d653b67ae ("sctp: Add Auto-ASCONF support (core).")
Reported-by: Ji Jianwen <[email protected]>
Suggested-by: Neil Horman <[email protected]>
Suggested-by: Hannes Frederic Sowa <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
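The description boils down to one discipline: every add/remove on the shared auto-ASCONF list must happen under a dedicated lock. Below is a userspace model of that rule using a pthread mutex in place of the kernel spinlock; all names are illustrative.

```c
/* Protect a shared singly-linked list with one dedicated lock so concurrent
 * setsockopt-style flips cannot corrupt it. */
#include <pthread.h>

struct node {
    struct node *next;
};

static pthread_mutex_t addr_wq_lock_model = PTHREAD_MUTEX_INITIALIZER;
static struct node *auto_asconf_list_model;

static void list_add_locked(struct node *n)
{
    pthread_mutex_lock(&addr_wq_lock_model);
    n->next = auto_asconf_list_model;
    auto_asconf_list_model = n;
    pthread_mutex_unlock(&addr_wq_lock_model);
}

static void list_del_locked(struct node *n)
{
    struct node **pp;

    pthread_mutex_lock(&addr_wq_lock_model);
    for (pp = &auto_asconf_list_model; *pp; pp = &(*pp)->next) {
        if (*pp == n) {
            *pp = n->next;
            break;
        }
    }
    pthread_mutex_unlock(&addr_wq_lock_model);
}
```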
|
static void sctp_wfree(struct sk_buff *skb)
{
struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
/*
* This undoes what is done via sctp_set_owner_w and sk_mem_charge
*/
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
sock_wfree(skb);
sctp_wake_up_waiters(sk, asoc);
sctp_association_put(asoc);
}
|
static void sctp_wfree(struct sk_buff *skb)
{
struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
/*
* This undoes what is done via sctp_set_owner_w and sk_mem_charge
*/
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
sock_wfree(skb);
sctp_wake_up_waiters(sk, asoc);
sctp_association_put(asoc);
}
|
C
|
linux
| 0 |
CVE-2018-6111
|
https://www.cvedetails.com/cve/CVE-2018-6111/
|
CWE-20
|
https://github.com/chromium/chromium/commit/3c8e4852477d5b1e2da877808c998dc57db9460f
|
3c8e4852477d5b1e2da877808c998dc57db9460f
|
DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#531157}
|
Response ServiceWorkerHandler::Enable() {
if (enabled_)
return Response::OK();
if (!context_)
return CreateContextErrorResponse();
enabled_ = true;
context_watcher_ = new ServiceWorkerContextWatcher(
context_, base::Bind(&ServiceWorkerHandler::OnWorkerRegistrationUpdated,
weak_factory_.GetWeakPtr()),
base::Bind(&ServiceWorkerHandler::OnWorkerVersionUpdated,
weak_factory_.GetWeakPtr()),
base::Bind(&ServiceWorkerHandler::OnErrorReported,
weak_factory_.GetWeakPtr()));
context_watcher_->Start();
return Response::OK();
}
|
Response ServiceWorkerHandler::Enable() {
if (enabled_)
return Response::OK();
if (!context_)
return CreateContextErrorResponse();
enabled_ = true;
context_watcher_ = new ServiceWorkerContextWatcher(
context_, base::Bind(&ServiceWorkerHandler::OnWorkerRegistrationUpdated,
weak_factory_.GetWeakPtr()),
base::Bind(&ServiceWorkerHandler::OnWorkerVersionUpdated,
weak_factory_.GetWeakPtr()),
base::Bind(&ServiceWorkerHandler::OnErrorReported,
weak_factory_.GetWeakPtr()));
context_watcher_->Start();
return Response::OK();
}
|
C
|
Chrome
| 0 |
CVE-2018-6117
|
https://www.cvedetails.com/cve/CVE-2018-6117/
|
CWE-200
|
https://github.com/chromium/chromium/commit/52f6eb4221430b6248fd5a59bec53bfef9fdd9a7
|
52f6eb4221430b6248fd5a59bec53bfef9fdd9a7
|
[md-settings] Clarify Password Saving and Autofill Toggles
This change clarifies the wording around the password saving and
autofill toggles.
Bug: 822465
Cq-Include-Trybots: master.tryserver.chromium.linux:closure_compilation
Change-Id: I91b31fe61cd0754239f7908e8c04c7e69b72f670
Reviewed-on: https://chromium-review.googlesource.com/970541
Commit-Queue: Jan Wilken Dörrie <[email protected]>
Reviewed-by: Vaclav Brozek <[email protected]>
Reviewed-by: Jochen Eisinger <[email protected]>
Cr-Commit-Position: refs/heads/master@{#544661}
|
void AddInternetStrings(content::WebUIDataSource* html_source) {
LocalizedString localized_strings[] = {
{"internetAddConnection", IDS_SETTINGS_INTERNET_ADD_CONNECTION},
{"internetAddConnectionExpandA11yLabel",
IDS_SETTINGS_INTERNET_ADD_CONNECTION_EXPAND_ACCESSIBILITY_LABEL},
{"internetAddConnectionNotAllowed",
IDS_SETTINGS_INTERNET_ADD_CONNECTION_NOT_ALLOWED},
{"internetAddThirdPartyVPN", IDS_SETTINGS_INTERNET_ADD_THIRD_PARTY_VPN},
{"internetAddVPN", IDS_SETTINGS_INTERNET_ADD_VPN},
{"internetAddArcVPN", IDS_SETTINGS_INTERNET_ADD_ARC_VPN},
{"internetAddArcVPNProvider", IDS_SETTINGS_INTERNET_ADD_ARC_VPN_PROVIDER},
{"internetAddWiFi", IDS_SETTINGS_INTERNET_ADD_WIFI},
{"internetConfigName", IDS_SETTINGS_INTERNET_CONFIG_NAME},
{"internetDetailPageTitle", IDS_SETTINGS_INTERNET_DETAIL},
{"internetDeviceEnabling", IDS_SETTINGS_INTERNET_DEVICE_ENABLING},
{"internetDeviceInitializing", IDS_SETTINGS_INTERNET_DEVICE_INITIALIZING},
{"internetJoinType", IDS_SETTINGS_INTERNET_JOIN_TYPE},
{"internetKnownNetworksPageTitle", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS},
{"internetMobileSearching", IDS_SETTINGS_INTERNET_MOBILE_SEARCH},
{"internetNoNetworks", IDS_SETTINGS_INTERNET_NO_NETWORKS},
{"internetPageTitle", IDS_SETTINGS_INTERNET},
{"internetToggleMobileA11yLabel",
IDS_SETTINGS_INTERNET_TOGGLE_MOBILE_ACCESSIBILITY_LABEL},
{"internetToggleTetherLabel", IDS_SETTINGS_INTERNET_TOGGLE_TETHER_LABEL},
{"internetToggleTetherSubtext",
IDS_SETTINGS_INTERNET_TOGGLE_TETHER_SUBTEXT},
{"internetToggleWiFiA11yLabel",
IDS_SETTINGS_INTERNET_TOGGLE_WIFI_ACCESSIBILITY_LABEL},
{"internetToggleWiMAXA11yLabel",
IDS_SETTINGS_INTERNET_TOGGLE_WIMAX_ACCESSIBILITY_LABEL},
{"knownNetworksAll", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_ALL},
{"knownNetworksButton", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_BUTTON},
{"knownNetworksMessage", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MESSAGE},
{"knownNetworksPreferred",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_PREFFERED},
{"knownNetworksMenuAddPreferred",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MENU_ADD_PREFERRED},
{"knownNetworksMenuRemovePreferred",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MENU_REMOVE_PREFERRED},
{"knownNetworksMenuForget",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MENU_FORGET},
{"networkAllowDataRoaming",
IDS_SETTINGS_SETTINGS_NETWORK_ALLOW_DATA_ROAMING},
{"networkAutoConnect", IDS_SETTINGS_INTERNET_NETWORK_AUTO_CONNECT},
{"networkButtonActivate", IDS_SETTINGS_INTERNET_BUTTON_ACTIVATE},
{"networkButtonConfigure", IDS_SETTINGS_INTERNET_BUTTON_CONFIGURE},
{"networkButtonConnect", IDS_SETTINGS_INTERNET_BUTTON_CONNECT},
{"networkButtonDisconnect", IDS_SETTINGS_INTERNET_BUTTON_DISCONNECT},
{"networkButtonForget", IDS_SETTINGS_INTERNET_BUTTON_FORGET},
{"networkButtonViewAccount", IDS_SETTINGS_INTERNET_BUTTON_VIEW_ACCOUNT},
{"networkConnectNotAllowed", IDS_SETTINGS_INTERNET_CONNECT_NOT_ALLOWED},
{"networkIPAddress", IDS_SETTINGS_INTERNET_NETWORK_IP_ADDRESS},
{"networkIPConfigAuto", IDS_SETTINGS_INTERNET_NETWORK_IP_CONFIG_AUTO},
{"networkNameserversLearnMore", IDS_LEARN_MORE},
{"networkPrefer", IDS_SETTINGS_INTERNET_NETWORK_PREFER},
{"networkPrimaryUserControlled",
IDS_SETTINGS_INTERNET_NETWORK_PRIMARY_USER_CONTROLLED},
{"networkSectionAdvanced",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_ADVANCED},
{"networkSectionAdvancedA11yLabel",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_ADVANCED_ACCESSIBILITY_LABEL},
{"networkSectionNetwork", IDS_SETTINGS_INTERNET_NETWORK_SECTION_NETWORK},
{"networkSectionNetworkExpandA11yLabel",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_NETWORK_ACCESSIBILITY_LABEL},
{"networkSectionProxy", IDS_SETTINGS_INTERNET_NETWORK_SECTION_PROXY},
{"networkSectionProxyExpandA11yLabel",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_PROXY_ACCESSIBILITY_LABEL},
{"networkShared", IDS_SETTINGS_INTERNET_NETWORK_SHARED},
{"networkVpnBuiltin", IDS_NETWORK_TYPE_VPN_BUILTIN},
{"networkOutOfRange", IDS_SETTINGS_INTERNET_WIFI_NETWORK_OUT_OF_RANGE},
{"tetherPhoneOutOfRange",
IDS_SETTINGS_INTERNET_TETHER_PHONE_OUT_OF_RANGE},
{"gmscoreNotificationsTitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_TITLE},
{"gmscoreNotificationsOneDeviceSubtitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_ONE_DEVICE_SUBTITLE},
{"gmscoreNotificationsTwoDevicesSubtitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_TWO_DEVICES_SUBTITLE},
{"gmscoreNotificationsManyDevicesSubtitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_MANY_DEVICES_SUBTITLE},
{"gmscoreNotificationsFirstStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_FIRST_STEP},
{"gmscoreNotificationsSecondStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_SECOND_STEP},
{"gmscoreNotificationsThirdStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_THIRD_STEP},
{"gmscoreNotificationsFourthStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_FOURTH_STEP},
{"tetherConnectionDialogTitle",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DIALOG_TITLE},
{"tetherConnectionAvailableDeviceTitle",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_AVAILABLE_DEVICE_TITLE},
{"tetherConnectionBatteryPercentage",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_BATTERY_PERCENTAGE},
{"tetherConnectionExplanation",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_EXPLANATION},
{"tetherConnectionCarrierWarning",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_CARRIER_WARNING},
{"tetherConnectionDescriptionTitle",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_TITLE},
{"tetherConnectionDescriptionMobileData",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_MOBILE_DATA},
{"tetherConnectionDescriptionBattery",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_BATTERY},
{"tetherConnectionDescriptionWiFi",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_WIFI},
{"tetherConnectionNotNowButton",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_NOT_NOW_BUTTON},
{"tetherConnectionConnectButton",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_CONNECT_BUTTON},
{"tetherEnableBluetooth", IDS_ASH_STATUS_TRAY_ENABLE_BLUETOOTH},
};
AddLocalizedStringsBulk(html_source, localized_strings,
arraysize(localized_strings));
html_source->AddBoolean("networkSettingsConfig",
chromeos::switches::IsNetworkSettingsConfigEnabled());
html_source->AddString("networkGoogleNameserversLearnMoreUrl",
chrome::kGoogleNameserversLearnMoreURL);
html_source->AddString(
"internetNoNetworksMobileData",
l10n_util::GetStringFUTF16(
IDS_SETTINGS_INTERNET_NO_NETWORKS_MOBILE_DATA,
GetHelpUrlWithBoard(chrome::kInstantTetheringLearnMoreURL)));
}
|
void AddInternetStrings(content::WebUIDataSource* html_source) {
LocalizedString localized_strings[] = {
{"internetAddConnection", IDS_SETTINGS_INTERNET_ADD_CONNECTION},
{"internetAddConnectionExpandA11yLabel",
IDS_SETTINGS_INTERNET_ADD_CONNECTION_EXPAND_ACCESSIBILITY_LABEL},
{"internetAddConnectionNotAllowed",
IDS_SETTINGS_INTERNET_ADD_CONNECTION_NOT_ALLOWED},
{"internetAddThirdPartyVPN", IDS_SETTINGS_INTERNET_ADD_THIRD_PARTY_VPN},
{"internetAddVPN", IDS_SETTINGS_INTERNET_ADD_VPN},
{"internetAddArcVPN", IDS_SETTINGS_INTERNET_ADD_ARC_VPN},
{"internetAddArcVPNProvider", IDS_SETTINGS_INTERNET_ADD_ARC_VPN_PROVIDER},
{"internetAddWiFi", IDS_SETTINGS_INTERNET_ADD_WIFI},
{"internetConfigName", IDS_SETTINGS_INTERNET_CONFIG_NAME},
{"internetDetailPageTitle", IDS_SETTINGS_INTERNET_DETAIL},
{"internetDeviceEnabling", IDS_SETTINGS_INTERNET_DEVICE_ENABLING},
{"internetDeviceInitializing", IDS_SETTINGS_INTERNET_DEVICE_INITIALIZING},
{"internetJoinType", IDS_SETTINGS_INTERNET_JOIN_TYPE},
{"internetKnownNetworksPageTitle", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS},
{"internetMobileSearching", IDS_SETTINGS_INTERNET_MOBILE_SEARCH},
{"internetNoNetworks", IDS_SETTINGS_INTERNET_NO_NETWORKS},
{"internetPageTitle", IDS_SETTINGS_INTERNET},
{"internetToggleMobileA11yLabel",
IDS_SETTINGS_INTERNET_TOGGLE_MOBILE_ACCESSIBILITY_LABEL},
{"internetToggleTetherLabel", IDS_SETTINGS_INTERNET_TOGGLE_TETHER_LABEL},
{"internetToggleTetherSubtext",
IDS_SETTINGS_INTERNET_TOGGLE_TETHER_SUBTEXT},
{"internetToggleWiFiA11yLabel",
IDS_SETTINGS_INTERNET_TOGGLE_WIFI_ACCESSIBILITY_LABEL},
{"internetToggleWiMAXA11yLabel",
IDS_SETTINGS_INTERNET_TOGGLE_WIMAX_ACCESSIBILITY_LABEL},
{"knownNetworksAll", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_ALL},
{"knownNetworksButton", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_BUTTON},
{"knownNetworksMessage", IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MESSAGE},
{"knownNetworksPreferred",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_PREFFERED},
{"knownNetworksMenuAddPreferred",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MENU_ADD_PREFERRED},
{"knownNetworksMenuRemovePreferred",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MENU_REMOVE_PREFERRED},
{"knownNetworksMenuForget",
IDS_SETTINGS_INTERNET_KNOWN_NETWORKS_MENU_FORGET},
{"networkAllowDataRoaming",
IDS_SETTINGS_SETTINGS_NETWORK_ALLOW_DATA_ROAMING},
{"networkAutoConnect", IDS_SETTINGS_INTERNET_NETWORK_AUTO_CONNECT},
{"networkButtonActivate", IDS_SETTINGS_INTERNET_BUTTON_ACTIVATE},
{"networkButtonConfigure", IDS_SETTINGS_INTERNET_BUTTON_CONFIGURE},
{"networkButtonConnect", IDS_SETTINGS_INTERNET_BUTTON_CONNECT},
{"networkButtonDisconnect", IDS_SETTINGS_INTERNET_BUTTON_DISCONNECT},
{"networkButtonForget", IDS_SETTINGS_INTERNET_BUTTON_FORGET},
{"networkButtonViewAccount", IDS_SETTINGS_INTERNET_BUTTON_VIEW_ACCOUNT},
{"networkConnectNotAllowed", IDS_SETTINGS_INTERNET_CONNECT_NOT_ALLOWED},
{"networkIPAddress", IDS_SETTINGS_INTERNET_NETWORK_IP_ADDRESS},
{"networkIPConfigAuto", IDS_SETTINGS_INTERNET_NETWORK_IP_CONFIG_AUTO},
{"networkNameserversLearnMore", IDS_LEARN_MORE},
{"networkPrefer", IDS_SETTINGS_INTERNET_NETWORK_PREFER},
{"networkPrimaryUserControlled",
IDS_SETTINGS_INTERNET_NETWORK_PRIMARY_USER_CONTROLLED},
{"networkSectionAdvanced",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_ADVANCED},
{"networkSectionAdvancedA11yLabel",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_ADVANCED_ACCESSIBILITY_LABEL},
{"networkSectionNetwork", IDS_SETTINGS_INTERNET_NETWORK_SECTION_NETWORK},
{"networkSectionNetworkExpandA11yLabel",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_NETWORK_ACCESSIBILITY_LABEL},
{"networkSectionProxy", IDS_SETTINGS_INTERNET_NETWORK_SECTION_PROXY},
{"networkSectionProxyExpandA11yLabel",
IDS_SETTINGS_INTERNET_NETWORK_SECTION_PROXY_ACCESSIBILITY_LABEL},
{"networkShared", IDS_SETTINGS_INTERNET_NETWORK_SHARED},
{"networkVpnBuiltin", IDS_NETWORK_TYPE_VPN_BUILTIN},
{"networkOutOfRange", IDS_SETTINGS_INTERNET_WIFI_NETWORK_OUT_OF_RANGE},
{"tetherPhoneOutOfRange",
IDS_SETTINGS_INTERNET_TETHER_PHONE_OUT_OF_RANGE},
{"gmscoreNotificationsTitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_TITLE},
{"gmscoreNotificationsOneDeviceSubtitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_ONE_DEVICE_SUBTITLE},
{"gmscoreNotificationsTwoDevicesSubtitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_TWO_DEVICES_SUBTITLE},
{"gmscoreNotificationsManyDevicesSubtitle",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_MANY_DEVICES_SUBTITLE},
{"gmscoreNotificationsFirstStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_FIRST_STEP},
{"gmscoreNotificationsSecondStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_SECOND_STEP},
{"gmscoreNotificationsThirdStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_THIRD_STEP},
{"gmscoreNotificationsFourthStep",
IDS_SETTINGS_INTERNET_GMSCORE_NOTIFICATIONS_FOURTH_STEP},
{"tetherConnectionDialogTitle",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DIALOG_TITLE},
{"tetherConnectionAvailableDeviceTitle",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_AVAILABLE_DEVICE_TITLE},
{"tetherConnectionBatteryPercentage",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_BATTERY_PERCENTAGE},
{"tetherConnectionExplanation",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_EXPLANATION},
{"tetherConnectionCarrierWarning",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_CARRIER_WARNING},
{"tetherConnectionDescriptionTitle",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_TITLE},
{"tetherConnectionDescriptionMobileData",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_MOBILE_DATA},
{"tetherConnectionDescriptionBattery",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_BATTERY},
{"tetherConnectionDescriptionWiFi",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_DESCRIPTION_WIFI},
{"tetherConnectionNotNowButton",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_NOT_NOW_BUTTON},
{"tetherConnectionConnectButton",
IDS_SETTINGS_INTERNET_TETHER_CONNECTION_CONNECT_BUTTON},
{"tetherEnableBluetooth", IDS_ASH_STATUS_TRAY_ENABLE_BLUETOOTH},
};
AddLocalizedStringsBulk(html_source, localized_strings,
arraysize(localized_strings));
html_source->AddBoolean("networkSettingsConfig",
chromeos::switches::IsNetworkSettingsConfigEnabled());
html_source->AddString("networkGoogleNameserversLearnMoreUrl",
chrome::kGoogleNameserversLearnMoreURL);
html_source->AddString(
"internetNoNetworksMobileData",
l10n_util::GetStringFUTF16(
IDS_SETTINGS_INTERNET_NO_NETWORKS_MOBILE_DATA,
GetHelpUrlWithBoard(chrome::kInstantTetheringLearnMoreURL)));
}
|
C
|
Chrome
| 0 |
CVE-2015-1573
|
https://www.cvedetails.com/cve/CVE-2015-1573/
|
CWE-19
|
https://github.com/torvalds/linux/commit/a2f18db0c68fec96631c10cad9384c196e9008ac
|
a2f18db0c68fec96631c10cad9384c196e9008ac
|
netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
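A minimal sketch (not the nf_tables code) of the ordering rule the message describes: objects that hold references, such as rules that jump to a chain, must be released before the chains they point at, otherwise teardown dereferences freed memory. The structures and refcounting below are hypothetical.

#include <stdlib.h>

/* Hypothetical, simplified objects: a rule may jump to (reference) a chain. */
struct chain { int refcnt; };
struct rule  { struct chain *jump_target; };

static void put_chain(struct chain *c)
{
    if (c && --c->refcnt == 0)
        free(c);
}

static void release_rule(struct rule *r)
{
    put_chain(r->jump_target);       /* drop the reference held by the rule */
    free(r);
}

/* Flush in dependency order: rules first, chains last.  Freeing a chain
 * while some rule still points at it would leave exactly the dangling
 * reference the commit message warns about. */
static void flush_ruleset(struct rule **rules, size_t nrules,
                          struct chain **chains, size_t nchains)
{
    for (size_t i = 0; i < nrules; i++)
        release_rule(rules[i]);
    for (size_t i = 0; i < nchains; i++)
        put_chain(chains[i]);        /* drops the table's own reference */
}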
static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_set *set;
struct nft_ctx ctx;
int err;
if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
if (nla[NFTA_SET_TABLE] == NULL)
return -EINVAL;
err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
if (err < 0)
return err;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
return PTR_ERR(set);
if (set->flags & NFT_SET_INACTIVE)
return -ENOENT;
if (!list_empty(&set->bindings))
return -EBUSY;
return nft_delset(&ctx, set);
}
|
static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_set *set;
struct nft_ctx ctx;
int err;
if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
if (nla[NFTA_SET_TABLE] == NULL)
return -EINVAL;
err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
if (err < 0)
return err;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
return PTR_ERR(set);
if (set->flags & NFT_SET_INACTIVE)
return -ENOENT;
if (!list_empty(&set->bindings))
return -EBUSY;
return nft_delset(&ctx, set);
}
|
C
|
linux
| 0 |
CVE-2018-21028
|
https://www.cvedetails.com/cve/CVE-2018-21028/
| null |
https://github.com/gpg/boa/pull/1/commits/e139b87835994d007fbd64eead6c1455d7b8cf4e
|
e139b87835994d007fbd64eead6c1455d7b8cf4e
|
misc oom and possible memory leak fix
|
char *strdup(char *s)
{
char *retval;
retval = (char *) malloc(strlen(s) + 1);
if (retval == NULL) {
perror("boa: out of memory in strdup");
exit(1);
}
return strcpy(retval, s);
}
|
char *strdup(char *s)
{
char *retval;
retval = (char *) malloc(strlen(s) + 1);
if (retval == NULL) {
perror("boa: out of memory in strdup");
exit(1);
}
return strcpy(retval, s);
}
|
C
|
boa
| 0 |
CVE-2013-2902
|
https://www.cvedetails.com/cve/CVE-2013-2902/
|
CWE-399
|
https://github.com/chromium/chromium/commit/87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
|
87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
|
Removed pinch viewport scroll offset distribution
The associated change in Blink makes the pinch viewport a proper
ScrollableArea meaning the normal path for synchronizing layer scroll
offsets is used.
This is a 2 sided patch, the other CL:
https://codereview.chromium.org/199253002/
BUG=349941
Review URL: https://codereview.chromium.org/210543002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260105 0039d316-1c4b-4281-b951-d872f2087c98
|
void Layer::RemoveChildOrDependent(Layer* child) {
if (mask_layer_.get() == child) {
mask_layer_->SetParent(NULL);
mask_layer_ = NULL;
SetNeedsFullTreeSync();
return;
}
if (replica_layer_.get() == child) {
replica_layer_->SetParent(NULL);
replica_layer_ = NULL;
SetNeedsFullTreeSync();
return;
}
for (LayerList::iterator iter = children_.begin();
iter != children_.end();
++iter) {
if (iter->get() != child)
continue;
child->SetParent(NULL);
children_.erase(iter);
SetNeedsFullTreeSync();
return;
}
}
|
void Layer::RemoveChildOrDependent(Layer* child) {
if (mask_layer_.get() == child) {
mask_layer_->SetParent(NULL);
mask_layer_ = NULL;
SetNeedsFullTreeSync();
return;
}
if (replica_layer_.get() == child) {
replica_layer_->SetParent(NULL);
replica_layer_ = NULL;
SetNeedsFullTreeSync();
return;
}
for (LayerList::iterator iter = children_.begin();
iter != children_.end();
++iter) {
if (iter->get() != child)
continue;
child->SetParent(NULL);
children_.erase(iter);
SetNeedsFullTreeSync();
return;
}
}
|
C
|
Chrome
| 0 |
CVE-2017-0393
|
https://www.cvedetails.com/cve/CVE-2017-0393/
| null |
https://android.googlesource.com/platform/external/libvpx/+/6886e8e0a9db2dbad723dc37a548233e004b33bc
|
6886e8e0a9db2dbad723dc37a548233e004b33bc
|
vp8:fix threading issues
1 - stops deallocating before threads are closed.
2 - limits threads to mb_rows when mb_rows < partitions
BUG=webm:851
Bug: 30436808
Change-Id: Ie017818ed28103ca9d26d57087f31361b642e09b
(cherry picked from commit 70cca742efa20617c70c3209aa614a70f282f90e)
|
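A minimal pthread sketch (not the libvpx decoder) of the two fixes listed above: clamp the number of workers to the number of macroblock rows, and join every worker before freeing the buffer it reads, so teardown cannot race with a still-running thread.

#include <pthread.h>
#include <stdlib.h>

struct work { const int *shared_rows; int nrows; };

static void *worker(void *arg)
{
    const struct work *w = arg;
    long sum = 0;
    for (int i = 0; i < w->nrows; i++)
        sum += w->shared_rows[i];            /* reads the shared buffer */
    return (void *)sum;
}

int run_workers(int requested_threads, int mb_rows)
{
    /* Fix 2: never spawn more workers than there are rows to process. */
    int nthreads = requested_threads < mb_rows ? requested_threads : mb_rows;

    int *rows = calloc(mb_rows, sizeof(*rows));
    pthread_t *tids = calloc(nthreads, sizeof(*tids));
    if (!rows || !tids) { free(rows); free(tids); return -1; }

    struct work w = { rows, mb_rows };
    int created = 0;
    for (; created < nthreads; created++)
        if (pthread_create(&tids[created], NULL, worker, &w) != 0)
            break;

    /* Fix 1: join the threads *before* releasing memory they may still use. */
    for (int i = 0; i < created; i++)
        pthread_join(tids[i], NULL);

    free(rows);                              /* safe: no worker can touch it now */
    free(tids);
    return 0;
}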
static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
{
int i;
unsigned char *src_ptr1, *src_ptr2;
unsigned char *dest_ptr2;
unsigned int Border;
int plane_stride;
int plane_height;
/***********/
/* Y Plane */
/***********/
Border = ybf->border;
plane_stride = ybf->y_stride;
plane_height = ybf->y_height;
src_ptr1 = ybf->y_buffer - Border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)Border; i++)
{
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
/***********/
/* U Plane */
/***********/
plane_stride = ybf->uv_stride;
plane_height = ybf->uv_height;
Border /= 2;
src_ptr1 = ybf->u_buffer - Border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)(Border); i++)
{
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
/***********/
/* V Plane */
/***********/
src_ptr1 = ybf->v_buffer - Border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)(Border); i++)
{
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
}
|
static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
{
int i;
unsigned char *src_ptr1, *src_ptr2;
unsigned char *dest_ptr2;
unsigned int Border;
int plane_stride;
int plane_height;
/***********/
/* Y Plane */
/***********/
Border = ybf->border;
plane_stride = ybf->y_stride;
plane_height = ybf->y_height;
src_ptr1 = ybf->y_buffer - Border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)Border; i++)
{
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
/***********/
/* U Plane */
/***********/
plane_stride = ybf->uv_stride;
plane_height = ybf->uv_height;
Border /= 2;
src_ptr1 = ybf->u_buffer - Border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)(Border); i++)
{
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
/***********/
/* V Plane */
/***********/
src_ptr1 = ybf->v_buffer - Border;
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)(Border); i++)
{
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
}
|
C
|
Android
| 0 |
CVE-2016-7117
|
https://www.cvedetails.com/cve/CVE-2016-7117/
|
CWE-19
|
https://github.com/torvalds/linux/commit/34b88a68f26a75e4fded796f1a49c40f82234b7d
|
34b88a68f26a75e4fded796f1a49c40f82234b7d
|
net: Fix use after free in the recvmmsg exit path
The syzkaller fuzzer hit the following use-after-free:
Call Trace:
[<ffffffff8175ea0e>] __asan_report_load8_noabort+0x3e/0x40 mm/kasan/report.c:295
[<ffffffff851cc31a>] __sys_recvmmsg+0x6fa/0x7f0 net/socket.c:2261
[< inline >] SYSC_recvmmsg net/socket.c:2281
[<ffffffff851cc57f>] SyS_recvmmsg+0x16f/0x180 net/socket.c:2270
[<ffffffff86332bb6>] entry_SYSCALL_64_fastpath+0x16/0x7a
arch/x86/entry/entry_64.S:185
And, as Dmitry rightly assessed, that is because we can drop the
reference and then touch it when the underlying recvmsg calls return
some packets and then hit an error, which will make recvmmsg set
sock->sk->sk_err, oops, fix it.
Reported-and-Tested-by: Dmitry Vyukov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Kostya Serebryany <[email protected]>
Cc: Sasha Levin <[email protected]>
Fixes: a2e2725541fa ("net: Introduce recvmmsg socket syscall")
http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
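A generic sketch of the use-after-free shape described above, using a hypothetical refcounted connection object rather than the real socket structures: the buggy exit path drops its reference first and then stores the error through a possibly stale pointer, while the fixed path records the error before the reference is released.

#include <stdlib.h>

struct conn {
    int refcnt;
    int err;
};

static void conn_put(struct conn *c)
{
    if (--c->refcnt == 0)
        free(c);
}

/* Buggy shape: if conn_put() dropped the last reference, the later store
 * writes into freed memory. */
static void finish_buggy(struct conn *c, int err)
{
    conn_put(c);
    if (err)
        c->err = err;            /* use-after-free */
}

/* Fixed shape: publish the error while the reference is still held. */
static void finish_fixed(struct conn *c, int err)
{
    if (err)
        c->err = err;
    conn_put(c);
}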
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
struct scm_timestamping tss;
int empty = 1;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
if (need_software_tstamp && skb->tstamp.tv64 == 0)
__net_timestamp(skb);
if (need_software_tstamp) {
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
struct timeval tv;
skb_get_timestamp(skb, &tv);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
sizeof(tv), &tv);
} else {
struct timespec ts;
skb_get_timestampns(skb, &ts);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
sizeof(ts), &ts);
}
}
memset(&tss, 0, sizeof(tss));
if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2))
empty = 0;
if (!empty)
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss);
}
|
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
struct scm_timestamping tss;
int empty = 1;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
if (need_software_tstamp && skb->tstamp.tv64 == 0)
__net_timestamp(skb);
if (need_software_tstamp) {
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
struct timeval tv;
skb_get_timestamp(skb, &tv);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
sizeof(tv), &tv);
} else {
struct timespec ts;
skb_get_timestampns(skb, &ts);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
sizeof(ts), &ts);
}
}
memset(&tss, 0, sizeof(tss));
if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2))
empty = 0;
if (!empty)
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss);
}
|
C
|
linux
| 0 |
CVE-2016-1641
|
https://www.cvedetails.com/cve/CVE-2016-1641/
| null |
https://github.com/chromium/chromium/commit/75ca8ffd7bd7c58ace1144df05e1307d8d707662
|
75ca8ffd7bd7c58ace1144df05e1307d8d707662
|
Don't call WebContents::DownloadImage() callback if the WebContents were deleted
BUG=583718
Review URL: https://codereview.chromium.org/1685343004
Cr-Commit-Position: refs/heads/master@{#375700}
|
SiteInstanceImpl* WebContentsImpl::GetSiteInstance() const {
return GetRenderManager()->current_host()->GetSiteInstance();
}
|
SiteInstanceImpl* WebContentsImpl::GetSiteInstance() const {
return GetRenderManager()->current_host()->GetSiteInstance();
}
|
C
|
Chrome
| 0 |
CVE-2015-1465
|
https://www.cvedetails.com/cve/CVE-2015-1465/
|
CWE-17
|
https://github.com/torvalds/linux/commit/df4d92549f23e1c037e83323aff58a21b3de7fe0
|
df4d92549f23e1c037e83323aff58a21b3de7fe0
|
ipv4: try to cache dst_entries which would cause a redirect
Not caching dst_entries which cause redirects could be exploited by hosts
on the same subnet, causing a severe DoS attack. This effect has been aggravated
since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()").
Lookups causing redirects will be allocated with DST_NOCACHE set which
will force dst_release to free them via RCU. Unfortunately waiting for
RCU grace period just takes too long, we can end up with >1M dst_entries
waiting to be released and the system will run OOM. rcuos threads cannot
catch up under high softirq load.
Attaching the flag to emit a redirect later on to the specific skb allows
us to cache those dst_entries thus reducing the pressure on allocation
and deallocation.
This issue was discovered by Marcelo Leitner.
Cc: Julian Anastasov <[email protected]>
Signed-off-by: Marcelo Leitner <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: Julian Anastasov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock);
}
}
|
static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock);
}
}
|
C
|
linux
| 0 |
CVE-2016-5158
|
https://www.cvedetails.com/cve/CVE-2016-5158/
|
CWE-190
|
https://github.com/chromium/chromium/commit/6a310d99a741f9ba5e4e537c5ec49d3adbe5876f
|
6a310d99a741f9ba5e4e537c5ec49d3adbe5876f
|
Position info (item n of m) incorrect if hidden focusable items in list
Bug: 836997
Change-Id: I971fa7076f72d51829b36af8e379260d48ca25ec
Reviewed-on: https://chromium-review.googlesource.com/c/1450235
Commit-Queue: Aaron Leventhal <[email protected]>
Reviewed-by: Nektarios Paisios <[email protected]>
Cr-Commit-Position: refs/heads/master@{#628890}
|
void AXTree::UpdateData(const AXTreeData& new_data) {
if (data_ == new_data)
return;
AXTreeData old_data = data_;
data_ = new_data;
for (AXTreeObserver& observer : observers_)
observer.OnTreeDataChanged(this, old_data, new_data);
}
|
void AXTree::UpdateData(const AXTreeData& new_data) {
if (data_ == new_data)
return;
AXTreeData old_data = data_;
data_ = new_data;
for (AXTreeObserver& observer : observers_)
observer.OnTreeDataChanged(this, old_data, new_data);
}
|
C
|
Chrome
| 0 |
CVE-2016-5147
|
https://www.cvedetails.com/cve/CVE-2016-5147/
|
CWE-79
|
https://github.com/chromium/chromium/commit/5472db1c7eca35822219d03be5c817d9a9258c11
|
5472db1c7eca35822219d03be5c817d9a9258c11
|
Always call UpdateCompositedScrollOffset, not just for the root layer
Bug: 927560
Change-Id: I1d5522aae4f11dd3f5b8947bb089bac1bf19bdb4
Reviewed-on: https://chromium-review.googlesource.com/c/1452701
Reviewed-by: Chris Harrelson <[email protected]>
Commit-Queue: Mason Freed <[email protected]>
Cr-Commit-Position: refs/heads/master@{#628942}
|
PaintLayerScrollableArea::DelayScrollOffsetClampScope::NeedsClampList() {
DEFINE_STATIC_LOCAL(
Persistent<HeapVector<Member<PaintLayerScrollableArea>>>,
needs_clamp_list,
(MakeGarbageCollected<HeapVector<Member<PaintLayerScrollableArea>>>()));
return *needs_clamp_list;
}
|
PaintLayerScrollableArea::DelayScrollOffsetClampScope::NeedsClampList() {
DEFINE_STATIC_LOCAL(
Persistent<HeapVector<Member<PaintLayerScrollableArea>>>,
needs_clamp_list,
(MakeGarbageCollected<HeapVector<Member<PaintLayerScrollableArea>>>()));
return *needs_clamp_list;
}
|
C
|
Chrome
| 0 |
CVE-2015-6763
|
https://www.cvedetails.com/cve/CVE-2015-6763/
| null |
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
MacViews: Enable secure text input for password Textfields.
In Cocoa the NSTextInputContext automatically enables secure text input
when activated and it's in the secure text entry mode.
RenderWidgetHostViewMac did the similar thing for ages following the
WebKit example.
views::Textfield needs to do the same thing in a fashion that's
sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions
are possible when the Textfield gets focus, activates the secure text
input mode and the RWHVM loses focus immediately afterwards and disables
the secure text input instead of leaving it in the enabled state.
BUG=818133,677220
Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b
Reviewed-on: https://chromium-review.googlesource.com/943064
Commit-Queue: Michail Pishchagin <[email protected]>
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Avi Drissman <[email protected]>
Reviewed-by: Peter Kasting <[email protected]>
Cr-Commit-Position: refs/heads/master@{#542517}
|
void Textfield::SetCompositionText(const ui::CompositionText& composition) {
if (GetTextInputType() == ui::TEXT_INPUT_TYPE_NONE)
return;
OnBeforeUserAction();
skip_input_method_cancel_composition_ = true;
model_->SetCompositionText(composition);
skip_input_method_cancel_composition_ = false;
UpdateAfterChange(true, true);
OnAfterUserAction();
}
|
void Textfield::SetCompositionText(const ui::CompositionText& composition) {
if (GetTextInputType() == ui::TEXT_INPUT_TYPE_NONE)
return;
OnBeforeUserAction();
skip_input_method_cancel_composition_ = true;
model_->SetCompositionText(composition);
skip_input_method_cancel_composition_ = false;
UpdateAfterChange(true, true);
OnAfterUserAction();
}
|
C
|
Chrome
| 0 |
CVE-2014-7904
|
https://www.cvedetails.com/cve/CVE-2014-7904/
|
CWE-119
|
https://github.com/chromium/chromium/commit/9965adea952e84c925de418e971b204dfda7d6e0
|
9965adea952e84c925de418e971b204dfda7d6e0
|
Replace fixed string uses of AddHeaderFromString
Uses of AddHeaderFromString() with a static string may as well be
replaced with SetHeader(). Do so.
BUG=None
Review-Url: https://codereview.chromium.org/2236933005
Cr-Commit-Position: refs/heads/master@{#418161}
|
HttpCache* MockNetworkLayer::GetCache() {
return NULL;
}
|
HttpCache* MockNetworkLayer::GetCache() {
return NULL;
}
|
C
|
Chrome
| 0 |
CVE-2011-2211
|
https://www.cvedetails.com/cve/CVE-2011-2211/
|
CWE-264
|
https://github.com/torvalds/linux/commit/21c5977a836e399fc710ff2c5367845ed5c2527f
|
21c5977a836e399fc710ff2c5367845ed5c2527f
|
alpha: fix several security issues
Fix several security issues in Alpha-specific syscalls. Untested, but
mostly trivial.
1. Signedness issue in osf_getdomainname allows copying out-of-bounds
kernel memory to userland.
2. Signedness issue in osf_sysinfo allows copying large amounts of
kernel memory to userland.
3. Typo (?) in osf_getsysinfo bounds minimum instead of maximum copy
size, allowing copying large amounts of kernel memory to userland.
4. Usage of user pointer in osf_wait4 while under KERNEL_DS allows
privilege escalation via writing return value of sys_wait4 to kernel
memory.
Signed-off-by: Dan Rosenberg <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Matt Turner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
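A user-space analog (not the Alpha syscall code) of the signedness bugs in items 1 and 2: a signed length of -1 slips past the upper-bound check and becomes an enormous unsigned count when handed to the copy, reading far past the source buffer.

#include <stddef.h>
#include <string.h>

static char domainname[32] = "example";

/* Broken: 'len' is signed, so len == -1 passes the bound check and then
 * converts to SIZE_MAX when used as the copy length. */
int get_name_broken(char *dst, int len)
{
    if (len > (int)sizeof(domainname))
        len = sizeof(domainname);
    memcpy(dst, domainname, len);        /* negative len -> huge copy */
    return 0;
}

/* Fixed: reject negative lengths (or use an unsigned type) before clamping. */
int get_name_fixed(char *dst, int len)
{
    if (len < 0)
        return -1;
    if ((size_t)len > sizeof(domainname))
        len = sizeof(domainname);
    memcpy(dst, domainname, (size_t)len);
    return 0;
}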
osf_fix_iov_len(const struct iovec __user *iov, unsigned long count)
{
unsigned long i;
for (i = 0 ; i < count ; i++) {
int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1;
if (put_user(0, iov_len_high))
return -EFAULT;
}
return 0;
}
|
osf_fix_iov_len(const struct iovec __user *iov, unsigned long count)
{
unsigned long i;
for (i = 0 ; i < count ; i++) {
int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1;
if (put_user(0, iov_len_high))
return -EFAULT;
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
static bool hal_open() {
LOG_INFO("%s", __func__);
int fd_array[CH_MAX];
int number_of_ports = vendor->send_command(VENDOR_OPEN_USERIAL, &fd_array);
if (number_of_ports != 1) {
LOG_ERROR("%s opened the wrong number of ports: got %d, expected 1.", __func__, number_of_ports);
goto error;
}
uart_fd = fd_array[0];
if (uart_fd == INVALID_FD) {
LOG_ERROR("%s unable to open the uart serial port.", __func__);
goto error;
}
uart_stream = eager_reader_new(uart_fd, &allocator_malloc, HCI_HAL_SERIAL_BUFFER_SIZE, SIZE_MAX, "hci_single_channel");
if (!uart_stream) {
LOG_ERROR("%s unable to create eager reader for the uart serial port.", __func__);
goto error;
}
stream_has_interpretation = false;
stream_corruption_detected = false;
stream_corruption_bytes_to_ignore = 0;
eager_reader_register(uart_stream, thread_get_reactor(thread), event_uart_has_bytes, NULL);
thread_set_priority(thread, HCI_THREAD_PRIORITY);
thread_set_priority(eager_reader_get_read_thread(uart_stream), HCI_THREAD_PRIORITY);
return true;
error:
interface.close();
return false;
}
|
static bool hal_open() {
LOG_INFO("%s", __func__);
int fd_array[CH_MAX];
int number_of_ports = vendor->send_command(VENDOR_OPEN_USERIAL, &fd_array);
if (number_of_ports != 1) {
LOG_ERROR("%s opened the wrong number of ports: got %d, expected 1.", __func__, number_of_ports);
goto error;
}
uart_fd = fd_array[0];
if (uart_fd == INVALID_FD) {
LOG_ERROR("%s unable to open the uart serial port.", __func__);
goto error;
}
uart_stream = eager_reader_new(uart_fd, &allocator_malloc, HCI_HAL_SERIAL_BUFFER_SIZE, SIZE_MAX, "hci_single_channel");
if (!uart_stream) {
LOG_ERROR("%s unable to create eager reader for the uart serial port.", __func__);
goto error;
}
stream_has_interpretation = false;
stream_corruption_detected = false;
stream_corruption_bytes_to_ignore = 0;
eager_reader_register(uart_stream, thread_get_reactor(thread), event_uart_has_bytes, NULL);
thread_set_priority(thread, HCI_THREAD_PRIORITY);
thread_set_priority(eager_reader_get_read_thread(uart_stream), HCI_THREAD_PRIORITY);
return true;
error:
interface.close();
return false;
}
|
C
|
Android
| 0 |
CVE-2013-4526
|
https://www.cvedetails.com/cve/CVE-2013-4526/
|
CWE-119
|
https://git.qemu.org/?p=qemu.git;a=commit;h=ae2158ad6ce0845b2fae2a22aa7f19c0d7a71ce5
|
ae2158ad6ce0845b2fae2a22aa7f19c0d7a71ce5
| null |
static int ahci_start_transfer(IDEDMA *dma)
{
AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
IDEState *s = &ad->port.ifs[0];
uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
/* write == ram -> device */
uint32_t opts = le32_to_cpu(ad->cur_cmd->opts);
int is_write = opts & AHCI_CMD_WRITE;
int is_atapi = opts & AHCI_CMD_ATAPI;
int has_sglist = 0;
if (is_atapi && !ad->done_atapi_packet) {
/* already prepopulated iobuffer */
ad->done_atapi_packet = true;
goto out;
}
if (!ahci_populate_sglist(ad, &s->sg, 0)) {
has_sglist = 1;
}
DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
has_sglist ? "" : "o");
if (has_sglist && size) {
if (is_write) {
dma_buf_write(s->data_ptr, size, &s->sg);
} else {
dma_buf_read(s->data_ptr, size, &s->sg);
}
}
/* update number of transferred bytes */
ad->cur_cmd->status = cpu_to_le32(le32_to_cpu(ad->cur_cmd->status) + size);
out:
/* declare that we processed everything */
s->data_ptr = s->data_end;
if (has_sglist) {
qemu_sglist_destroy(&s->sg);
}
s->end_transfer_func(s);
if (!(s->status & DRQ_STAT)) {
/* done with DMA */
ahci_trigger_irq(ad->hba, ad, PORT_IRQ_STAT_DSS);
}
return 0;
}
|
static int ahci_start_transfer(IDEDMA *dma)
{
AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
IDEState *s = &ad->port.ifs[0];
uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
/* write == ram -> device */
uint32_t opts = le32_to_cpu(ad->cur_cmd->opts);
int is_write = opts & AHCI_CMD_WRITE;
int is_atapi = opts & AHCI_CMD_ATAPI;
int has_sglist = 0;
if (is_atapi && !ad->done_atapi_packet) {
/* already prepopulated iobuffer */
ad->done_atapi_packet = true;
goto out;
}
if (!ahci_populate_sglist(ad, &s->sg, 0)) {
has_sglist = 1;
}
DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
has_sglist ? "" : "o");
if (has_sglist && size) {
if (is_write) {
dma_buf_write(s->data_ptr, size, &s->sg);
} else {
dma_buf_read(s->data_ptr, size, &s->sg);
}
}
/* update number of transferred bytes */
ad->cur_cmd->status = cpu_to_le32(le32_to_cpu(ad->cur_cmd->status) + size);
out:
/* declare that we processed everything */
s->data_ptr = s->data_end;
if (has_sglist) {
qemu_sglist_destroy(&s->sg);
}
s->end_transfer_func(s);
if (!(s->status & DRQ_STAT)) {
/* done with DMA */
ahci_trigger_irq(ad->hba, ad, PORT_IRQ_STAT_DSS);
}
return 0;
}
|
C
|
qemu
| 0 |
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
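A sketch of the kind of overflow-checked helper the message mentions; the name safe_size_mul and its bool-returning convention are illustrative assumptions rather than JasPer's actual jas_safe_* signatures. The second function mirrors the width * height * (prec + 7) / 8 computation that could previously wrap around.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stores x * y in *result only if the product fits in a size_t. */
static bool safe_size_mul(size_t x, size_t y, size_t *result)
{
    if (y != 0 && x > SIZE_MAX / y)
        return false;                    /* multiplication would overflow */
    *result = x * y;
    return true;
}

/* Checked version of the per-component raw-size computation. */
static bool component_rawsize(size_t width, size_t height, size_t prec,
                              size_t *rawsize)
{
    size_t tmp;
    if (!safe_size_mul(width, height, &tmp) ||
        !safe_size_mul(tmp, prec + 7, rawsize))
        return false;
    *rawsize /= 8;
    return true;
}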
jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms,
int clrspc)
{
jas_image_t *image;
size_t rawsize;
uint_fast32_t inmem;
int cmptno;
jas_image_cmptparm_t *cmptparm;
image = 0;
JAS_DBGLOG(100, ("jas_image_create(%d, %p, %d)\n", numcmpts, cmptparms,
clrspc));
if (!(image = jas_image_create0())) {
goto error;
}
image->clrspc_ = clrspc;
image->maxcmpts_ = numcmpts;
//// image->inmem_ = true;
/* Allocate memory for the per-component information. */
if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_,
sizeof(jas_image_cmpt_t *)))) {
goto error;
}
/* Initialize in case of failure. */
for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) {
image->cmpts_[cmptno] = 0;
}
#if 0
/* Compute the approximate raw size of the image. */
rawsize = 0;
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
rawsize += cmptparm->width * cmptparm->height *
(cmptparm->prec + 7) / 8;
}
/* Decide whether to buffer the image data in memory, based on the
raw size of the image. */
inmem = (rawsize < JAS_IMAGE_INMEMTHRESH);
#endif
/* Create the individual image components. */
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
if (!jas_safe_size_mul3(cmptparm->width, cmptparm->height,
(cmptparm->prec + 7), &rawsize)) {
goto error;
}
rawsize /= 8;
inmem = (rawsize < JAS_IMAGE_INMEMTHRESH);
if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx,
cmptparm->tly, cmptparm->hstep, cmptparm->vstep,
cmptparm->width, cmptparm->height, cmptparm->prec,
cmptparm->sgnd, inmem))) {
goto error;
}
++image->numcmpts_;
}
/* Determine the bounding box for all of the components on the
reference grid (i.e., the image area) */
jas_image_setbbox(image);
return image;
error:
if (image) {
jas_image_destroy(image);
}
return 0;
}
|
jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms,
int clrspc)
{
jas_image_t *image;
uint_fast32_t rawsize;
uint_fast32_t inmem;
int cmptno;
jas_image_cmptparm_t *cmptparm;
if (!(image = jas_image_create0())) {
return 0;
}
image->clrspc_ = clrspc;
image->maxcmpts_ = numcmpts;
image->inmem_ = true;
//// image->inmem_ = true;
/* Allocate memory for the per-component information. */
if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_,
sizeof(jas_image_cmpt_t *)))) {
jas_image_destroy(image);
return 0;
}
/* Initialize in case of failure. */
for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) {
image->cmpts_[cmptno] = 0;
}
/* Compute the approximate raw size of the image. */
rawsize = 0;
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
rawsize += cmptparm->width * cmptparm->height *
(cmptparm->prec + 7) / 8;
}
/* Decide whether to buffer the image data in memory, based on the
raw size of the image. */
inmem = (rawsize < JAS_IMAGE_INMEMTHRESH);
/* Create the individual image components. */
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx,
cmptparm->tly, cmptparm->hstep, cmptparm->vstep,
cmptparm->width, cmptparm->height, cmptparm->prec,
cmptparm->sgnd, inmem))) {
jas_image_destroy(image);
return 0;
}
++image->numcmpts_;
}
/* Determine the bounding box for all of the components on the
reference grid (i.e., the image area) */
jas_image_setbbox(image);
return image;
}
|
C
|
jasper
| 1 |
CVE-2012-2100
|
https://www.cvedetails.com/cve/CVE-2012-2100/
|
CWE-189
|
https://github.com/torvalds/linux/commit/d50f2ab6f050311dbf7b8f5501b25f0bf64a439b
|
d50f2ab6f050311dbf7b8f5501b25f0bf64a439b
|
ext4: fix undefined behavior in ext4_fill_flex_info()
Commit 503358ae01b70ce6909d19dd01287093f6b6271c ("ext4: avoid divide by
zero when trying to mount a corrupted file system") fixes CVE-2009-4307
by performing a sanity check on s_log_groups_per_flex, since it can be
set to a bogus value by an attacker.
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
groups_per_flex = 1 << sbi->s_log_groups_per_flex;
if (groups_per_flex < 2) { ... }
This patch fixes two potential issues in the previous commit.
1) The sanity check might only work on architectures like PowerPC.
On x86, 5 bits are used for the shifting amount. That means, given a
large s_log_groups_per_flex value like 36, groups_per_flex = 1 << 36
is essentially 1 << 4 = 16, rather than 0. This will bypass the check,
leaving s_log_groups_per_flex and groups_per_flex inconsistent.
2) The sanity check relies on undefined behavior, i.e., oversized shift.
A standard-conforming C compiler could rewrite the check in unexpected
ways. Consider the following equivalent form, assuming groups_per_flex
is unsigned for simplicity.
groups_per_flex = 1 << sbi->s_log_groups_per_flex;
if (groups_per_flex == 0 || groups_per_flex == 1) {
We compile the code snippet using Clang 3.0 and GCC 4.6. Clang will
completely optimize away the check groups_per_flex == 0, leaving the
patched code as vulnerable as the original. GCC keeps the check, but
there is no guarantee that future versions will do the same.
Signed-off-by: Xi Wang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected]
|
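A small self-contained illustration of the point about oversized shifts, not the ext4 code: the reliable check bounds the exponent before shifting instead of inspecting the shifted result, because a shift by 36 is undefined behavior and on x86 is typically masked to a shift by 4, which passes the old groups_per_flex < 2 test.

#include <stdio.h>

/* Validate the exponent itself before ever shifting. */
static int groups_per_flex_from(unsigned int log_groups,
                                unsigned int *groups_per_flex)
{
    if (log_groups >= 32)
        return -1;                       /* bogus on-disk value: reject */
    *groups_per_flex = 1U << log_groups;
    return 0;
}

int main(void)
{
    unsigned int g = 0;

    if (groups_per_flex_from(4, &g) == 0)
        printf("log 4  -> groups_per_flex = %u\n", g);

    if (groups_per_flex_from(36, &g) != 0)
        printf("log 36 -> rejected before any shift is evaluated\n");

    return 0;
}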
static inline void unregister_as_ext2(void)
{
unregister_filesystem(&ext2_fs_type);
}
|
static inline void unregister_as_ext2(void)
{
unregister_filesystem(&ext2_fs_type);
}
|
C
|
linux
| 0 |
CVE-2017-0377
|
https://www.cvedetails.com/cve/CVE-2017-0377/
|
CWE-200
|
https://github.com/torproject/tor/commit/665baf5ed5c6186d973c46cdea165c0548027350
|
665baf5ed5c6186d973c46cdea165c0548027350
|
Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377.
|
nodelist_remove_routerinfo(routerinfo_t *ri)
{
node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
if (node && node->ri == ri) {
node->ri = NULL;
if (! node_is_usable(node)) {
nodelist_drop_node(node, 1);
node_free(node);
}
}
}
|
nodelist_remove_routerinfo(routerinfo_t *ri)
{
node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
if (node && node->ri == ri) {
node->ri = NULL;
if (! node_is_usable(node)) {
nodelist_drop_node(node, 1);
node_free(node);
}
}
}
|
C
|
tor
| 0 |
CVE-2018-6094
|
https://www.cvedetails.com/cve/CVE-2018-6094/
|
CWE-119
|
https://github.com/chromium/chromium/commit/0749ec24fae74ec32d0567eef0e5ec43c84dbcb9
|
0749ec24fae74ec32d0567eef0e5ec43c84dbcb9
|
Call HeapObjectHeader::checkHeader solely for its side-effect.
This requires changing its signature. This is a preliminary stage to making it
private.
BUG=633030
Review-Url: https://codereview.chromium.org/2698673003
Cr-Commit-Position: refs/heads/master@{#460489}
|
void NormalPageArena::clearFreeLists() {
setAllocationPoint(nullptr, 0);
m_freeList.clear();
}
|
void NormalPageArena::clearFreeLists() {
setAllocationPoint(nullptr, 0);
m_freeList.clear();
}
|
C
|
Chrome
| 0 |
CVE-2013-2929
|
https://www.cvedetails.com/cve/CVE-2013-2929/
|
CWE-264
|
https://github.com/torvalds/linux/commit/d049f74f2dbe71354d43d393ac3a188947811348
|
d049f74f2dbe71354d43d393ac3a188947811348
|
exec/ptrace: fix get_dumpable() incorrect tests
The get_dumpable() return value is not boolean. Most users of the
function actually want to be testing for non-SUID_DUMP_USER(1) rather than
SUID_DUMP_DISABLE(0). The SUID_DUMP_ROOT(2) is also considered a
protected state. Almost all places did this correctly, excepting the two
places fixed in this patch.
Wrong logic:
if (dumpable == SUID_DUMP_DISABLE) { /* be protective */ }
or
if (dumpable == 0) { /* be protective */ }
or
if (!dumpable) { /* be protective */ }
Correct logic:
if (dumpable != SUID_DUMP_USER) { /* be protective */ }
or
if (dumpable != 1) { /* be protective */ }
Without this patch, if the system had set the sysctl fs/suid_dumpable=2, a
user was able to ptrace attach to processes that had dropped privileges to
that user. (This may have been partially mitigated if Yama was enabled.)
The macros have been moved into the file that declares get/set_dumpable(),
which means things like the ia64 code can see them too.
CVE-2013-2929
Reported-by: Vasily Kulikov <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: "Luck, Tony" <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void put_arg_page(struct page *page)
{
put_page(page);
}
|
static void put_arg_page(struct page *page)
{
put_page(page);
}
|
C
|
linux
| 0 |
CVE-2013-7421
|
https://www.cvedetails.com/cve/CVE-2013-7421/
|
CWE-264
|
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
struct {
__be32 l, r;
} X;
memcpy(&X, src, sizeof(X));
F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
memcpy(dst, &X, sizeof(X));
}
|
static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
struct {
__be32 l, r;
} X;
memcpy(&X, src, sizeof(X));
F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
memcpy(dst, &X, sizeof(X));
}
|
C
|
linux
| 0 |
CVE-2011-3099
|
https://www.cvedetails.com/cve/CVE-2011-3099/
|
CWE-399
|
https://github.com/chromium/chromium/commit/3bbc818ed1a7b63b8290bbde9ae975956748cb8a
|
3bbc818ed1a7b63b8290bbde9ae975956748cb8a
|
[GTK] Inspector should set a default attached height before being attached
https://bugs.webkit.org/show_bug.cgi?id=90767
Reviewed by Xan Lopez.
We are currently using the minimum attached height in
WebKitWebViewBase as the default height for the inspector when
attached. It would be easier for WebKitWebViewBase and embedders
implementing attach() if the inspector already had an attached
height set when it's being attached.
* UIProcess/API/gtk/WebKitWebViewBase.cpp:
(webkitWebViewBaseContainerAdd): Don't initialize
inspectorViewHeight.
(webkitWebViewBaseSetInspectorViewHeight): Allow to set the
inspector view height before having an inpector view, but only
queue a resize when the view already has an inspector view.
* UIProcess/API/gtk/tests/TestInspector.cpp:
(testInspectorDefault):
(testInspectorManualAttachDetach):
* UIProcess/gtk/WebInspectorProxyGtk.cpp:
(WebKit::WebInspectorProxy::platformAttach): Set the default
attached height before attach the inspector view.
git-svn-id: svn://svn.chromium.org/blink/trunk@124479 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static gboolean detachCallback(WebKitWebInspector*, InspectorTest* test)
{
return test->detach();
}
|
static gboolean detachCallback(WebKitWebInspector*, InspectorTest* test)
{
return test->detach();
}
|
C
|
Chrome
| 0 |
CVE-2016-7097
|
https://www.cvedetails.com/cve/CVE-2016-7097/
|
CWE-285
|
https://github.com/torvalds/linux/commit/073931017b49d9458aa351605b43a7e34598caef
|
073931017b49d9458aa351605b43a7e34598caef
|
posix_acl: Clear SGID bit when setting file permissions
When file permissions are modified via chmod(2) and the user is not in
the owning group or capable of CAP_FSETID, the setgid bit is cleared in
inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file
permissions as well as the new ACL, but doesn't clear the setgid bit in
a similar way; this allows to bypass the check in chmod(2). Fix that.
References: CVE-2016-7097
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Jeff Layton <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Andreas Gruenbacher <[email protected]>
|
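A hedged sketch of the rule being extended to the ACL path: unless the caller is in the file's owning group or holds the CAP_FSETID-style privilege, the setgid bit is stripped from the mode being applied. The two predicate helpers are simplified user-space stand-ins, not kernel APIs.

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Simplified stand-ins for the kernel's group-membership and CAP_FSETID
 * checks (supplementary groups and real capabilities are ignored here). */
static bool caller_in_owning_group(gid_t file_gid)
{
    return getegid() == file_gid;
}

static bool caller_has_fsetid(void)
{
    return geteuid() == 0;
}

/* The policy chmod(2) already applies, now also applied when mode bits
 * change as a side effect of setting a POSIX ACL. */
mode_t apply_sgid_policy(mode_t requested_mode, gid_t file_gid)
{
    if (!caller_in_owning_group(file_gid) && !caller_has_fsetid())
        requested_mode &= ~S_ISGID;      /* strip setgid for unprivileged callers */
    return requested_mode;
}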
static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
{
int i, count;
struct posix_acl *acl;
struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
const char *end = value + size;
if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION))
return ERR_PTR(-EINVAL);
count = f2fs_acl_count(size);
if (count < 0)
return ERR_PTR(-EINVAL);
if (count == 0)
return NULL;
acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
if ((char *)entry > end)
goto fail;
acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag);
acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm);
switch (acl->a_entries[i].e_tag) {
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry_short));
break;
case ACL_USER:
acl->a_entries[i].e_uid =
make_kuid(&init_user_ns,
le32_to_cpu(entry->e_id));
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry));
break;
case ACL_GROUP:
acl->a_entries[i].e_gid =
make_kgid(&init_user_ns,
le32_to_cpu(entry->e_id));
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry));
break;
default:
goto fail;
}
}
if ((char *)entry != end)
goto fail;
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
|
static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
{
int i, count;
struct posix_acl *acl;
struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
const char *end = value + size;
if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION))
return ERR_PTR(-EINVAL);
count = f2fs_acl_count(size);
if (count < 0)
return ERR_PTR(-EINVAL);
if (count == 0)
return NULL;
acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
if ((char *)entry > end)
goto fail;
acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag);
acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm);
switch (acl->a_entries[i].e_tag) {
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry_short));
break;
case ACL_USER:
acl->a_entries[i].e_uid =
make_kuid(&init_user_ns,
le32_to_cpu(entry->e_id));
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry));
break;
case ACL_GROUP:
acl->a_entries[i].e_gid =
make_kgid(&init_user_ns,
le32_to_cpu(entry->e_id));
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry));
break;
default:
goto fail;
}
}
if ((char *)entry != end)
goto fail;
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
|
C
|
linux
| 0 |
CVE-2013-4483
|
https://www.cvedetails.com/cve/CVE-2013-4483/
|
CWE-189
|
https://github.com/torvalds/linux/commit/6062a8dc0517bce23e3c2f7d2fea5e22411269a3
|
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
|
ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores look like this:
threads   vanilla   Davidlohr's   Davidlohr's +    Davidlohr's +
                    patches       rwlock patches   v3 patches
10        610652    726325        1783589          2142206
20        341570    365699        1520453          1977878
30        288102    307037        1498167          2037995
40        290714    305955        1612665          2256484
50        288620    312890        1733453          2650292
60        289987    306043        1649360          2388008
70        291298    306347        1723167          2717486
80        290948    305662        1729545          2763582
90        290996    306680        1736021          2757524
100       292243    306700        1773700          3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
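As an illustration of the locking strategy described above, here is a hedged user-space sketch (struct and function names are invented; this is not the ipc/sem.c code): a single-semaphore operation takes only that semaphore's lock, while a multi-semaphore transaction takes the array lock plus every per-semaphore lock.
#include <pthread.h>

struct sem_slot  { pthread_mutex_t lock; int value; };
struct sem_array { pthread_mutex_t array_lock; int nslots; struct sem_slot *slots; };

/* Lock acquisition for one operation: fast path for nsops == 1,
 * full array locking for complex transactions. */
static void lock_for_op(struct sem_array *sma, int nsops, int slot)
{
    if (nsops == 1) {
        pthread_mutex_lock(&sma->slots[slot].lock);   /* common case */
        return;
    }
    pthread_mutex_lock(&sma->array_lock);             /* serialize complex ops */
    for (int i = 0; i < sma->nslots; i++)
        pthread_mutex_lock(&sma->slots[i].lock);      /* plus every per-slot lock */
}

static void unlock_for_op(struct sem_array *sma, int nsops, int slot)
{
    if (nsops == 1) {
        pthread_mutex_unlock(&sma->slots[slot].lock);
        return;
    }
    for (int i = sma->nslots - 1; i >= 0; i--)
        pthread_mutex_unlock(&sma->slots[i].lock);
    pthread_mutex_unlock(&sma->array_lock);
}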
|
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
int semncnt;
struct sem_queue * q;
semncnt = 0;
list_for_each_entry(q, &sma->sem_pending, list) {
struct sembuf * sops = q->sops;
int nsops = q->nsops;
int i;
for (i = 0; i < nsops; i++)
if (sops[i].sem_num == semnum
&& (sops[i].sem_op < 0)
&& !(sops[i].sem_flg & IPC_NOWAIT))
semncnt++;
}
return semncnt;
}
|
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
int semncnt;
struct sem_queue * q;
semncnt = 0;
list_for_each_entry(q, &sma->sem_pending, list) {
struct sembuf * sops = q->sops;
int nsops = q->nsops;
int i;
for (i = 0; i < nsops; i++)
if (sops[i].sem_num == semnum
&& (sops[i].sem_op < 0)
&& !(sops[i].sem_flg & IPC_NOWAIT))
semncnt++;
}
return semncnt;
}
|
C
|
linux
| 0 |
CVE-2017-18257
|
https://www.cvedetails.com/cve/CVE-2017-18257/
|
CWE-190
|
https://github.com/torvalds/linux/commit/b86e33075ed1909d8002745b56ecf73b833db143
|
b86e33075ed1909d8002745b56ecf73b833db143
|
f2fs: fix a dead loop in f2fs_fiemap()
A dead loop can be triggered in f2fs_fiemap() using the test case
as below:
...
fd = open();
fallocate(fd, 0, 0, 4294967296);
ioctl(fd, FS_IOC_FIEMAP, fiemap_buf);
...
It's caused by an overflow in __get_data_block():
...
bh->b_size = map.m_len << inode->i_blkbits;
...
map.m_len is an unsigned int, and bh->b_size is a size_t, which is 64 bits
on a 64-bit architecture; the left shift is performed in unsigned int
arithmetic before the conversion to size_t, so it can overflow.
In the above-mentioned case, bh->b_size will be zero, and f2fs_fiemap()
will call get_data_block() at block 0 again and again.
Fix this by adding a force conversion before left shift.
Signed-off-by: Wei Fang <[email protected]>
Acked-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
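The overflow can be reproduced in isolation; a minimal user-space sketch on a 64-bit build, using the values from the test case above (illustrative only, not the f2fs code):
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    unsigned int m_len = 1048576;   /* 4294967296 bytes expressed as 4 KiB blocks */
    unsigned int i_blkbits = 12;    /* 4 KiB block size */

    /* The shift is evaluated in 32-bit unsigned arithmetic and wraps to zero
     * before being widened to size_t, which is what made f2fs_fiemap() spin
     * at block 0. */
    size_t b_size_buggy = m_len << i_blkbits;

    /* Widening first, as in the fix, keeps the full 64-bit result. */
    size_t b_size_fixed = (size_t)m_len << i_blkbits;

    printf("buggy: %zu, fixed: %zu\n", b_size_buggy, b_size_fixed);
    return 0;
}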
|
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
int rw)
{
__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
|
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
int rw)
{
__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
|
C
|
linux
| 0 |
CVE-2018-16425
|
https://www.cvedetails.com/cve/CVE-2018-16425/
|
CWE-415
|
https://github.com/OpenSC/OpenSC/commit/360e95d45ac4123255a4c796db96337f332160ad#diff-d643a0fa169471dbf2912f4866dc49c5
|
360e95d45ac4123255a4c796db96337f332160ad#diff-d643a0fa169471dbf2912f4866dc49c5
|
fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems.
|
int sc_bin_to_hex(const u8 *in, size_t in_len, char *out, size_t out_len,
int in_sep)
{
unsigned int n, sep_len;
char *pos, *end, sep;
sep = (char)in_sep;
sep_len = sep > 0 ? 1 : 0;
pos = out;
end = out + out_len;
for (n = 0; n < in_len; n++) {
if (pos + 3 + sep_len >= end)
return SC_ERROR_BUFFER_TOO_SMALL;
if (n && sep_len)
*pos++ = sep;
sprintf(pos, "%02x", in[n]);
pos += 2;
}
*pos = '\0';
return SC_SUCCESS;
}
|
int sc_bin_to_hex(const u8 *in, size_t in_len, char *out, size_t out_len,
int in_sep)
{
unsigned int n, sep_len;
char *pos, *end, sep;
sep = (char)in_sep;
sep_len = sep > 0 ? 1 : 0;
pos = out;
end = out + out_len;
for (n = 0; n < in_len; n++) {
if (pos + 3 + sep_len >= end)
return SC_ERROR_BUFFER_TOO_SMALL;
if (n && sep_len)
*pos++ = sep;
sprintf(pos, "%02x", in[n]);
pos += 2;
}
*pos = '\0';
return SC_SUCCESS;
}
|
C
|
OpenSC
| 0 |
CVE-2017-10971
|
https://www.cvedetails.com/cve/CVE-2017-10971/
|
CWE-119
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=215f894965df5fb0bb45b107d84524e700d2073c
|
215f894965df5fb0bb45b107d84524e700d2073c
| null |
EnqueueEvent(InternalEvent *ev, DeviceIntPtr device)
{
QdEventPtr tail = NULL;
QdEventPtr qe;
SpritePtr pSprite = device->spriteInfo->sprite;
int eventlen;
DeviceEvent *event = &ev->device_event;
if (!xorg_list_is_empty(&syncEvents.pending))
tail = xorg_list_last_entry(&syncEvents.pending, QdEventRec, next);
NoticeTimeMillis(device, &ev->any.time);
/* Fix for key repeating bug. */
if (device->key != NULL && device->key->xkbInfo != NULL &&
event->type == ET_KeyRelease)
AccessXCancelRepeatKey(device->key->xkbInfo, event->detail.key);
if (DeviceEventCallback) {
DeviceEventInfoRec eventinfo;
/* The RECORD spec says that the root window field of motion events
* must be valid. At this point, it hasn't been filled in yet, so
* we do it here. The long expression below is necessary to get
* the current root window; the apparently reasonable alternative
* GetCurrentRootWindow()->drawable.id doesn't give you the right
* answer on the first motion event after a screen change because
* the data that GetCurrentRootWindow relies on hasn't been
* updated yet.
*/
if (ev->any.type == ET_Motion)
ev->device_event.root = pSprite->hotPhys.pScreen->root->drawable.id;
eventinfo.event = ev;
eventinfo.device = device;
CallCallbacks(&DeviceEventCallback, (void *) &eventinfo);
}
if (event->type == ET_Motion) {
#ifdef PANORAMIX
if (!noPanoramiXExtension) {
event->root_x += pSprite->screen->x - screenInfo.screens[0]->x;
event->root_y += pSprite->screen->y - screenInfo.screens[0]->y;
}
#endif
pSprite->hotPhys.x = event->root_x;
pSprite->hotPhys.y = event->root_y;
/* do motion compression, but not if from different devices */
if (tail &&
(tail->event->any.type == ET_Motion) &&
(tail->device == device) &&
(tail->pScreen == pSprite->hotPhys.pScreen)) {
DeviceEvent *tailev = &tail->event->device_event;
tailev->root_x = pSprite->hotPhys.x;
tailev->root_y = pSprite->hotPhys.y;
tailev->time = event->time;
tail->months = currentTime.months;
return;
}
}
eventlen = event->length;
qe = malloc(sizeof(QdEventRec) + eventlen);
if (!qe)
return;
xorg_list_init(&qe->next);
qe->device = device;
qe->pScreen = pSprite->hotPhys.pScreen;
qe->months = currentTime.months;
qe->event = (InternalEvent *) (qe + 1);
memcpy(qe->event, event, eventlen);
xorg_list_append(&qe->next, &syncEvents.pending);
}
|
EnqueueEvent(InternalEvent *ev, DeviceIntPtr device)
{
QdEventPtr tail = NULL;
QdEventPtr qe;
SpritePtr pSprite = device->spriteInfo->sprite;
int eventlen;
DeviceEvent *event = &ev->device_event;
if (!xorg_list_is_empty(&syncEvents.pending))
tail = xorg_list_last_entry(&syncEvents.pending, QdEventRec, next);
NoticeTimeMillis(device, &ev->any.time);
/* Fix for key repeating bug. */
if (device->key != NULL && device->key->xkbInfo != NULL &&
event->type == ET_KeyRelease)
AccessXCancelRepeatKey(device->key->xkbInfo, event->detail.key);
if (DeviceEventCallback) {
DeviceEventInfoRec eventinfo;
/* The RECORD spec says that the root window field of motion events
* must be valid. At this point, it hasn't been filled in yet, so
* we do it here. The long expression below is necessary to get
* the current root window; the apparently reasonable alternative
* GetCurrentRootWindow()->drawable.id doesn't give you the right
* answer on the first motion event after a screen change because
* the data that GetCurrentRootWindow relies on hasn't been
* updated yet.
*/
if (ev->any.type == ET_Motion)
ev->device_event.root = pSprite->hotPhys.pScreen->root->drawable.id;
eventinfo.event = ev;
eventinfo.device = device;
CallCallbacks(&DeviceEventCallback, (void *) &eventinfo);
}
if (event->type == ET_Motion) {
#ifdef PANORAMIX
if (!noPanoramiXExtension) {
event->root_x += pSprite->screen->x - screenInfo.screens[0]->x;
event->root_y += pSprite->screen->y - screenInfo.screens[0]->y;
}
#endif
pSprite->hotPhys.x = event->root_x;
pSprite->hotPhys.y = event->root_y;
/* do motion compression, but not if from different devices */
if (tail &&
(tail->event->any.type == ET_Motion) &&
(tail->device == device) &&
(tail->pScreen == pSprite->hotPhys.pScreen)) {
DeviceEvent *tailev = &tail->event->device_event;
tailev->root_x = pSprite->hotPhys.x;
tailev->root_y = pSprite->hotPhys.y;
tailev->time = event->time;
tail->months = currentTime.months;
return;
}
}
eventlen = event->length;
qe = malloc(sizeof(QdEventRec) + eventlen);
if (!qe)
return;
xorg_list_init(&qe->next);
qe->device = device;
qe->pScreen = pSprite->hotPhys.pScreen;
qe->months = currentTime.months;
qe->event = (InternalEvent *) (qe + 1);
memcpy(qe->event, event, eventlen);
xorg_list_append(&qe->next, &syncEvents.pending);
}
|
C
|
xserver
| 0 |
CVE-2012-2890
|
https://www.cvedetails.com/cve/CVE-2012-2890/
|
CWE-399
|
https://github.com/chromium/chromium/commit/a6f7726de20450074a01493e4e85409ce3f2595a
|
a6f7726de20450074a01493e4e85409ce3f2595a
|
Unreviewed, rolling out r147402.
http://trac.webkit.org/changeset/147402
https://bugs.webkit.org/show_bug.cgi?id=112903
Source/WebCore:
* dom/Document.cpp:
(WebCore::Document::processHttpEquiv):
* loader/DocumentLoader.cpp:
(WebCore::DocumentLoader::responseReceived):
LayoutTests:
* http/tests/security/XFrameOptions/x-frame-options-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body.html:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag.html:
* http/tests/security/XFrameOptions/x-frame-options-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny.html:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny-expected.txt:
git-svn-id: svn://svn.chromium.org/blink/trunk@147450 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
SharedBuffer* DocumentLoader::parsedArchiveData() const
{
return m_parsedArchiveData.get();
}
|
SharedBuffer* DocumentLoader::parsedArchiveData() const
{
return m_parsedArchiveData.get();
}
|
C
|
Chrome
| 0 |
CVE-2019-5780
|
https://www.cvedetails.com/cve/CVE-2019-5780/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0328261c41b1b7495e1d4d4cf958f41a08aff38b
|
0328261c41b1b7495e1d4d4cf958f41a08aff38b
|
mac: Do not let synthetic events toggle "Allow JavaScript From AppleEvents"
Bug: 891697
Change-Id: I49eb77963515637df739c9d2ce83530d4e21cf15
Reviewed-on: https://chromium-review.googlesource.com/c/1308771
Reviewed-by: Elly Fong-Jones <[email protected]>
Commit-Queue: Robert Sesek <[email protected]>
Cr-Commit-Position: refs/heads/master@{#604268}
|
void BrowserCommandController::UpdateCommandsForFileSelectionDialogs() {
if (is_locked_fullscreen_)
return;
UpdateSaveAsState();
UpdateOpenFileState(&command_updater_);
}
|
void BrowserCommandController::UpdateCommandsForFileSelectionDialogs() {
if (is_locked_fullscreen_)
return;
UpdateSaveAsState();
UpdateOpenFileState(&command_updater_);
}
|
C
|
Chrome
| 0 |
CVE-2018-16790
|
https://www.cvedetails.com/cve/CVE-2018-16790/
|
CWE-125
|
https://github.com/mongodb/mongo-c-driver/commit/0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
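A hedged sketch of the corrected bounds reasoning (the names are illustrative and do not mirror the libbson internals): before trusting the length l read from the buffer, account for both the 4-byte length prefix and the 1-byte subtype that still precede the payload.
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* len: total buffer size
 * o:   offset where the binary element's int32 length field starts
 * l:   payload length as read from that field */
static bool binary_element_fits(size_t len, size_t o, uint32_t l)
{
    if (o > len)
        return false;
    size_t remaining = len - o;             /* still includes the int32 we just read */
    if (remaining < sizeof(uint32_t) + 1)   /* int32 length + subtype byte */
        return false;
    /* Compare against the data left NOT including the int32 (and the
     * subtype byte), which is the point of the fix. */
    return (size_t)l <= remaining - sizeof(uint32_t) - 1;
}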
|
test_bson_append_int32 (void)
{
bson_t *b;
bson_t *b2;
b = bson_new ();
BSON_ASSERT (bson_append_int32 (b, "a", -1, -123));
BSON_ASSERT (bson_append_int32 (b, "c", -1, 0));
BSON_ASSERT (bson_append_int32 (b, "b", -1, 123));
b2 = get_bson ("test33.bson");
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
}
|
test_bson_append_int32 (void)
{
bson_t *b;
bson_t *b2;
b = bson_new ();
BSON_ASSERT (bson_append_int32 (b, "a", -1, -123));
BSON_ASSERT (bson_append_int32 (b, "c", -1, 0));
BSON_ASSERT (bson_append_int32 (b, "b", -1, 123));
b2 = get_bson ("test33.bson");
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
}
|
C
|
mongo-c-driver
| 0 |
CVE-2011-3896
|
https://www.cvedetails.com/cve/CVE-2011-3896/
|
CWE-119
|
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
|
5925dff83699508b5e2735afb0297dfb310e159d
|
Implement a bubble that appears at the top of the screen when a tab enters
fullscreen mode via webkitRequestFullScreen(), telling the user how to exit
fullscreen.
This is implemented as an NSView rather than an NSWindow because the floating
chrome that appears in presentation mode should overlap the bubble.
Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac:
the mode in which the UI is hidden, accessible by moving the cursor to the top
of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode.
On Lion, however, fullscreen mode does not imply presentation mode: in
non-presentation fullscreen mode, the chrome is permanently shown. It is
possible to switch between presentation mode and fullscreen mode using the
presentation mode UI control.
When a tab initiates fullscreen mode on Lion, we enter presentation mode if not
in presentation mode already. When the user exits fullscreen mode using Chrome
UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we
return the user to the mode they were in before the tab entered fullscreen.
BUG=14471
TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen.
Need to test the Lion logic somehow, with no Lion trybots.
BUG=96883
Original review http://codereview.chromium.org/7890056/
TBR=thakis
Review URL: http://codereview.chromium.org/7920024
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
|
std::wstring WindowCaptionFromPageTitle(std::wstring page_title) {
#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
if (page_title.empty()) {
return UTF16ToWideHack(
l10n_util::GetStringUTF16(IDS_BROWSER_WINDOW_MAC_TAB_UNTITLED));
}
return page_title;
#else
if (page_title.empty())
return UTF16ToWideHack(l10n_util::GetStringUTF16(IDS_PRODUCT_NAME));
return UTF16ToWideHack(
l10n_util::GetStringFUTF16(IDS_BROWSER_WINDOW_TITLE_FORMAT,
WideToUTF16Hack(page_title)));
#endif
}
|
std::wstring WindowCaptionFromPageTitle(std::wstring page_title) {
#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
if (page_title.empty()) {
return UTF16ToWideHack(
l10n_util::GetStringUTF16(IDS_BROWSER_WINDOW_MAC_TAB_UNTITLED));
}
return page_title;
#else
if (page_title.empty())
return UTF16ToWideHack(l10n_util::GetStringUTF16(IDS_PRODUCT_NAME));
return UTF16ToWideHack(
l10n_util::GetStringFUTF16(IDS_BROWSER_WINDOW_TITLE_FORMAT,
WideToUTF16Hack(page_title)));
#endif
}
|
C
|
Chrome
| 0 |
CVE-2013-1828
|
https://www.cvedetails.com/cve/CVE-2013-1828/
|
CWE-20
|
https://github.com/torvalds/linux/commit/726bc6b092da4c093eb74d13c07184b18c1af0f1
|
726bc6b092da4c093eb74d13c07184b18c1af0f1
|
net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function ‘copy_from_user’,
inlined from ‘sctp_getsockopt_assoc_stats’ at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
‘copy_from_user_overflow’ declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
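A user-space analogue of the fix (a sketch under invented names, not the sctp code): the caller-supplied length is clamped to the destination size before the copy, not after it has already overrun the on-stack struct.
#include <string.h>

struct assoc_stats_sketch { int assoc_id; char counters[120]; };

static int read_stats(struct assoc_stats_sketch *dst,
                      const void *user_buf, size_t len)
{
    if (len < sizeof(int))
        return -1;                  /* must at least carry the assoc id */
    if (len > sizeof(*dst))
        len = sizeof(*dst);         /* the added clamp */
    memcpy(dst, user_buf, len);     /* stands in for copy_from_user() */
    return 0;
}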
|
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_stats sas;
struct sctp_association *asoc = NULL;
/* User must provide at least the assoc id */
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (copy_from_user(&sas, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
if (!asoc)
return -EINVAL;
sas.sas_rtxchunks = asoc->stats.rtxchunks;
sas.sas_gapcnt = asoc->stats.gapcnt;
sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
sas.sas_osacks = asoc->stats.osacks;
sas.sas_isacks = asoc->stats.isacks;
sas.sas_octrlchunks = asoc->stats.octrlchunks;
sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
sas.sas_oodchunks = asoc->stats.oodchunks;
sas.sas_iodchunks = asoc->stats.iodchunks;
sas.sas_ouodchunks = asoc->stats.ouodchunks;
sas.sas_iuodchunks = asoc->stats.iuodchunks;
sas.sas_idupchunks = asoc->stats.idupchunks;
sas.sas_opackets = asoc->stats.opackets;
sas.sas_ipackets = asoc->stats.ipackets;
/* New high max rto observed, will return 0 if not a single
* RTO update took place. obs_rto_ipaddr will be bogus
* in such a case
*/
sas.sas_maxrto = asoc->stats.max_obs_rto;
memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
sizeof(struct sockaddr_storage));
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
if (put_user(len, optlen))
return -EFAULT;
SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n",
len, sas.sas_assoc_id);
if (copy_to_user(optval, &sas, len))
return -EFAULT;
return 0;
}
|
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_stats sas;
struct sctp_association *asoc = NULL;
/* User must provide at least the assoc id */
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&sas, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
if (!asoc)
return -EINVAL;
sas.sas_rtxchunks = asoc->stats.rtxchunks;
sas.sas_gapcnt = asoc->stats.gapcnt;
sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
sas.sas_osacks = asoc->stats.osacks;
sas.sas_isacks = asoc->stats.isacks;
sas.sas_octrlchunks = asoc->stats.octrlchunks;
sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
sas.sas_oodchunks = asoc->stats.oodchunks;
sas.sas_iodchunks = asoc->stats.iodchunks;
sas.sas_ouodchunks = asoc->stats.ouodchunks;
sas.sas_iuodchunks = asoc->stats.iuodchunks;
sas.sas_idupchunks = asoc->stats.idupchunks;
sas.sas_opackets = asoc->stats.opackets;
sas.sas_ipackets = asoc->stats.ipackets;
/* New high max rto observed, will return 0 if not a single
* RTO update took place. obs_rto_ipaddr will be bogus
* in such a case
*/
sas.sas_maxrto = asoc->stats.max_obs_rto;
memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
sizeof(struct sockaddr_storage));
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (put_user(len, optlen))
return -EFAULT;
SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n",
len, sas.sas_assoc_id);
if (copy_to_user(optval, &sas, len))
return -EFAULT;
return 0;
}
|
C
|
linux
| 1 |
CVE-2018-12904
|
https://www.cvedetails.com/cve/CVE-2018-12904/
| null |
https://github.com/torvalds/linux/commit/727ba748e110b4de50d142edca9d6a9b7e6111d8
|
727ba748e110b4de50d142edca9d6a9b7e6111d8
|
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside an L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
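The shape of the added software check, as a hedged fragment (vmx_get_cpl() and kvm_inject_gp() are existing KVM helpers; the exact call site in the real patch may differ):
/* Reject VMX instructions issued from guest user mode: the VM exit fires
 * regardless of CPL, so the CPL 0 requirement must be enforced here. */
if (vmx_get_cpl(vcpu)) {
    kvm_inject_gp(vcpu, 0);     /* raise #GP(0) in the guest */
    return 0;                   /* and do not emulate the instruction */
}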
|
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t vmcs_gva;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
|
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t vmcs_gva;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
/* ok to use *_system, as hardware has verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
|
C
|
linux
| 1 |
CVE-2019-5799
|
https://www.cvedetails.com/cve/CVE-2019-5799/
|
CWE-20
|
https://github.com/chromium/chromium/commit/108147dfd1ea159fd3632ef92ccc4ab8952980c7
|
108147dfd1ea159fd3632ef92ccc4ab8952980c7
|
Inherit the navigation initiator when navigating instead of the parent/opener
Spec PR: https://github.com/w3c/webappsec-csp/pull/358
Bug: 905301, 894228, 836148
Change-Id: I43ada2266d42d1cd56dbe3c6dd89d115e878a83a
Reviewed-on: https://chromium-review.googlesource.com/c/1314633
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Mike West <[email protected]>
Cr-Commit-Position: refs/heads/master@{#610850}
|
bool Document::CanExecuteScripts(ReasonForCallingCanExecuteScripts reason) {
DCHECK(GetFrame())
<< "you are querying canExecuteScripts on a non contextDocument.";
if (IsSandboxed(kSandboxScripts) &&
!GetFrame()->GetScriptController().ShouldBypassMainWorldCSP()) {
if (reason == kAboutToExecuteScript) {
AddConsoleMessage(ConsoleMessage::Create(
kSecurityMessageSource, kErrorMessageLevel,
"Blocked script execution in '" + Url().ElidedString() +
"' because the document's frame is sandboxed and the "
"'allow-scripts' permission is not set."));
}
return false;
}
if (!GetFrame()->Client())
return false;
WebContentSettingsClient* settings_client =
GetFrame()->GetContentSettingsClient();
Settings* settings = GetFrame()->GetSettings();
bool script_enabled = settings && settings->GetScriptEnabled();
if (settings_client)
script_enabled = settings_client->AllowScript(script_enabled);
if (!script_enabled && reason == kAboutToExecuteScript && settings_client)
settings_client->DidNotAllowScript();
return script_enabled;
}
|
bool Document::CanExecuteScripts(ReasonForCallingCanExecuteScripts reason) {
DCHECK(GetFrame())
<< "you are querying canExecuteScripts on a non contextDocument.";
if (IsSandboxed(kSandboxScripts) &&
!GetFrame()->GetScriptController().ShouldBypassMainWorldCSP()) {
if (reason == kAboutToExecuteScript) {
AddConsoleMessage(ConsoleMessage::Create(
kSecurityMessageSource, kErrorMessageLevel,
"Blocked script execution in '" + Url().ElidedString() +
"' because the document's frame is sandboxed and the "
"'allow-scripts' permission is not set."));
}
return false;
}
if (!GetFrame()->Client())
return false;
WebContentSettingsClient* settings_client =
GetFrame()->GetContentSettingsClient();
Settings* settings = GetFrame()->GetSettings();
bool script_enabled = settings && settings->GetScriptEnabled();
if (settings_client)
script_enabled = settings_client->AllowScript(script_enabled);
if (!script_enabled && reason == kAboutToExecuteScript && settings_client)
settings_client->DidNotAllowScript();
return script_enabled;
}
|
C
|
Chrome
| 0 |
CVE-2013-7421
|
https://www.cvedetails.com/cve/CVE-2013-7421/
|
CWE-264
|
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
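In practice the prefixing is driven by a module-alias macro; a hedged usage sketch for a cipher module (the macro comes from this work, the surrounding module boilerplate is illustrative):
#include <linux/module.h>
#include <linux/crypto.h>

/* Requests for the "aes" algorithm now resolve to modules aliased as
 * "crypto-aes", so userspace can no longer autoload arbitrary modules
 * simply by asking the crypto API for a matching name. */
MODULE_ALIAS_CRYPTO("aes");
MODULE_LICENSE("GPL");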
|
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *dst = dctx->buffer;
if (dctx->bytes) {
u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
while (dctx->bytes--)
*tmp++ ^= 0;
kernel_fpu_begin();
clmul_ghash_mul(dst, &ctx->shash);
kernel_fpu_end();
}
dctx->bytes = 0;
}
|
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *dst = dctx->buffer;
if (dctx->bytes) {
u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
while (dctx->bytes--)
*tmp++ ^= 0;
kernel_fpu_begin();
clmul_ghash_mul(dst, &ctx->shash);
kernel_fpu_end();
}
dctx->bytes = 0;
}
|
C
|
linux
| 0 |
CVE-2013-0842
|
https://www.cvedetails.com/cve/CVE-2013-0842/
| null |
https://github.com/chromium/chromium/commit/10cbaf017570ba6454174c55b844647aa6a9b3b4
|
10cbaf017570ba6454174c55b844647aa6a9b3b4
|
Validate that paths don't contain embedded NULLs at deserialization.
BUG=166867
Review URL: https://chromiumcodereview.appspot.com/11743009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@174935 0039d316-1c4b-4281-b951-d872f2087c98
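A minimal sketch of the validation idea in plain C (not the Chromium ParamTraits code; names are invented): reject a deserialized path whose byte range contains an embedded NUL, since later NUL-terminated consumers would silently truncate it.
#include <stddef.h>
#include <stdbool.h>

static bool path_bytes_are_valid(const char *data, size_t len)
{
    for (size_t i = 0; i < len; i++)
        if (data[i] == '\0')
            return false;   /* embedded NUL: reject at deserialization */
    return true;
}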
|
bool ParamTraits<double>::Read(const Message* m, PickleIterator* iter,
param_type* r) {
const char *data;
int data_size;
if (!m->ReadData(iter, &data, &data_size) ||
data_size != sizeof(param_type)) {
NOTREACHED();
return false;
}
memcpy(r, data, sizeof(param_type));
return true;
}
|
bool ParamTraits<double>::Read(const Message* m, PickleIterator* iter,
param_type* r) {
const char *data;
int data_size;
if (!m->ReadData(iter, &data, &data_size) ||
data_size != sizeof(param_type)) {
NOTREACHED();
return false;
}
memcpy(r, data, sizeof(param_type));
return true;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
|
7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
|
Move variations prefs into the variations component
These prefs are used by variations code that is targeted for componentization.
BUG=382865
TBR=thakis
Review URL: https://codereview.chromium.org/1265423003
Cr-Commit-Position: refs/heads/master@{#343661}
|
void VariationsService::RemoveObserver(Observer* observer) {
DCHECK(thread_checker_.CalledOnValidThread());
observer_list_.RemoveObserver(observer);
}
|
void VariationsService::RemoveObserver(Observer* observer) {
DCHECK(thread_checker_.CalledOnValidThread());
observer_list_.RemoveObserver(observer);
}
|
C
|
Chrome
| 0 |
CVE-2012-5156
|
https://www.cvedetails.com/cve/CVE-2012-5156/
|
CWE-399
|
https://github.com/chromium/chromium/commit/b15c87071f906301bccc824ce013966ca93998c7
|
b15c87071f906301bccc824ce013966ca93998c7
|
Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process.
As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition.
BUG=134694
Review URL: https://chromiumcodereview.appspot.com/11143025
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98
|
void DesktopSessionWin::OnSessionAttached(uint32 session_id) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
DCHECK(launcher_.get() == NULL);
if (desktop_binary_.empty()) {
FilePath dir_path;
if (!PathService::Get(base::DIR_EXE, &dir_path)) {
LOG(ERROR) << "Failed to get the executable file name.";
OnPermanentError();
return;
}
desktop_binary_ = dir_path.Append(kDesktopBinaryName);
}
scoped_ptr<WtsSessionProcessDelegate> delegate(
new WtsSessionProcessDelegate(main_task_runner_, io_task_runner_,
desktop_binary_, session_id,
true, kDaemonIpcSecurityDescriptor));
launcher_.reset(new WorkerProcessLauncher(
main_task_runner_, delegate.Pass(), this));
}
|
void DesktopSessionWin::OnSessionAttached(uint32 session_id) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
DCHECK(launcher_.get() == NULL);
if (desktop_binary_.empty()) {
FilePath dir_path;
if (!PathService::Get(base::DIR_EXE, &dir_path)) {
LOG(ERROR) << "Failed to get the executable file name.";
OnPermanentError();
return;
}
desktop_binary_ = dir_path.Append(kDesktopBinaryName);
}
scoped_ptr<WtsSessionProcessDelegate> delegate(
new WtsSessionProcessDelegate(main_task_runner_, io_task_runner_,
desktop_binary_, session_id,
true, kDaemonIpcSecurityDescriptor));
launcher_.reset(new WorkerProcessLauncher(
main_task_runner_, delegate.Pass(), this));
}
|
C
|
Chrome
| 0 |
CVE-2019-1010297
|
https://www.cvedetails.com/cve/CVE-2019-1010297/
|
CWE-119
|
https://github.com/OP-TEE/optee_os/commit/a637243270fc1faae16de059091795c32d86e65e
|
a637243270fc1faae16de059091795c32d86e65e
|
svc: check for allocation overflow in crypto calls
Without checking for overflow there is a risk of allocating a buffer
smaller than anticipated; as a consequence, this might lead to a
heap-based overflow with attacker-controlled data written outside the
boundaries of the buffer.
Fixes: OP-TEE-2018-0010: "Integer overflow in crypto system calls (x2)"
Signed-off-by: Joakim Bech <[email protected]>
Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8)
Reviewed-by: Jens Wiklander <[email protected]>
Reported-by: Riscure <[email protected]>
Reported-by: Alyssa Milburn <[email protected]>
Acked-by: Etienne Carriere <[email protected]>
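A portable sketch of the added check (the actual fix uses OP-TEE's own overflow helper; the names here are illustrative): verify that num_params * sizeof(TEE_Attribute) cannot wrap before calling malloc().
#include <stdint.h>
#include <stdlib.h>

static void *alloc_attrs(size_t num_params, size_t elem_size)
{
    if (elem_size && num_params > SIZE_MAX / elem_size)
        return NULL;                            /* product would wrap: refuse */
    return malloc(num_params * elem_size);      /* now known not to overflow */
}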
|
TEE_Result syscall_asymm_verify(unsigned long state,
const struct utee_attribute *usr_params,
size_t num_params, const void *data, size_t data_len,
const void *sig, size_t sig_len)
{
TEE_Result res;
struct tee_cryp_state *cs;
struct tee_ta_session *sess;
struct tee_obj *o;
size_t hash_size;
int salt_len = 0;
TEE_Attribute *params = NULL;
uint32_t hash_algo;
struct user_ta_ctx *utc;
res = tee_ta_get_current_session(&sess);
if (res != TEE_SUCCESS)
return res;
utc = to_user_ta_ctx(sess->ctx);
res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
if (res != TEE_SUCCESS)
return res;
if (cs->mode != TEE_MODE_VERIFY)
return TEE_ERROR_BAD_PARAMETERS;
res = tee_mmu_check_access_rights(utc,
TEE_MEMORY_ACCESS_READ |
TEE_MEMORY_ACCESS_ANY_OWNER,
(uaddr_t)data, data_len);
if (res != TEE_SUCCESS)
return res;
res = tee_mmu_check_access_rights(utc,
TEE_MEMORY_ACCESS_READ |
TEE_MEMORY_ACCESS_ANY_OWNER,
(uaddr_t)sig, sig_len);
if (res != TEE_SUCCESS)
return res;
params = malloc(sizeof(TEE_Attribute) * num_params);
if (!params)
return TEE_ERROR_OUT_OF_MEMORY;
res = copy_in_attrs(utc, usr_params, num_params, params);
if (res != TEE_SUCCESS)
goto out;
res = tee_obj_get(utc, cs->key1, &o);
if (res != TEE_SUCCESS)
goto out;
if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) {
res = TEE_ERROR_BAD_PARAMETERS;
goto out;
}
switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) {
case TEE_MAIN_ALGO_RSA:
if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) {
hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
res = tee_hash_get_digest_size(hash_algo, &hash_size);
if (res != TEE_SUCCESS)
break;
if (data_len != hash_size) {
res = TEE_ERROR_BAD_PARAMETERS;
break;
}
salt_len = pkcs1_get_salt_len(params, num_params,
hash_size);
}
res = crypto_acipher_rsassa_verify(cs->algo, o->attr, salt_len,
data, data_len, sig,
sig_len);
break;
case TEE_MAIN_ALGO_DSA:
hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
res = tee_hash_get_digest_size(hash_algo, &hash_size);
if (res != TEE_SUCCESS)
break;
/*
* Depending on the DSA algorithm (NIST), the digital signature
* output size may be truncated to the size of a key pair
* (Q prime size). Q prime size must be less than or equal to the
* hash output length of the hash algorithm involved.
*/
if (data_len > hash_size) {
res = TEE_ERROR_BAD_PARAMETERS;
break;
}
res = crypto_acipher_dsa_verify(cs->algo, o->attr, data,
data_len, sig, sig_len);
break;
case TEE_MAIN_ALGO_ECDSA:
res = crypto_acipher_ecc_verify(cs->algo, o->attr, data,
data_len, sig, sig_len);
break;
default:
res = TEE_ERROR_NOT_SUPPORTED;
}
out:
free(params);
return res;
}
|
TEE_Result syscall_asymm_verify(unsigned long state,
const struct utee_attribute *usr_params,
size_t num_params, const void *data, size_t data_len,
const void *sig, size_t sig_len)
{
TEE_Result res;
struct tee_cryp_state *cs;
struct tee_ta_session *sess;
struct tee_obj *o;
size_t hash_size;
int salt_len = 0;
TEE_Attribute *params = NULL;
uint32_t hash_algo;
struct user_ta_ctx *utc;
res = tee_ta_get_current_session(&sess);
if (res != TEE_SUCCESS)
return res;
utc = to_user_ta_ctx(sess->ctx);
res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
if (res != TEE_SUCCESS)
return res;
if (cs->mode != TEE_MODE_VERIFY)
return TEE_ERROR_BAD_PARAMETERS;
res = tee_mmu_check_access_rights(utc,
TEE_MEMORY_ACCESS_READ |
TEE_MEMORY_ACCESS_ANY_OWNER,
(uaddr_t)data, data_len);
if (res != TEE_SUCCESS)
return res;
res = tee_mmu_check_access_rights(utc,
TEE_MEMORY_ACCESS_READ |
TEE_MEMORY_ACCESS_ANY_OWNER,
(uaddr_t)sig, sig_len);
if (res != TEE_SUCCESS)
return res;
params = malloc(sizeof(TEE_Attribute) * num_params);
if (!params)
return TEE_ERROR_OUT_OF_MEMORY;
res = copy_in_attrs(utc, usr_params, num_params, params);
if (res != TEE_SUCCESS)
goto out;
res = tee_obj_get(utc, cs->key1, &o);
if (res != TEE_SUCCESS)
goto out;
if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) {
res = TEE_ERROR_BAD_PARAMETERS;
goto out;
}
switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) {
case TEE_MAIN_ALGO_RSA:
if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) {
hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
res = tee_hash_get_digest_size(hash_algo, &hash_size);
if (res != TEE_SUCCESS)
break;
if (data_len != hash_size) {
res = TEE_ERROR_BAD_PARAMETERS;
break;
}
salt_len = pkcs1_get_salt_len(params, num_params,
hash_size);
}
res = crypto_acipher_rsassa_verify(cs->algo, o->attr, salt_len,
data, data_len, sig,
sig_len);
break;
case TEE_MAIN_ALGO_DSA:
hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
res = tee_hash_get_digest_size(hash_algo, &hash_size);
if (res != TEE_SUCCESS)
break;
/*
* Depending on the DSA algorithm (NIST), the digital signature
* output size may be truncated to the size of a key pair
* (Q prime size). Q prime size must be less than or equal to the
* hash output length of the hash algorithm involved.
*/
if (data_len > hash_size) {
res = TEE_ERROR_BAD_PARAMETERS;
break;
}
res = crypto_acipher_dsa_verify(cs->algo, o->attr, data,
data_len, sig, sig_len);
break;
case TEE_MAIN_ALGO_ECDSA:
res = crypto_acipher_ecc_verify(cs->algo, o->attr, data,
data_len, sig, sig_len);
break;
default:
res = TEE_ERROR_NOT_SUPPORTED;
}
out:
free(params);
return res;
}
|
C
|
optee_os
| 0 |
CVE-2018-18710
|
https://www.cvedetails.com/cve/CVE-2018-18710/
|
CWE-200
|
https://github.com/torvalds/linux/commit/e4f3aa2e1e67bb48dfbaaf1cad59013d5a5bc276
|
e4f3aa2e1e67bb48dfbaaf1cad59013d5a5bc276
|
cdrom: fix improper type cast, which can lead to an information leak.
There is another cast from unsigned long to int which causes
a bounds check to fail with specially crafted input. The value is
then used as an index in the slot array in cdrom_slot_status().
This issue is similar to CVE-2018-16658 and CVE-2018-10940.
Signed-off-by: Young_X <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
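The truncating cast is easy to demonstrate in isolation (hypothetical values, not the cdrom driver code): on common ABIs the conversion to int keeps only the low 32 bits, so a crafted argument passes the signed bounds check and is later reused as an out-of-range index.
#include <stdio.h>

int main(void)
{
    unsigned long arg = 0x100000001UL;      /* crafted ioctl argument */
    int capacity = 4;                       /* number of valid slots */

    if ((int)arg < capacity)                /* truncates to 1 on common ABIs */
        puts("buggy check: crafted value accepted as slot index");
    if (arg >= (unsigned long)capacity)     /* fixed, untruncated comparison */
        puts("fixed check: crafted value rejected");
    return 0;
}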
|
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
unsigned int cmd)
{
struct cdrom_volctrl volctrl;
unsigned char buffer[32];
char mask[sizeof(buffer)];
unsigned short offset;
int ret;
cd_dbg(CD_DO_IOCTL, "entering CDROMVOLUME\n");
if (copy_from_user(&volctrl, (struct cdrom_volctrl __user *)arg,
sizeof(volctrl)))
return -EFAULT;
cgc->buffer = buffer;
cgc->buflen = 24;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
/* originally the code depended on buffer[1] to determine
how much data is available for transfer. buffer[1] is
unfortunately ambiguous and the only reliable way seems
to be to simply skip over the block descriptor... */
offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
if (offset + 16 > sizeof(buffer))
return -E2BIG;
if (offset + 16 > cgc->buflen) {
cgc->buflen = offset + 16;
ret = cdrom_mode_sense(cdi, cgc,
GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
}
/* sanity check */
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
buffer[offset + 1] < 14)
return -EINVAL;
/* now we have the current volume settings. if it was only
a CDROMVOLREAD, return these values */
if (cmd == CDROMVOLREAD) {
volctrl.channel0 = buffer[offset+9];
volctrl.channel1 = buffer[offset+11];
volctrl.channel2 = buffer[offset+13];
volctrl.channel3 = buffer[offset+15];
if (copy_to_user((struct cdrom_volctrl __user *)arg, &volctrl,
sizeof(volctrl)))
return -EFAULT;
return 0;
}
/* get the volume mask */
cgc->buffer = mask;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
if (ret)
return ret;
buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
/* set volume */
cgc->buffer = buffer + offset - 8;
memset(cgc->buffer, 0, 8);
return cdrom_mode_select(cdi, cgc);
}
|
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
unsigned int cmd)
{
struct cdrom_volctrl volctrl;
unsigned char buffer[32];
char mask[sizeof(buffer)];
unsigned short offset;
int ret;
cd_dbg(CD_DO_IOCTL, "entering CDROMVOLUME\n");
if (copy_from_user(&volctrl, (struct cdrom_volctrl __user *)arg,
sizeof(volctrl)))
return -EFAULT;
cgc->buffer = buffer;
cgc->buflen = 24;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
/* originally the code depended on buffer[1] to determine
how much data is available for transfer. buffer[1] is
unfortunately ambiguous and the only reliable way seems
to be to simply skip over the block descriptor... */
offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
if (offset + 16 > sizeof(buffer))
return -E2BIG;
if (offset + 16 > cgc->buflen) {
cgc->buflen = offset + 16;
ret = cdrom_mode_sense(cdi, cgc,
GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
}
/* sanity check */
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
buffer[offset + 1] < 14)
return -EINVAL;
/* now we have the current volume settings. if it was only
a CDROMVOLREAD, return these values */
if (cmd == CDROMVOLREAD) {
volctrl.channel0 = buffer[offset+9];
volctrl.channel1 = buffer[offset+11];
volctrl.channel2 = buffer[offset+13];
volctrl.channel3 = buffer[offset+15];
if (copy_to_user((struct cdrom_volctrl __user *)arg, &volctrl,
sizeof(volctrl)))
return -EFAULT;
return 0;
}
/* get the volume mask */
cgc->buffer = mask;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
if (ret)
return ret;
buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
/* set volume */
cgc->buffer = buffer + offset - 8;
memset(cgc->buffer, 0, 8);
return cdrom_mode_select(cdi, cgc);
}
|
C
|
linux
| 0 |
CVE-2018-1000039
|
https://www.cvedetails.com/cve/CVE-2018-1000039/
|
CWE-416
|
http://git.ghostscript.com/?p=mupdf.git;a=commitdiff;h=4dcc6affe04368461310a21238f7e1871a752a05;hp=8ec561d1bccc46e9db40a9f61310cd8b3763914e
|
4dcc6affe04368461310a21238f7e1871a752a05
| null |
pdf_show_path(fz_context *ctx, pdf_run_processor *pr, int doclose, int dofill, int dostroke, int even_odd)
{
pdf_gstate *gstate = pr->gstate + pr->gtop;
fz_path *path;
fz_rect bbox;
softmask_save softmask = { NULL };
int knockout_group = 0;
if (dostroke) {
if (pr->dev->flags & (FZ_DEVFLAG_STROKECOLOR_UNDEFINED | FZ_DEVFLAG_LINEJOIN_UNDEFINED | FZ_DEVFLAG_LINEWIDTH_UNDEFINED))
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
else if (gstate->stroke_state->dash_len != 0 && pr->dev->flags & (FZ_DEVFLAG_STARTCAP_UNDEFINED | FZ_DEVFLAG_DASHCAP_UNDEFINED | FZ_DEVFLAG_ENDCAP_UNDEFINED))
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
else if (gstate->stroke_state->linejoin == FZ_LINEJOIN_MITER && (pr->dev->flags & FZ_DEVFLAG_MITERLIMIT_UNDEFINED))
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
}
if (dofill) {
if (pr->dev->flags & FZ_DEVFLAG_FILLCOLOR_UNDEFINED)
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
}
path = pr->path;
pr->path = fz_new_path(ctx);
fz_try(ctx)
{
if (doclose)
fz_closepath(ctx, path);
fz_bound_path(ctx, path, (dostroke ? gstate->stroke_state : NULL), &gstate->ctm, &bbox);
if (pr->clip)
{
gstate->clip_depth++;
fz_clip_path(ctx, pr->dev, path, pr->clip_even_odd, &gstate->ctm, &bbox);
pr->clip = 0;
}
if (pr->super.hidden)
dostroke = dofill = 0;
if (dofill || dostroke)
gstate = pdf_begin_group(ctx, pr, &bbox, &softmask);
if (dofill && dostroke)
{
/* We may need to push a knockout group */
if (gstate->stroke.alpha == 0)
{
/* No need for group, as stroke won't do anything */
}
else if (gstate->stroke.alpha == 1.0f && gstate->blendmode == FZ_BLEND_NORMAL)
{
/* No need for group, as stroke won't show up */
}
else
{
knockout_group = 1;
fz_begin_group(ctx, pr->dev, &bbox, NULL, 0, 1, FZ_BLEND_NORMAL, 1);
}
}
if (dofill)
{
switch (gstate->fill.kind)
{
case PDF_MAT_NONE:
break;
case PDF_MAT_COLOR:
fz_fill_path(ctx, pr->dev, path, even_odd, &gstate->ctm,
gstate->fill.colorspace, gstate->fill.v, gstate->fill.alpha, &gstate->fill.color_params);
break;
case PDF_MAT_PATTERN:
if (gstate->fill.pattern)
{
fz_clip_path(ctx, pr->dev, path, even_odd, &gstate->ctm, &bbox);
pdf_show_pattern(ctx, pr, gstate->fill.pattern, &pr->gstate[gstate->fill.gstate_num], &bbox, PDF_FILL);
fz_pop_clip(ctx, pr->dev);
}
break;
case PDF_MAT_SHADE:
if (gstate->fill.shade)
{
fz_clip_path(ctx, pr->dev, path, even_odd, &gstate->ctm, &bbox);
/* The cluster and page 2 of patterns.pdf shows that fz_fill_shade should NOT be called with gstate->ctm. */
fz_fill_shade(ctx, pr->dev, gstate->fill.shade, &pr->gstate[gstate->fill.gstate_num].ctm, gstate->fill.alpha, &gstate->fill.color_params);
fz_pop_clip(ctx, pr->dev);
}
break;
}
}
if (dostroke)
{
switch (gstate->stroke.kind)
{
case PDF_MAT_NONE:
break;
case PDF_MAT_COLOR:
fz_stroke_path(ctx, pr->dev, path, gstate->stroke_state, &gstate->ctm,
gstate->stroke.colorspace, gstate->stroke.v, gstate->stroke.alpha, &gstate->stroke.color_params);
break;
case PDF_MAT_PATTERN:
if (gstate->stroke.pattern)
{
fz_clip_stroke_path(ctx, pr->dev, path, gstate->stroke_state, &gstate->ctm, &bbox);
pdf_show_pattern(ctx, pr, gstate->stroke.pattern, &pr->gstate[gstate->stroke.gstate_num], &bbox, PDF_STROKE);
fz_pop_clip(ctx, pr->dev);
}
break;
case PDF_MAT_SHADE:
if (gstate->stroke.shade)
{
fz_clip_stroke_path(ctx, pr->dev, path, gstate->stroke_state, &gstate->ctm, &bbox);
fz_fill_shade(ctx, pr->dev, gstate->stroke.shade, &pr->gstate[gstate->stroke.gstate_num].ctm, gstate->stroke.alpha, &gstate->stroke.color_params);
fz_pop_clip(ctx, pr->dev);
}
break;
}
}
if (knockout_group)
fz_end_group(ctx, pr->dev);
if (dofill || dostroke)
pdf_end_group(ctx, pr, &softmask);
}
fz_always(ctx)
{
fz_drop_path(ctx, path);
}
fz_catch(ctx)
{
fz_rethrow(ctx);
}
}
|
pdf_show_path(fz_context *ctx, pdf_run_processor *pr, int doclose, int dofill, int dostroke, int even_odd)
{
pdf_gstate *gstate = pr->gstate + pr->gtop;
fz_path *path;
fz_rect bbox;
softmask_save softmask = { NULL };
int knockout_group = 0;
if (dostroke) {
if (pr->dev->flags & (FZ_DEVFLAG_STROKECOLOR_UNDEFINED | FZ_DEVFLAG_LINEJOIN_UNDEFINED | FZ_DEVFLAG_LINEWIDTH_UNDEFINED))
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
else if (gstate->stroke_state->dash_len != 0 && pr->dev->flags & (FZ_DEVFLAG_STARTCAP_UNDEFINED | FZ_DEVFLAG_DASHCAP_UNDEFINED | FZ_DEVFLAG_ENDCAP_UNDEFINED))
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
else if (gstate->stroke_state->linejoin == FZ_LINEJOIN_MITER && (pr->dev->flags & FZ_DEVFLAG_MITERLIMIT_UNDEFINED))
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
}
if (dofill) {
if (pr->dev->flags & FZ_DEVFLAG_FILLCOLOR_UNDEFINED)
pr->dev->flags |= FZ_DEVFLAG_UNCACHEABLE;
}
path = pr->path;
pr->path = fz_new_path(ctx);
fz_try(ctx)
{
if (doclose)
fz_closepath(ctx, path);
fz_bound_path(ctx, path, (dostroke ? gstate->stroke_state : NULL), &gstate->ctm, &bbox);
if (pr->clip)
{
gstate->clip_depth++;
fz_clip_path(ctx, pr->dev, path, pr->clip_even_odd, &gstate->ctm, &bbox);
pr->clip = 0;
}
if (pr->super.hidden)
dostroke = dofill = 0;
if (dofill || dostroke)
gstate = pdf_begin_group(ctx, pr, &bbox, &softmask);
if (dofill && dostroke)
{
/* We may need to push a knockout group */
if (gstate->stroke.alpha == 0)
{
/* No need for group, as stroke won't do anything */
}
else if (gstate->stroke.alpha == 1.0f && gstate->blendmode == FZ_BLEND_NORMAL)
{
/* No need for group, as stroke won't show up */
}
else
{
knockout_group = 1;
fz_begin_group(ctx, pr->dev, &bbox, NULL, 0, 1, FZ_BLEND_NORMAL, 1);
}
}
if (dofill)
{
switch (gstate->fill.kind)
{
case PDF_MAT_NONE:
break;
case PDF_MAT_COLOR:
fz_fill_path(ctx, pr->dev, path, even_odd, &gstate->ctm,
gstate->fill.colorspace, gstate->fill.v, gstate->fill.alpha, &gstate->fill.color_params);
break;
case PDF_MAT_PATTERN:
if (gstate->fill.pattern)
{
fz_clip_path(ctx, pr->dev, path, even_odd, &gstate->ctm, &bbox);
pdf_show_pattern(ctx, pr, gstate->fill.pattern, &pr->gstate[gstate->fill.gstate_num], &bbox, PDF_FILL);
fz_pop_clip(ctx, pr->dev);
}
break;
case PDF_MAT_SHADE:
if (gstate->fill.shade)
{
fz_clip_path(ctx, pr->dev, path, even_odd, &gstate->ctm, &bbox);
/* The cluster and page 2 of patterns.pdf shows that fz_fill_shade should NOT be called with gstate->ctm. */
fz_fill_shade(ctx, pr->dev, gstate->fill.shade, &pr->gstate[gstate->fill.gstate_num].ctm, gstate->fill.alpha, &gstate->fill.color_params);
fz_pop_clip(ctx, pr->dev);
}
break;
}
}
if (dostroke)
{
switch (gstate->stroke.kind)
{
case PDF_MAT_NONE:
break;
case PDF_MAT_COLOR:
fz_stroke_path(ctx, pr->dev, path, gstate->stroke_state, &gstate->ctm,
gstate->stroke.colorspace, gstate->stroke.v, gstate->stroke.alpha, &gstate->stroke.color_params);
break;
case PDF_MAT_PATTERN:
if (gstate->stroke.pattern)
{
fz_clip_stroke_path(ctx, pr->dev, path, gstate->stroke_state, &gstate->ctm, &bbox);
pdf_show_pattern(ctx, pr, gstate->stroke.pattern, &pr->gstate[gstate->stroke.gstate_num], &bbox, PDF_STROKE);
fz_pop_clip(ctx, pr->dev);
}
break;
case PDF_MAT_SHADE:
if (gstate->stroke.shade)
{
fz_clip_stroke_path(ctx, pr->dev, path, gstate->stroke_state, &gstate->ctm, &bbox);
fz_fill_shade(ctx, pr->dev, gstate->stroke.shade, &pr->gstate[gstate->stroke.gstate_num].ctm, gstate->stroke.alpha, &gstate->stroke.color_params);
fz_pop_clip(ctx, pr->dev);
}
break;
}
}
if (knockout_group)
fz_end_group(ctx, pr->dev);
if (dofill || dostroke)
pdf_end_group(ctx, pr, &softmask);
}
fz_always(ctx)
{
fz_drop_path(ctx, path);
}
fz_catch(ctx)
{
fz_rethrow(ctx);
}
}
|
C
|
ghostscript
| 0 |
CVE-2013-3235
|
https://www.cvedetails.com/cve/CVE-2013-3235/
|
CWE-200
|
https://github.com/torvalds/linux/commit/60085c3d009b0df252547adb336d1ccca5ce52ec
|
60085c3d009b0df252547adb336d1ccca5ce52ec
|
tipc: fix info leaks via msg_name in recv_msg/recv_stream
The code in set_orig_addr() does not initialize all of the members of
struct sockaddr_tipc when filling the sockaddr info -- namely the union
is only partly filled. This will make recv_msg() and recv_stream() --
the only users of this function -- leak kernel stack memory as the
msg_name member is a local variable in net/socket.c.
In addition, both recv_msg() and recv_stream() fail to update
the msg_namelen member to 0 while otherwise returning with 0, i.e.
"success". This is the case for, e.g., non-blocking sockets. This will
lead to a 128 byte kernel stack leak in net/socket.c.
Fix the first issue by initializing the memory of the union with
memset(0). Fix the second one by setting msg_namelen to 0 early as it
will be updated later if we're going to fill the msg_name member.
Cc: Jon Maloy <[email protected]>
Cc: Allan Stephens <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
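A compact sketch of the first part of the fix (the struct is a stand-in for sockaddr_tipc, not the real definition): zero the whole object before filling only part of its union, so no stale stack bytes travel back to userspace.
#include <string.h>

struct addr_sketch {
    unsigned short family;
    union { unsigned int port_id; char name[16]; } addr;
};

static void set_orig_addr_sketch(struct addr_sketch *a, unsigned int port_id)
{
    memset(a, 0, sizeof(*a));   /* the fix: no uninitialized union/padding bytes */
    a->family = 30;             /* AF_TIPC's numeric value, for illustration */
    a->addr.port_id = port_id;  /* only this arm of the union is meaningful */
}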
|
static int send_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
int needs_conn;
long timeout_val;
int res = -EINVAL;
if (unlikely(!dest))
return -EDESTADDRREQ;
if (unlikely((m->msg_namelen < sizeof(*dest)) ||
(dest->family != AF_TIPC)))
return -EINVAL;
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
if (iocb)
lock_sock(sk);
needs_conn = (sock->state != SS_READY);
if (unlikely(needs_conn)) {
if (sock->state == SS_LISTENING) {
res = -EPIPE;
goto exit;
}
if (sock->state != SS_UNCONNECTED) {
res = -EISCONN;
goto exit;
}
if ((tport->published) ||
((sock->type == SOCK_STREAM) && (total_len != 0))) {
res = -EOPNOTSUPP;
goto exit;
}
if (dest->addrtype == TIPC_ADDR_NAME) {
tport->conn_type = dest->addr.name.name.type;
tport->conn_instance = dest->addr.name.name.instance;
}
/* Abort any pending connection attempts (very unlikely) */
reject_rx_queue(sk);
}
timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_send2name(tport->ref,
&dest->addr.name.name,
dest->addr.name.domain,
m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_ID) {
res = tipc_send2port(tport->ref,
&dest->addr.id,
m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_MCAST) {
if (needs_conn) {
res = -EOPNOTSUPP;
break;
}
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_multicast(tport->ref,
&dest->addr.nameseq,
m->msg_iovlen,
m->msg_iov,
total_len);
}
if (likely(res != -ELINKCONG)) {
if (needs_conn && (res >= 0))
sock->state = SS_CONNECTING;
break;
}
if (timeout_val <= 0L) {
res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
!tport->congested, timeout_val);
lock_sock(sk);
} while (1);
exit:
if (iocb)
release_sock(sk);
return res;
}
|
static int send_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
int needs_conn;
long timeout_val;
int res = -EINVAL;
if (unlikely(!dest))
return -EDESTADDRREQ;
if (unlikely((m->msg_namelen < sizeof(*dest)) ||
(dest->family != AF_TIPC)))
return -EINVAL;
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
if (iocb)
lock_sock(sk);
needs_conn = (sock->state != SS_READY);
if (unlikely(needs_conn)) {
if (sock->state == SS_LISTENING) {
res = -EPIPE;
goto exit;
}
if (sock->state != SS_UNCONNECTED) {
res = -EISCONN;
goto exit;
}
if ((tport->published) ||
((sock->type == SOCK_STREAM) && (total_len != 0))) {
res = -EOPNOTSUPP;
goto exit;
}
if (dest->addrtype == TIPC_ADDR_NAME) {
tport->conn_type = dest->addr.name.name.type;
tport->conn_instance = dest->addr.name.name.instance;
}
/* Abort any pending connection attempts (very unlikely) */
reject_rx_queue(sk);
}
timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_send2name(tport->ref,
&dest->addr.name.name,
dest->addr.name.domain,
m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_ID) {
res = tipc_send2port(tport->ref,
&dest->addr.id,
m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_MCAST) {
if (needs_conn) {
res = -EOPNOTSUPP;
break;
}
res = dest_name_check(dest, m);
if (res)
break;
res = tipc_multicast(tport->ref,
&dest->addr.nameseq,
m->msg_iovlen,
m->msg_iov,
total_len);
}
if (likely(res != -ELINKCONG)) {
if (needs_conn && (res >= 0))
sock->state = SS_CONNECTING;
break;
}
if (timeout_val <= 0L) {
res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
!tport->congested, timeout_val);
lock_sock(sk);
} while (1);
exit:
if (iocb)
release_sock(sk);
return res;
}
|
C
|
linux
| 0 |
CVE-2013-2017
|
https://www.cvedetails.com/cve/CVE-2013-2017/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6ec82562ffc6f297d0de36d65776cff8e5704867
|
6ec82562ffc6f297d0de36d65776cff8e5704867
|
veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume that
dev_forward_skb() also consumes the skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume the skb, and veth to
not double-free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
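The resulting ownership rule on the caller side, as a hedged kernel-style sketch (not the exact veth_xmit code): once the skb has been handed to dev_forward_skb(), it must not be freed again, whatever the return value.
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t forward_one(struct net_device *dev, struct net_device *rcv,
                               struct sk_buff *skb)
{
    if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
        dev->stats.tx_dropped++;    /* the skb was already freed inside */
    return NETDEV_TX_OK;            /* no kfree_skb(skb) in either branch */
}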
|
static void __hw_addr_init(struct netdev_hw_addr_list *list)
{
INIT_LIST_HEAD(&list->list);
list->count = 0;
}
|
static void __hw_addr_init(struct netdev_hw_addr_list *list)
{
INIT_LIST_HEAD(&list->list);
list->count = 0;
}
|
C
|
linux
| 0 |
CVE-2014-3690
|
https://www.cvedetails.com/cve/CVE-2014-3690/
|
CWE-399
|
https://github.com/torvalds/linux/commit/d974baa398f34393db76be45f7d4d04fbdbb4a0a
|
d974baa398f34393db76be45f7d4d04fbdbb4a0a
|
x86,kvm,vmx: Preserve CR4 across VM entry
CR4 isn't constant; at least the TSD and PCE bits can vary.
TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
like it's correct.
This adds a branch and a read from cr4 to each vm entry. Because it is
extremely likely that consecutive entries into the same vcpu will have
the same host cr4 value, this fixes up the vmcs instead of restoring cr4
after the fact. A subsequent patch will add a kernel-wide cr4 shadow,
reducing the overhead in the common case to just two memory reads and a
branch.
Signed-off-by: Andy Lutomirski <[email protected]>
Acked-by: Paolo Bonzini <[email protected]>
Cc: [email protected]
Cc: Petr Matousek <[email protected]>
Cc: Gleb Natapov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
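A hedged fragment showing the shape of the entry-path change (field and helper names are illustrative; the real code lives in the VMX run path): re-read host CR4 on each entry and rewrite the VMCS host-state field only when it has changed.
/* Host CR4 can change between entries (e.g. the TSD and PCE bits), so
 * refresh the cached copy and the VMCS field instead of assuming it is
 * constant. */
unsigned long cr4 = read_cr4();
if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
    vmcs_writel(HOST_CR4, cr4);             /* keep the VMCS host CR4 current */
    vmx->host_state.vmcs_host_cr4 = cr4;    /* skip the vmwrite next time */
}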
|
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
get_debugreg(vcpu->arch.dr6, 6);
vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
|
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
get_debugreg(vcpu->arch.dr6, 6);
vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
|
C
|
linux
| 0 |
CVE-2014-8481
|
https://www.cvedetails.com/cve/CVE-2014-8481/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a430c9166312e1aa3d80bce32374233bdbfeba32
|
a430c9166312e1aa3d80bce32374233bdbfeba32
|
KVM: emulate: avoid accessing NULL ctxt->memopp
A failure to decode the instruction can cause a NULL pointer access.
This is fixed simply by moving the "done" label as close as possible
to the return.
This fixes CVE-2014-8481.
Reported-by: Andy Lutomirski <[email protected]>
Cc: [email protected]
Fixes: 41061cdb98a0bec464278b4db8e894a3121671f5
Signed-off-by: Paolo Bonzini <[email protected]>
|
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
|
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
|
C
|
linux
| 0 |
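The commit message above attributes the NULL dereference to fixup code that ran after a failed decode, before ctxt->memopp had been set up, and fixes it by moving the "done" label so that fixup is skipped on the failure path. A hedged, stand-alone sketch of the fixed arrangement; struct ctxt and decode() here are illustrative, not the x86 emulator's real types.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct operand { unsigned long addr; };

struct ctxt {
    struct operand *memopp;   /* may legitimately stay NULL */
    int rip_relative;
    unsigned long eip;
};

/* Decode an "instruction". On early failure we jump straight to done;
 * the RIP-relative fixup that touches ctxt->memopp sits BEFORE the
 * label, so it never runs when memopp was never set up. Placing the
 * label above that fixup is the NULL-dereference bug being described. */
static int decode(struct ctxt *ctxt, const char *insn)
{
    int rc = 0;

    if (insn == NULL || insn[0] == '\0') {
        rc = -1;              /* decode failed: memopp is still NULL */
        goto done;
    }

    ctxt->memopp = calloc(1, sizeof(*ctxt->memopp));
    if (!ctxt->memopp) {
        rc = -1;
        goto done;
    }
    ctxt->rip_relative = (strchr(insn, '%') != NULL);
    ctxt->memopp->addr = (unsigned long)strlen(insn);

    /* fixup only runs on the success path, where memopp is valid */
    if (ctxt->rip_relative)
        ctxt->memopp->addr += ctxt->eip;

done:
    return rc;
}

int main(void)
{
    struct ctxt c = { .eip = 0x1000 };
    printf("bad insn  -> %d\n", decode(&c, ""));         /* -1, no crash */
    printf("good insn -> %d\n", decode(&c, "mov %rax")); /* 0 */
    free(c.memopp);
    return 0;
}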
CVE-2011-3055
|
https://www.cvedetails.com/cve/CVE-2011-3055/
| null |
https://github.com/chromium/chromium/commit/e9372a1bfd3588a80fcf49aa07321f0971dd6091
|
e9372a1bfd3588a80fcf49aa07321f0971dd6091
|
[V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
v8::Handle<v8::Value> V8WebGLRenderingContext::vertexAttrib4fvCallback(const v8::Arguments& args)
{
INC_STATS("DOM.WebGLRenderingContext.vertexAttrib4fv()");
return vertexAttribAndUniformHelperf(args, kVertexAttrib4v);
}
|
v8::Handle<v8::Value> V8WebGLRenderingContext::vertexAttrib4fvCallback(const v8::Arguments& args)
{
INC_STATS("DOM.WebGLRenderingContext.vertexAttrib4fv()");
return vertexAttribAndUniformHelperf(args, kVertexAttrib4v);
}
|
C
|
Chrome
| 0 |
CVE-2018-0429
|
https://www.cvedetails.com/cve/CVE-2018-0429/
|
CWE-119
|
https://github.com/cisco/thor/commit/18de8f9f0762c3a542b1122589edb8af859d9813
|
18de8f9f0762c3a542b1122589edb8af859d9813
|
Fix possible stack overflows in decoder for illegal bit streams
Fixes CVE-2018-0429
A vulnerability in the Thor decoder (available at:
https://github.com/cisco/thor) could allow an authenticated, local
attacker to cause segmentation faults and stack overflows when using a
non-conformant Thor bitstream as input.
The vulnerability is due to lack of input validation when parsing the
bitstream. A successful exploit could allow the attacker to cause a
stack overflow and potentially inject and execute arbitrary code.
|
void read_frame_header(decoder_info_t *dec_info, stream_t *stream) {
frame_info_t *frame_info = &dec_info->frame_info;
frame_info->frame_type = get_flc(1, stream);
frame_info->qp = get_flc(8, stream);
frame_info->num_intra_modes = get_flc(4, stream);
if (frame_info->frame_type != I_FRAME) {
frame_info->num_ref = get_flc(2, stream) + 1;
int r;
for (r = 0; r < frame_info->num_ref; r++) {
frame_info->ref_array[r] = get_flc(6, stream) - 1;
}
if (frame_info->num_ref == 2 && frame_info->ref_array[0] == -1) {
frame_info->ref_array[frame_info->num_ref++] = get_flc(5, stream) - 1;
}
}
else {
frame_info->num_ref = 0;
}
frame_info->display_frame_num = get_flc(16, stream);
#if CDEF
dec_info->cdef_damping[1] = dec_info->cdef_damping[0] = get_flc(2, stream) + 3;
dec_info->cdef_bits = get_flc(2, stream);
for (int i = 0; i < (1 << dec_info->cdef_bits); i++) {
dec_info->cdef_presets[i].pri_strength[0] = get_flc(4, stream);
dec_info->cdef_presets[i].skip_condition[0] = get_flc(1, stream);
dec_info->cdef_presets[i].sec_strength[0] = get_flc(2, stream);
if (dec_info->subsample != 400) {
dec_info->cdef_presets[i].pri_strength[1] = get_flc(4, stream);
dec_info->cdef_presets[i].skip_condition[1] = get_flc(1, stream);
dec_info->cdef_presets[i].sec_strength[1] = get_flc(2, stream);
}
}
#endif
}
|
void read_frame_header(decoder_info_t *dec_info, stream_t *stream) {
frame_info_t *frame_info = &dec_info->frame_info;
frame_info->frame_type = get_flc(1, stream);
frame_info->qp = get_flc(8, stream);
frame_info->num_intra_modes = get_flc(4, stream);
if (frame_info->frame_type != I_FRAME) {
frame_info->num_ref = get_flc(2, stream) + 1;
int r;
for (r = 0; r < frame_info->num_ref; r++) {
frame_info->ref_array[r] = get_flc(6, stream) - 1;
}
if (frame_info->num_ref == 2 && frame_info->ref_array[0] == -1) {
frame_info->ref_array[frame_info->num_ref++] = get_flc(5, stream) - 1;
}
}
else {
frame_info->num_ref = 0;
}
frame_info->display_frame_num = get_flc(16, stream);
#if CDEF
dec_info->cdef_damping[1] = dec_info->cdef_damping[0] = get_flc(2, stream) + 3;
dec_info->cdef_bits = get_flc(2, stream);
for (int i = 0; i < (1 << dec_info->cdef_bits); i++) {
dec_info->cdef_presets[i].pri_strength[0] = get_flc(4, stream);
dec_info->cdef_presets[i].skip_condition[0] = get_flc(1, stream);
dec_info->cdef_presets[i].sec_strength[0] = get_flc(2, stream);
if (dec_info->subsample != 400) {
dec_info->cdef_presets[i].pri_strength[1] = get_flc(4, stream);
dec_info->cdef_presets[i].skip_condition[1] = get_flc(1, stream);
dec_info->cdef_presets[i].sec_strength[1] = get_flc(2, stream);
}
}
#endif
}
|
C
|
thor
| 0 |
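The Thor commit message above blames the stack overflow on missing validation of values parsed from an untrusted bitstream, such as counts that index fixed-size arrays. The sketch below shows that kind of check with hypothetical names (MAX_REF_FRAMES, get_bits(), read_ref_list()), not Thor's real ones.
#include <stdio.h>

#define MAX_REF_FRAMES 4          /* assumed size of a fixed on-stack array */

/* Stand-in for get_flc(): pull a field from an untrusted stream. Here the
 * "stream" is just an array of pre-parsed values and the bit width is
 * ignored. */
struct stream { const unsigned *vals; unsigned pos, len; };

static unsigned get_bits(struct stream *s, unsigned bits)
{
    (void)bits;
    return s->pos < s->len ? s->vals[s->pos++] : 0;
}

/* Parse a reference-frame list, refusing counts that would overflow the
 * fixed-size array. Rejecting a non-conformant stream up front is the
 * kind of input validation the commit message calls for. */
static int read_ref_list(struct stream *s, int ref_array[MAX_REF_FRAMES])
{
    unsigned num_ref = get_bits(s, 2) + 1;

    if (num_ref > MAX_REF_FRAMES) {
        fprintf(stderr, "illegal bitstream: num_ref=%u\n", num_ref);
        return -1;                /* reject instead of writing past the array */
    }
    for (unsigned r = 0; r < num_ref; r++)
        ref_array[r] = (int)get_bits(s, 6) - 1;
    return (int)num_ref;
}

int main(void)
{
    const unsigned good[] = { 1, 5, 7 };   /* num_ref-1 = 1 -> 2 refs          */
    const unsigned bad[]  = { 200, 0 };    /* absurd count from a bad stream   */
    struct stream gs = { good, 0, 3 }, bs = { bad, 0, 2 };
    int refs[MAX_REF_FRAMES];

    printf("good stream -> %d refs\n", read_ref_list(&gs, refs));
    printf("bad stream  -> %d\n", read_ref_list(&bs, refs));
    return 0;
}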
CVE-2013-6663
|
https://www.cvedetails.com/cve/CVE-2013-6663/
|
CWE-399
|
https://github.com/chromium/chromium/commit/c21d7ac13d69cbadbbb5b2dc147be1933d52147a
|
c21d7ac13d69cbadbbb5b2dc147be1933d52147a
|
Use the host coordinate when comparing to host window bounds.
I somehow overlooked this and the test was not strict enough to catch this.
BUG=521919
TEST=Updated ScreenPositionControllerTest.ConvertHostPointToScreenHiDPI so that it fails without the patch.
Review URL: https://codereview.chromium.org/1293373002
Cr-Commit-Position: refs/heads/master@{#344186}
|
void ScreenPositionController::ConvertHostPointToRelativeToRootWindow(
aura::Window* root_window,
const aura::Window::Windows& root_windows,
gfx::Point* point,
aura::Window** target_root) {
DCHECK(!root_window->parent());
gfx::Point point_in_root(*point);
root_window->GetHost()->ConvertPointFromHost(&point_in_root);
#if defined(USE_X11) || defined(USE_OZONE)
gfx::Rect host_bounds(root_window->GetHost()->GetBounds().size());
if (!host_bounds.Contains(*point)) {
gfx::Point location_in_native(point_in_root);
root_window->GetHost()->ConvertPointToNativeScreen(&location_in_native);
for (size_t i = 0; i < root_windows.size(); ++i) {
aura::WindowTreeHost* host = root_windows[i]->GetHost();
const gfx::Rect native_bounds = host->GetBounds();
if (native_bounds.Contains(location_in_native)) {
*target_root = root_windows[i];
*point = location_in_native;
host->ConvertPointFromNativeScreen(point);
return;
}
}
}
#endif
*target_root = root_window;
*point = point_in_root;
}
|
void ScreenPositionController::ConvertHostPointToRelativeToRootWindow(
aura::Window* root_window,
const aura::Window::Windows& root_windows,
gfx::Point* point,
aura::Window** target_root) {
DCHECK(!root_window->parent());
gfx::Point point_in_root(*point);
root_window->GetHost()->ConvertPointFromHost(&point_in_root);
*target_root = root_window;
*point = point_in_root;
#if defined(USE_X11) || defined(USE_OZONE)
if (!root_window->GetHost()->GetBounds().Contains(*point)) {
gfx::Point location_in_native(point_in_root);
root_window->GetHost()->ConvertPointToNativeScreen(&location_in_native);
for (size_t i = 0; i < root_windows.size(); ++i) {
aura::WindowTreeHost* host = root_windows[i]->GetHost();
const gfx::Rect native_bounds = host->GetBounds();
if (native_bounds.Contains(location_in_native)) {
*target_root = root_windows[i];
*point = location_in_native;
host->ConvertPointFromNativeScreen(point);
break;
}
}
}
#else
NOTIMPLEMENTED();
#endif
}
|
C
|
Chrome
| 1 |
CVE-2012-3412
|
https://www.cvedetails.com/cve/CVE-2012-3412/
|
CWE-189
|
https://github.com/torvalds/linux/commit/68cb695ccecf949d48949e72f8ce591fdaaa325c
|
68cb695ccecf949d48949e72f8ce591fdaaa325c
|
sfc: Fix maximum number of TSO segments and minimum TX queue size
[ Upstream commit 7e6d06f0de3f74ca929441add094518ae332257c ]
Currently an skb requiring TSO may not fit within a minimum-size TX
queue. The TX queue selected for the skb may stall and trigger the TX
watchdog repeatedly (since the problem skb will be retried after the
TX reset). This issue is designated as CVE-2012-3412.
Set the maximum number of TSO segments for our devices to 100. This
should make no difference to behaviour unless the actual MSS is less
than about 700. Increase the minimum TX queue size accordingly to
allow for 2 worst-case skbs, so that there will definitely be space
to add an skb after we wake a queue.
To avoid invalidating existing configurations, change
efx_ethtool_set_ringparam() to fix up values that are too small rather
than returning -EINVAL.
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
|
static int efx_probe_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
/* Restart special buffer allocation */
efx->next_buffer_table = 0;
efx_for_each_channel(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create channel %d\n",
channel->channel);
goto fail;
}
}
efx_set_channel_names(efx);
return 0;
fail:
efx_remove_channels(efx);
return rc;
}
|
static int efx_probe_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
/* Restart special buffer allocation */
efx->next_buffer_table = 0;
efx_for_each_channel(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create channel %d\n",
channel->channel);
goto fail;
}
}
efx_set_channel_names(efx);
return 0;
fail:
efx_remove_channels(efx);
return rc;
}
|
C
|
linux
| 0 |
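The sfc commit message above says efx_ethtool_set_ringparam() now rounds a too-small TX ring size up to a minimum large enough for two worst-case TSO skbs instead of returning -EINVAL. A rough sketch of that clamping logic follows, with assumed illustrative constants rather than the driver's real values.
#include <stdio.h>

/* Assumed illustrative constants, not the driver's real values. */
#define TSO_MAX_SEGS        100   /* cap on segments per TSO skb          */
#define DESCS_PER_SEG         2   /* worst-case descriptors per segment   */
#define TXQ_MIN_ENT (2 * TSO_MAX_SEGS * DESCS_PER_SEG) /* room for 2 skbs */
#define TXQ_MAX_ENT        4096

/* Instead of failing with -EINVAL, fix up a user-supplied TX ring size
 * that is smaller than the new minimum, as the commit message describes. */
static unsigned set_tx_ring_size(unsigned requested)
{
    unsigned size = requested;

    if (size < TXQ_MIN_ENT) {
        fprintf(stderr, "tx ring %u too small, rounding up to %u\n",
                requested, (unsigned)TXQ_MIN_ENT);
        size = TXQ_MIN_ENT;
    }
    if (size > TXQ_MAX_ENT)
        size = TXQ_MAX_ENT;
    return size;
}

int main(void)
{
    printf("requested 128  -> %u\n", set_tx_ring_size(128));
    printf("requested 1024 -> %u\n", set_tx_ring_size(1024));
    return 0;
}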
CVE-2018-20856
|
https://www.cvedetails.com/cve/CVE-2018-20856/
|
CWE-416
|
https://github.com/torvalds/linux/commit/54648cf1ec2d7f4b6a71767799c45676a138ca24
|
54648cf1ec2d7f4b6a71767799c45676a138ca24
|
block: blk_init_allocated_queue() set q->fq as NULL in the fail case
We found a memory use-after-free issue in __blk_drain_queue()
on kernel 4.14. After reading the latest kernel 4.18-rc6 we
think it has the same problem.
Memory is allocated for q->fq in blk_init_allocated_queue().
If the elevator init function returns an error, the code runs into
the fail case and frees q->fq.
__blk_drain_queue() then uses the same memory after q->fq has been
freed, which leads to unpredictable behaviour.
The patch is to set q->fq as NULL in the fail case of
blk_init_allocated_queue().
Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery")
Cc: <[email protected]>
Reviewed-by: Ming Lei <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: xiao jin <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
blk_qc_t generic_make_request(struct bio *bio)
{
/*
* bio_list_on_stack[0] contains bios submitted by the current
* make_request_fn.
* bio_list_on_stack[1] contains bios that were submitted before
* the current make_request_fn, but that haven't been processed
* yet.
*/
struct bio_list bio_list_on_stack[2];
blk_mq_req_flags_t flags = 0;
struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
if (bio_flagged(bio, BIO_QUEUE_ENTERED))
blk_queue_enter_live(q);
else if (blk_queue_enter(q, flags) < 0) {
if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
bio_io_error(bio);
return ret;
}
if (!generic_make_request_checks(bio))
goto out;
/*
* We only want one ->make_request_fn to be active at a time, else
* stack usage with stacked devices could be a problem. So use
* current->bio_list to keep a list of requests submited by a
* make_request_fn function. current->bio_list is also used as a
* flag to say if generic_make_request is currently active in this
* task or not. If it is NULL, then no make_request is active. If
* it is non-NULL, then a make_request is active, and new requests
* should be added at the tail
*/
if (current->bio_list) {
bio_list_add(¤t->bio_list[0], bio);
goto out;
}
/* following loop may be a bit non-obvious, and so deserves some
* explanation.
* Before entering the loop, bio->bi_next is NULL (as all callers
* ensure that) so we have a list with a single bio.
* We pretend that we have just taken it off a longer list, so
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
* added. ->make_request() may indeed add some more bios
* through a recursive call to generic_make_request. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* of the top of the list (no pretending) and so remove it from
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
bool enter_succeeded = true;
if (unlikely(q != bio->bi_disk->queue)) {
if (q)
blk_queue_exit(q);
q = bio->bi_disk->queue;
flags = 0;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
if (blk_queue_enter(q, flags) < 0) {
enter_succeeded = false;
q = NULL;
}
}
if (enter_succeeded) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
/* sort new bios into those for a lower level
* and those for the same level
*/
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bio->bi_disk->queue)
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
/* now assemble so we handle the lowest level first */
bio_list_merge(&bio_list_on_stack[0], &lower);
bio_list_merge(&bio_list_on_stack[0], &same);
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
if (unlikely(!blk_queue_dying(q) &&
(bio->bi_opf & REQ_NOWAIT)))
bio_wouldblock_error(bio);
else
bio_io_error(bio);
}
bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
out:
if (q)
blk_queue_exit(q);
return ret;
}
|
blk_qc_t generic_make_request(struct bio *bio)
{
/*
* bio_list_on_stack[0] contains bios submitted by the current
* make_request_fn.
* bio_list_on_stack[1] contains bios that were submitted before
* the current make_request_fn, but that haven't been processed
* yet.
*/
struct bio_list bio_list_on_stack[2];
blk_mq_req_flags_t flags = 0;
struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
if (bio_flagged(bio, BIO_QUEUE_ENTERED))
blk_queue_enter_live(q);
else if (blk_queue_enter(q, flags) < 0) {
if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
bio_io_error(bio);
return ret;
}
if (!generic_make_request_checks(bio))
goto out;
/*
* We only want one ->make_request_fn to be active at a time, else
* stack usage with stacked devices could be a problem. So use
* current->bio_list to keep a list of requests submited by a
* make_request_fn function. current->bio_list is also used as a
* flag to say if generic_make_request is currently active in this
* task or not. If it is NULL, then no make_request is active. If
* it is non-NULL, then a make_request is active, and new requests
* should be added at the tail
*/
if (current->bio_list) {
bio_list_add(¤t->bio_list[0], bio);
goto out;
}
/* following loop may be a bit non-obvious, and so deserves some
* explanation.
* Before entering the loop, bio->bi_next is NULL (as all callers
* ensure that) so we have a list with a single bio.
* We pretend that we have just taken it off a longer list, so
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
* added. ->make_request() may indeed add some more bios
* through a recursive call to generic_make_request. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* of the top of the list (no pretending) and so remove it from
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
bool enter_succeeded = true;
if (unlikely(q != bio->bi_disk->queue)) {
if (q)
blk_queue_exit(q);
q = bio->bi_disk->queue;
flags = 0;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
if (blk_queue_enter(q, flags) < 0) {
enter_succeeded = false;
q = NULL;
}
}
if (enter_succeeded) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
/* sort new bios into those for a lower level
* and those for the same level
*/
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bio->bi_disk->queue)
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
/* now assemble so we handle the lowest level first */
bio_list_merge(&bio_list_on_stack[0], &lower);
bio_list_merge(&bio_list_on_stack[0], &same);
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
if (unlikely(!blk_queue_dying(q) &&
(bio->bi_opf & REQ_NOWAIT)))
bio_wouldblock_error(bio);
else
bio_io_error(bio);
}
bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
out:
if (q)
blk_queue_exit(q);
return ret;
}
|
C
|
linux
| 0 |
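The commit message above describes the classic free-then-clear fix: on the failure path q->fq is freed, so the pointer must also be set to NULL before later teardown code (__blk_drain_queue() in the report) can touch it. A minimal compilable sketch of the pattern, using stand-in types rather than the block layer's:
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the flush-queue object and the queue that owns it. */
struct flush_queue { int dummy; };
struct queue { struct flush_queue *fq; };

static void free_flush_queue(struct flush_queue *fq) { free(fq); }

/* Later teardown code (the __blk_drain_queue() analogue here) only
 * touches q->fq when it is non-NULL, so clearing the pointer on the
 * failure path makes that later access safe. */
static void drain(struct queue *q)
{
    if (q->fq)
        q->fq->dummy++;
    else
        printf("drain: no flush queue, nothing to do\n");
}

static int init_queue(struct queue *q, int elevator_init_fails)
{
    q->fq = calloc(1, sizeof(*q->fq));
    if (!q->fq)
        return -1;

    if (elevator_init_fails) {
        /* Failure path: free q->fq AND clear the pointer, so code that
         * runs afterwards cannot use the freed memory. */
        free_flush_queue(q->fq);
        q->fq = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct queue q = { 0 };

    if (init_queue(&q, /*elevator_init_fails=*/1) < 0)
        drain(&q);            /* safe: q.fq is NULL, not dangling */

    if (init_queue(&q, 0) == 0) {
        drain(&q);
        free_flush_queue(q.fq);
        q.fq = NULL;
    }
    return 0;
}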
CVE-2015-6761
|
https://www.cvedetails.com/cve/CVE-2015-6761/
|
CWE-362
|
https://github.com/chromium/chromium/commit/fd506b0ac6c7846ae45b5034044fe85c28ee68ac
|
fd506b0ac6c7846ae45b5034044fe85c28ee68ac
|
Fix detach with open()ed document leaving parent loading indefinitely
Change-Id: I26c2a054b9f1e5eb076acd677e1223058825f6d6
Bug: 803416
Test: fast/loader/document-open-iframe-then-detach.html
Change-Id: I26c2a054b9f1e5eb076acd677e1223058825f6d6
Reviewed-on: https://chromium-review.googlesource.com/887298
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Nate Chapin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#532967}
|
void DocumentLoader::DidCommitNavigation(
WebGlobalObjectReusePolicy global_object_reuse_policy) {
if (GetFrameLoader().StateMachine()->CreatingInitialEmptyDocument())
return;
if (!frame_->Loader().StateMachine()->CommittedMultipleRealLoads() &&
load_type_ == kFrameLoadTypeStandard) {
frame_->Loader().StateMachine()->AdvanceTo(
FrameLoaderStateMachine::kCommittedMultipleRealLoads);
}
HistoryCommitType commit_type = LoadTypeToCommitType(load_type_);
frame_->FrameScheduler()->DidCommitProvisionalLoad(
commit_type == kHistoryInertCommit, load_type_ == kFrameLoadTypeReload,
frame_->IsLocalRoot());
GetLocalFrameClient().DispatchDidCommitLoad(history_item_.Get(), commit_type,
global_object_reuse_policy);
frame_->GetSecurityContext()
->GetContentSecurityPolicy()
->ReportAccumulatedHeaders(&GetLocalFrameClient());
if (service_worker_network_provider_ &&
service_worker_network_provider_->HasControllerServiceWorker()) {
GetLocalFrameClient().DidObserveLoadingBehavior(
kWebLoadingBehaviorServiceWorkerControlled);
}
DispatchLinkHeaderPreloads(nullptr, LinkLoader::kOnlyLoadNonMedia);
Document* document = frame_->GetDocument();
InteractiveDetector* interactive_detector =
InteractiveDetector::From(*document);
if (interactive_detector)
interactive_detector->SetNavigationStartTime(GetTiming().NavigationStart());
TRACE_EVENT1("devtools.timeline", "CommitLoad", "data",
InspectorCommitLoadEvent::Data(frame_));
probe::didCommitLoad(frame_, this);
frame_->GetPage()->DidCommitLoad(frame_);
if (response_.IsLegacySymantecCert()) {
GetLocalFrameClient().ReportLegacySymantecCert(response_.Url());
}
}
|
void DocumentLoader::DidCommitNavigation(
WebGlobalObjectReusePolicy global_object_reuse_policy) {
if (GetFrameLoader().StateMachine()->CreatingInitialEmptyDocument())
return;
if (!frame_->Loader().StateMachine()->CommittedMultipleRealLoads() &&
load_type_ == kFrameLoadTypeStandard) {
frame_->Loader().StateMachine()->AdvanceTo(
FrameLoaderStateMachine::kCommittedMultipleRealLoads);
}
HistoryCommitType commit_type = LoadTypeToCommitType(load_type_);
frame_->FrameScheduler()->DidCommitProvisionalLoad(
commit_type == kHistoryInertCommit, load_type_ == kFrameLoadTypeReload,
frame_->IsLocalRoot());
GetLocalFrameClient().DispatchDidCommitLoad(history_item_.Get(), commit_type,
global_object_reuse_policy);
frame_->GetSecurityContext()
->GetContentSecurityPolicy()
->ReportAccumulatedHeaders(&GetLocalFrameClient());
if (service_worker_network_provider_ &&
service_worker_network_provider_->HasControllerServiceWorker()) {
GetLocalFrameClient().DidObserveLoadingBehavior(
kWebLoadingBehaviorServiceWorkerControlled);
}
DispatchLinkHeaderPreloads(nullptr, LinkLoader::kOnlyLoadNonMedia);
Document* document = frame_->GetDocument();
InteractiveDetector* interactive_detector =
InteractiveDetector::From(*document);
if (interactive_detector)
interactive_detector->SetNavigationStartTime(GetTiming().NavigationStart());
TRACE_EVENT1("devtools.timeline", "CommitLoad", "data",
InspectorCommitLoadEvent::Data(frame_));
probe::didCommitLoad(frame_, this);
frame_->GetPage()->DidCommitLoad(frame_);
if (response_.IsLegacySymantecCert()) {
GetLocalFrameClient().ReportLegacySymantecCert(response_.Url());
}
}
|
C
|
Chrome
| 0 |
CVE-2011-4080
|
https://www.cvedetails.com/cve/CVE-2011-4080/
|
CWE-264
|
https://github.com/torvalds/linux/commit/bfdc0b497faa82a0ba2f9dddcf109231dd519fcc
|
bfdc0b497faa82a0ba2f9dddcf109231dd519fcc
|
sysctl: restrict write access to dmesg_restrict
When dmesg_restrict is set to 1 CAP_SYS_ADMIN is needed to read the kernel
ring buffer. But a root user without CAP_SYS_ADMIN is able to reset
dmesg_restrict to 0.
This is an issue when e.g. LXC (Linux Containers) are used and complete
user space is running without CAP_SYS_ADMIN. A unprivileged and jailed
root user can bypass the dmesg_restrict protection.
With this patch writing to dmesg_restrict is only allowed when root has
CAP_SYS_ADMIN.
Signed-off-by: Richard Weinberger <[email protected]>
Acked-by: Dan Rosenberg <[email protected]>
Acked-by: Serge E. Hallyn <[email protected]>
Cc: Eric Paris <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: James Morris <[email protected]>
Cc: Eugene Teo <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int use_table(struct ctl_table_header *p)
{
if (unlikely(p->unregistering))
return 0;
p->used++;
return 1;
}
|
static int use_table(struct ctl_table_header *p)
{
if (unlikely(p->unregistering))
return 0;
p->used++;
return 1;
}
|
C
|
linux
| 0 |
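The commit message above describes gating writes to dmesg_restrict on CAP_SYS_ADMIN, so a root user inside a container without that capability cannot reset it to 0. A toy sketch of that check, with a plain flag standing in for capable(CAP_SYS_ADMIN):
#include <stdio.h>

/* Stand-ins: in the kernel this is capable(CAP_SYS_ADMIN) inside the
 * dmesg_restrict proc handler; here it is a plain flag. */
static int caller_has_cap_sys_admin = 0;
static int dmesg_restrict = 1;

/* Write handler: changing the restriction requires CAP_SYS_ADMIN, so a
 * jailed root user without the capability cannot flip it back to 0. */
static int dmesg_restrict_write(int new_value)
{
    if (!caller_has_cap_sys_admin)
        return -1;                    /* -EPERM in the real handler */
    dmesg_restrict = new_value;
    return 0;
}

int main(void)
{
    if (dmesg_restrict_write(0) < 0)
        printf("write denied, dmesg_restrict stays %d\n", dmesg_restrict);

    caller_has_cap_sys_admin = 1;
    if (dmesg_restrict_write(0) == 0)
        printf("privileged write ok, dmesg_restrict now %d\n", dmesg_restrict);
    return 0;
}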
CVE-2014-1700
|
https://www.cvedetails.com/cve/CVE-2014-1700/
|
CWE-399
|
https://github.com/chromium/chromium/commit/d926098e2e2be270c80a5ba25ab8a611b80b8556
|
d926098e2e2be270c80a5ba25ab8a611b80b8556
|
Connect WebUSB client interface to the devices app
This provides a basic WebUSB client interface in
content/renderer. Most of the interface is unimplemented,
but this CL hooks up navigator.usb.getDevices() to the
browser's Mojo devices app to enumerate available USB
devices.
BUG=492204
Review URL: https://codereview.chromium.org/1293253002
Cr-Commit-Position: refs/heads/master@{#344881}
|
void GetRedirectChain(WebDataSource* ds, std::vector<GURL>* result) {
const WebURL& blank_url = GURL(url::kAboutBlankURL);
WebVector<WebURL> urls;
ds->redirectChain(urls);
result->reserve(urls.size());
for (size_t i = 0; i < urls.size(); ++i) {
if (urls[i] != GURL(kSwappedOutURL))
result->push_back(urls[i]);
else
result->push_back(blank_url);
}
}
|
void GetRedirectChain(WebDataSource* ds, std::vector<GURL>* result) {
const WebURL& blank_url = GURL(url::kAboutBlankURL);
WebVector<WebURL> urls;
ds->redirectChain(urls);
result->reserve(urls.size());
for (size_t i = 0; i < urls.size(); ++i) {
if (urls[i] != GURL(kSwappedOutURL))
result->push_back(urls[i]);
else
result->push_back(blank_url);
}
}
|
C
|
Chrome
| 0 |