max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
367
<reponame>Allenjonesing/tutorials // You should include this file and bitmap.cpp with your projects from now on. // Now you don't need to cut and paste the functions you want to use, just include // the .h and .cpp files to your project if you want to use bitmaps #ifndef _BITMAP_H // If we haven't included this file #define _BITMAP_H // Set a flag saying we included it #include "main.h" struct BUFFER // This is our back buffering structure { HWND hwnd; // This holds the current window's handle RECT scrnRect; // This holds the client rectangle of the window HANDLE hCompBitmap; // This holds the compatible bitmap for the backbuffer HANDLE hOldBitmap; // This is used for storage to free when the program quits HANDLE hOldBitmap2; // This is used as storage to swap between selected bitmaps when using selectObject() HDC hdcFront; // This is the front buffer (The part we see) HDC hdcBack; // This is the back buffer (the part we draw to, then flip) HDC hdcBitmap; // This is a temp buffer to swap the bitmap back and forth from }; // Create the double buffering for all of our bitmaps void CreateDoubleBuffering(BUFFER *pBuffer, HWND hwnd); // This loads a bitmap (ie. "Bitmap.bmp") and returns a handle to that bitmap HBITMAP LoadABitmap(LPSTR szFileName); // This displays the bitmap to the screen at a X and a Y location void DisplayBitmap(BUFFER *pBuffer, HBITMAP hBitmap, int x, int y); // Clears the backbuffer to what ever color is passed in void ClearScreen(HDC hdc, RECT scrnRect, int color); // Swaps the back buffer to the screen void SwapBackBuffer(BUFFER *pBuffer, BOOL bClearBackBuffer); // This resizes the buffers to the current window size void ResizeBuffers(BUFFER *pBuffer); #endif //////////////////////////////////////////////////////////// // // *Quick Notes* // // This needs to be include from now on in your projects if // you want to use bitmaps, also bitmap.cpp. This allows you // to not have to copy and paste functions into your program, // just include these files in your project. This is also // needed for animation if you are including sprite.cpp/sprite.h. // // // // // // // // //
688
410
// Copyright(c) 2017 POLYGONTEK // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http ://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "Precompiled.h" #include "RHI/RHIOpenGL.h" #include "RGLInternal.h" #include "Platform/PlatformProcess.h" #include "Platform/PlatformTime.h" #include "Profiler/Profiler.h" #include <tchar.h> BE_NAMESPACE_BEGIN #define MAX_PIXEL_FORMAT 1024 #define MAX_ATTRIB_SIZE 32 #define FAKE_WINDOW_CLASSNAME _T("BLUESHIFT_FAKE_WINDOW") enum GLContextProfile { CompatibilityProfile, CoreProfile, ES2Profile // ES2 profile including ES3 }; static int majorVersion; static int minorVersion; static bool deviceFullscreen = false; static int deviceBpp = 0; static int deviceHz = 0; static HGLRC hrcMain; static CVar gl_debug("gl_debug", "0", CVar::Flag::Bool, ""); static CVar gl_debugLevel("gl_debugLevel", "3", CVar::Flag::Integer, ""); static CVar gl_ignoreError("gl_ignoreError", "0", CVar::Flag::Bool, ""); static CVar gl_finish("gl_finish", "0", CVar::Flag::Bool, ""); static LRESULT CALLBACK FakeWndProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam) { return DefWindowProc(hwnd, uMsg, wParam, lParam); } static HWND CreateFakeWindow() { WNDCLASS wc; HINSTANCE hInstance = (HINSTANCE)GetModuleHandle(nullptr); if (!GetClassInfo(hInstance, FAKE_WINDOW_CLASSNAME, &wc)) { wc.style = CS_OWNDC; wc.lpfnWndProc = FakeWndProc; wc.cbClsExtra = 0; wc.cbWndExtra = 0; wc.hInstance = hInstance; wc.hIcon = LoadIcon(wc.hInstance, IDI_WINLOGO); wc.hCursor = LoadCursor(nullptr, IDC_ARROW); wc.hbrBackground = nullptr; wc.lpszMenuName = _T(""); wc.lpszClassName = FAKE_WINDOW_CLASSNAME; if (!RegisterClass(&wc)) { BE_FATALERROR("Couldn't register fake window class"); } } HWND hwndFake = CreateWindowEx(0, FAKE_WINDOW_CLASSNAME, _T("FAKE"), WS_POPUP, 0, 0, 0, 0, nullptr, nullptr, wc.hInstance, nullptr); if (!hwndFake) { BE_FATALERROR("Couldn't create fake window"); } // hide fake window ShowWindow(hwndFake, SW_HIDE); return hwndFake; } static int ChooseBestPixelFormat(HDC hDC, int inColorBits, int inAlphaBits, int inDepthBits, int inStencilBits, int inMultiSamples) { int best = 0; BE_LOG("ChoosePixelFormat(%i, %i, %i, %i)\n", inColorBits, inAlphaBits, inDepthBits, inStencilBits); if (!gwglChoosePixelFormatARB) { // Get the number of pixel formats supported by this DC. unsigned int numFormats = DescribePixelFormat(hDC, 0, 0, nullptr); if (numFormats > MAX_PIXEL_FORMAT) { BE_WARNLOG("numFormats > MAX_PIXEL_FORMAT\n"); numFormats = MAX_PIXEL_FORMAT; } else if (numFormats < 1) { BE_FATALERROR("no pixel formats found"); } BE_LOG("%i pixel formats found\n", numFormats); DWORD dwTargetFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER; PIXELFORMATDESCRIPTOR pfdList[MAX_PIXEL_FORMAT]; PIXELFORMATDESCRIPTOR *pfd = pfdList; // Use positive one-based integer indexes for (int i = 1; i <= numFormats; i++, pfd++) { DescribePixelFormat(hDC, i, sizeof(PIXELFORMATDESCRIPTOR), pfd); } pfd = pfdList; for (int i = 1; i <= numFormats; i++, pfd++) { // Is supported by GDI software implementation ? if (pfd->dwFlags & PFD_GENERIC_FORMAT) { BE_DLOG("PF %i rejected, software implementation\n", i); continue; } if ((pfd->dwFlags & dwTargetFlags) != dwTargetFlags) { BE_DLOG("PF %i rejected, improper flags (%x instead of %x)\n", i, pfd->dwFlags, dwTargetFlags); continue; } // Is color index pixel type ? if (pfd->iPixelType != PFD_TYPE_RGBA) { BE_DLOG("PF %i rejected, not RGBA\n", i); continue; } BE_DLOG("PF %3i: color(%2i-bits) alpha(%2i-bits), depth(%2i-bits) stencil(%2i-bits)\n", i, pfd->cColorBits, pfd->cAlphaBits, pfd->cDepthBits, pfd->cStencilBits); if (pfd->cDepthBits < 16 && inDepthBits > 0) { continue; } if (pfd->cStencilBits < 4 && inStencilBits > 0) { continue; } if (!best) { best = i; continue; } // Check color bits if (pfdList[best-1].cColorBits != inColorBits) { if (pfd->cColorBits == inColorBits || pfd->cColorBits > pfdList[best-1].cColorBits) { best = i; continue; } } // Check alpha bits if (pfdList[best-1].cAlphaBits != inAlphaBits) { if (pfd->cAlphaBits == inAlphaBits || pfd->cAlphaBits > pfdList[best-1].cAlphaBits) { best = i; continue; } } // Check depth bits if (pfdList[best-1].cDepthBits != inDepthBits) { if (pfd->cDepthBits == inDepthBits || pfd->cDepthBits > pfdList[best-1].cDepthBits) { best = i; continue; } } // Check stencil bits if (pfdList[best-1].cStencilBits != inStencilBits) { if (pfd->cStencilBits == inStencilBits || (pfd->cStencilBits > inStencilBits && inStencilBits > 0)) { best = i; continue; } } } // Best PFD choosed !! if (!(best = ::ChoosePixelFormat(hDC, &pfdList[best-1]))) { BE_FATALERROR("ChoosePixelFormat: failed"); } } else { int results[MAX_ATTRIB_SIZE]; int attribs[MAX_ATTRIB_SIZE]; attribs[0] = WGL_NUMBER_PIXEL_FORMATS_ARB; gwglGetPixelFormatAttribivARB(hDC, 0, 0, 1, attribs, results); unsigned int numFormats = results[0]; if (numFormats > MAX_PIXEL_FORMAT) { BE_WARNLOG("numFormats > MAX_PIXEL_FORMAT\n"); numFormats = MAX_PIXEL_FORMAT; } else if (numFormats < 1) { BE_FATALERROR("no pixel formats found"); } BE_LOG("%i pixel formats found\n", numFormats); int numAttribs = 0; attribs[numAttribs++] = WGL_SUPPORT_OPENGL_ARB; attribs[numAttribs++] = WGL_ACCELERATION_ARB; attribs[numAttribs++] = WGL_DOUBLE_BUFFER_ARB; attribs[numAttribs++] = WGL_DRAW_TO_WINDOW_ARB; attribs[numAttribs++] = WGL_PIXEL_TYPE_ARB; attribs[numAttribs++] = WGL_COLOR_BITS_ARB; attribs[numAttribs++] = WGL_ALPHA_BITS_ARB; attribs[numAttribs++] = WGL_DEPTH_BITS_ARB; attribs[numAttribs++] = WGL_STENCIL_BITS_ARB; attribs[numAttribs++] = WGL_SAMPLE_BUFFERS_ARB; attribs[numAttribs++] = WGL_SAMPLES_ARB; attribs[numAttribs++] = WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB; attribs[numAttribs++] = 0; int attribList[MAX_PIXEL_FORMAT][MAX_ATTRIB_SIZE]; for (int i = 1; i <= numFormats; i++) { gwglGetPixelFormatAttribivARB(hDC, i, 0, numAttribs, attribs, attribList[i-1]); } for (int i = 1; i <= numFormats; i++) { int *attr = attribList[i-1]; // WGL_SUPPORT_OPENGL_ARB if (attr[0] != GL_TRUE) { BE_DLOG("PF %i rejected, software implementation\n", i); continue; } // WGL_ACCELERATION_ARB if (attr[1] != WGL_FULL_ACCELERATION_ARB) { BE_DLOG("PF %i rejected, full hw-acceleration required\n", i); continue; } // WGL_DOUBLE_BUFFER_ARB if (attr[2] != GL_TRUE) { BE_DLOG("PF %i rejected, double buffer required\n", i); continue; } // WGL_DRAW_TO_WINDOW_ARB if (attr[3] != GL_TRUE) { BE_DLOG("PF %i rejected, draw to windows required\n", i); continue; } // WGL_PIXEL_TYPE_ARB if (attr[4] != WGL_TYPE_RGBA_ARB) { BE_DLOG("PF %i rejected, not RGBA\n", i); continue; } BE_DLOG("PF %3i: color(%2i-bits) alpha(%2i-bits) depth(%2i-bits), stencil(%2i-bits), multisamples(%2ix)\n", i, attr[5], attr[6], attr[7], attr[8], attr[10]); // WGL_ALPHA_BITS_ARB if (attr[6] <= 0 && inAlphaBits > 0) { continue; } // WGL_DEPTH_BITS_ARB if (attr[7] < 16 && inDepthBits > 0) { continue; } // WGL_STENCIL_BITS_ARB if (attr[8] < 4 && inStencilBits > 0) { continue; } // WGL_SAMPLE_BUFFERS_ARB if (attr[9] == GL_FALSE && inMultiSamples > 0) { continue; } // WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB if (attr[11] == GL_FALSE) { continue; } if (!best) { best = i; continue; } // check color bits if (attribList[best-1][5] != inColorBits) { if (attr[5] == inColorBits || attr[5] > attribList[best-1][5]) { best = i; continue; } } // check alpha bits if (attribList[best-1][6] != inAlphaBits) { if (attr[6] == inAlphaBits || attr[6] > attribList[best-1][6]) { best = i; continue; } } // check depth bits if (attribList[best-1][7] != inDepthBits) { if (attr[7] == inDepthBits || attr[7] > attribList[best-1][7]) { best = i; continue; } } // check stencil bits if (attribList[best-1][8] != inStencilBits) { if (attr[8] == inStencilBits || (attr[8] > inStencilBits && inStencilBits > 0)) { best = i; continue; } } // check multi samples if (attribList[best-1][10] != inMultiSamples) { if (attr[10] == inMultiSamples || attr[10] > attribList[best-1][10]) { best = i; continue; } } } numAttribs = 0; attribs[numAttribs++] = WGL_SUPPORT_OPENGL_ARB; attribs[numAttribs++] = attribList[best-1][0]; attribs[numAttribs++] = WGL_ACCELERATION_ARB; attribs[numAttribs++] = attribList[best-1][1]; attribs[numAttribs++] = WGL_DOUBLE_BUFFER_ARB; attribs[numAttribs++] = attribList[best-1][2]; attribs[numAttribs++] = WGL_DRAW_TO_WINDOW_ARB; attribs[numAttribs++] = attribList[best-1][3]; attribs[numAttribs++] = WGL_PIXEL_TYPE_ARB; attribs[numAttribs++] = attribList[best-1][4]; attribs[numAttribs++] = WGL_COLOR_BITS_ARB; attribs[numAttribs++] = attribList[best-1][5]; attribs[numAttribs++] = WGL_ALPHA_BITS_ARB; attribs[numAttribs++] = attribList[best-1][6]; attribs[numAttribs++] = WGL_DEPTH_BITS_ARB; attribs[numAttribs++] = attribList[best-1][7]; attribs[numAttribs++] = WGL_STENCIL_BITS_ARB; attribs[numAttribs++] = attribList[best-1][8]; attribs[numAttribs++] = WGL_SAMPLE_BUFFERS_ARB; attribs[numAttribs++] = attribList[best-1][9]; attribs[numAttribs++] = WGL_SAMPLES_ARB; attribs[numAttribs++] = attribList[best-1][10]; attribs[numAttribs++] = WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB; attribs[numAttribs++] = attribList[best - 1][11]; attribs[numAttribs++] = 0; attribs[numAttribs++] = 0; if (!gwglChoosePixelFormatARB(hDC, attribs, nullptr, 1, &best, &numFormats)) { BE_FATALERROR("gwglChoosePixelFormatARB: failed"); } } BE_LOG("PIXELFORMAT %i choosed\n", best); return best; } static HGLRC CreateContextAttribs(HDC hdc, HGLRC hSharedContext, GLContextProfile contextProfile, int majorVersion, int minorVersion) { int contextFlags = WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB; if (gl_debug.GetBool()) { if (gglext._GL_ARB_debug_output) { contextFlags |= WGL_CONTEXT_DEBUG_BIT_ARB; } } int profileMask = 0; switch (contextProfile) { case CompatibilityProfile: profileMask |= WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB; break; case CoreProfile: profileMask |= WGL_CONTEXT_CORE_PROFILE_BIT_ARB; break; case ES2Profile: profileMask |= WGL_CONTEXT_ES2_PROFILE_BIT_EXT; break; } int attribs[16]; int numAttribs = 0; if (contextProfile != CompatibilityProfile) { attribs[numAttribs++] = WGL_CONTEXT_MAJOR_VERSION_ARB; attribs[numAttribs++] = majorVersion; attribs[numAttribs++] = WGL_CONTEXT_MINOR_VERSION_ARB; attribs[numAttribs++] = minorVersion; } attribs[numAttribs++] = WGL_CONTEXT_FLAGS_ARB; attribs[numAttribs++] = contextFlags; attribs[numAttribs++] = WGL_CONTEXT_PROFILE_MASK_ARB; attribs[numAttribs++] = profileMask; attribs[numAttribs++] = 0; return gwglCreateContextAttribsARB(hdc, hSharedContext, attribs); } static void GetGLVersion(int *major, int *minor) { #if 1 // GL_MAJOR_VERSION and GL_MINOR_VERSION queries are supported from 3.0 or higher core context. glGetIntegerv(GL_MAJOR_VERSION, major); glGetIntegerv(GL_MINOR_VERSION, minor); #else // Use glGetString if context has not been created yet. const char *verstr = (const char *)glGetString(GL_VERSION); if (!verstr || sscanf(verstr, "%d.%d", major, minor) != 2) { *major = *minor = 0; } #endif } static void GetContextVersionFromCommandLine(int &majorVersion, int &minorVersion) { majorVersion = 4; minorVersion = 3; //majorVersion = 3; //minorVersion = 2; } static void InitGLFunctions() { HWND hwndFake = CreateFakeWindow(); HDC hdcFake = GetDC(hwndFake); PIXELFORMATDESCRIPTOR pfd; memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR)); pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR); pfd.nVersion = 1; pfd.dwFlags = PFD_DOUBLEBUFFER | PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW; pfd.iPixelType = PFD_TYPE_RGBA; pfd.cColorBits = 32; pfd.cDepthBits = 32; pfd.iLayerType = PFD_MAIN_PLANE; int pixelFormat = ChoosePixelFormat(hdcFake, &pfd); if (!SetPixelFormat(hdcFake, pixelFormat, nullptr)) { BE_FATALERROR("set pixel format: failed"); } BE_DLOG("set pixel format: ok\n"); HGLRC hrcFake = wglCreateContext(hdcFake); if (!hrcFake) { BE_FATALERROR("Couldn't create fake RC"); } wglMakeCurrent(hdcFake, hrcFake); // NOTE: To get the WGL extension string, you need a valid DC connected to RC. // gwglXXX function bindings & check WGL extensions gwgl_init(hdcFake, false); // gglXXX function bindings & check GL extensions ggl_init(gl_debug.GetBool()); wglMakeCurrent(nullptr, nullptr); wglDeleteContext(hrcFake); ReleaseDC(hwndFake, hdcFake); DestroyWindow(hwndFake); } void OpenGLRHI::InitMainContext(WindowHandle windowHandle, const Settings *settings) { InitGLFunctions(); // Create main context mainContext = new GLContext; mainContext->state = new GLState; mainContext->hwnd = CreateFakeWindow(); // Create fake window for global RC mainContext->hdc = GetDC(mainContext->hwnd); // Set PF for the main DC int pixelFormat = ChooseBestPixelFormat(mainContext->hdc, settings->colorBits, settings->alphaBits, settings->depthBits, settings->stencilBits, settings->multiSamples); if (!SetPixelFormat(mainContext->hdc, pixelFormat, nullptr)) { BE_FATALERROR("set pixel format: failed"); } BE_DLOG("set pixel format: ok\n"); GLContextProfile contextProfile = CoreProfile; int contextMajorVersion = 0; int contextMinorVersion = 0; GetContextVersionFromCommandLine(contextMajorVersion, contextMinorVersion); // Create rendering context mainContext->hrc = CreateContextAttribs(mainContext->hdc, nullptr, contextProfile, contextMajorVersion, contextMinorVersion); if (!mainContext->hrc) { BE_FATALERROR("Couldn't create RC"); } if (!wglMakeCurrent(mainContext->hdc, mainContext->hrc)) { BE_FATALERROR("Couldn't make current context"); } GetGLVersion(&majorVersion, &minorVersion); if (contextProfile == CompatibilityProfile) { int decimalVersion = majorVersion * 10 + minorVersion; if (decimalVersion < 32) { BE_FATALERROR("Minimum OpenGL extensions missing !!\nRequired OpenGL 3.2 or higher graphic card"); } } // Enable debug callback if (gl_debug.GetBool()) { if (gglext._GL_ARB_debug_output) { gglDebugMessageCallbackARB(OpenGL::DebugCallback, nullptr); gglEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB); } else { ggl_rebind(true); } } // default FBO mainContext->defaultFramebuffer = 0; // Create default VAO for main context gglGenVertexArrays(1, &mainContext->defaultVAO); #ifdef ENABLE_IMGUI ImGuiCreateContext(mainContext); #endif } void OpenGLRHI::FreeMainContext() { #ifdef ENABLE_IMGUI ImGuiDestroyContext(mainContext); #endif // Delete default VAO for main context gglDeleteVertexArrays(1, &mainContext->defaultVAO); wglMakeCurrent(nullptr, nullptr); if (!wglDeleteContext(mainContext->hrc)) { BE_FATALERROR("deleting main context RC: failed"); } if (!ReleaseDC(mainContext->hwnd, mainContext->hdc)) { BE_FATALERROR("releasing main context DC: failed"); } if (!DestroyWindow(mainContext->hwnd)) { BE_FATALERROR("destroying main context window: failed"); } SAFE_DELETE(mainContext->state); SAFE_DELETE(mainContext); } //------------------------------------------------------------------------------------------------------- // ImGui_ImplWin32_XXXX //------------------------------------------------------------------------------------------------------- static ImGuiMouseCursor g_LastMouseCursor = ImGuiMouseCursor_COUNT; static bool ImGui_ImplWin32_UpdateMouseCursor() { ImGuiIO &io = ImGui::GetIO(); if (io.ConfigFlags & ImGuiConfigFlags_NoMouseCursorChange) { return false; } ImGuiMouseCursor imgui_cursor = ImGui::GetMouseCursor(); if (imgui_cursor == ImGuiMouseCursor_None || io.MouseDrawCursor) { // Hide OS mouse cursor if imgui is drawing it or if it wants no cursor ::SetCursor(NULL); } else { // Show OS mouse cursor LPTSTR win32_cursor = IDC_ARROW; switch (imgui_cursor) { case ImGuiMouseCursor_Arrow: win32_cursor = IDC_ARROW; break; case ImGuiMouseCursor_TextInput: win32_cursor = IDC_IBEAM; break; case ImGuiMouseCursor_ResizeAll: win32_cursor = IDC_SIZEALL; break; case ImGuiMouseCursor_ResizeEW: win32_cursor = IDC_SIZEWE; break; case ImGuiMouseCursor_ResizeNS: win32_cursor = IDC_SIZENS; break; case ImGuiMouseCursor_ResizeNESW: win32_cursor = IDC_SIZENESW; break; case ImGuiMouseCursor_ResizeNWSE: win32_cursor = IDC_SIZENWSE; break; case ImGuiMouseCursor_Hand: win32_cursor = IDC_HAND; break; case ImGuiMouseCursor_NotAllowed: win32_cursor = IDC_NO; break; } ::SetCursor(::LoadCursor(NULL, win32_cursor)); } return true; } static void ImGui_ImplWin32_UpdateMousePos(HWND hwnd) { ImGuiIO &io = ImGui::GetIO(); // Set OS mouse position if requested (rarely used, only when ImGuiConfigFlags_NavEnableSetMousePos is enabled by user) if (io.WantSetMousePos) { POINT pos = { (int)io.MousePos.x, (int)io.MousePos.y }; ::ClientToScreen(hwnd, &pos); ::SetCursorPos(pos.x, pos.y); } // Set mouse position io.MousePos = ImVec2(-FLT_MAX, -FLT_MAX); POINT pos; if (HWND active_window = ::GetForegroundWindow()) { if (active_window == hwnd || ::IsChild(active_window, hwnd)) { if (::GetCursorPos(&pos) && ::ScreenToClient(hwnd, &pos)) { io.MousePos = ImVec2((float)pos.x, (float)pos.y); } } } } // Allow compilation with old Windows SDK. MinGW doesn't have default _WIN32_WINNT/WINVER versions. #ifndef WM_MOUSEHWHEEL #define WM_MOUSEHWHEEL 0x020E #endif #ifndef DBT_DEVNODES_CHANGED #define DBT_DEVNODES_CHANGED 0x0007 #endif // Win32 message handler (process Win32 mouse/keyboard inputs, etc.) // Call from your application's message handler. // When implementing your own back-end, you can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if Dear ImGui wants to use your inputs. // - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application. // - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application. // Generally you may always pass all inputs to Dear ImGui, and hide them from your application based on those two flags. // PS: In this Win32 handler, we use the capture API (GetCapture/SetCapture/ReleaseCapture) to be able to read mouse coordinates when dragging mouse outside of our window bounds. // PS: We treat DBLCLK messages as regular mouse down messages, so this code will work on windows classes that have the CS_DBLCLKS flag set. Our own example app code doesn't set this flag. static IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) { if (ImGui::GetCurrentContext() == NULL) { return 0; } ImGuiIO &io = ImGui::GetIO(); switch (msg) { case WM_LBUTTONDOWN: case WM_LBUTTONDBLCLK: case WM_RBUTTONDOWN: case WM_RBUTTONDBLCLK: case WM_MBUTTONDOWN: case WM_MBUTTONDBLCLK: case WM_XBUTTONDOWN: case WM_XBUTTONDBLCLK: { int button = 0; if (msg == WM_LBUTTONDOWN || msg == WM_LBUTTONDBLCLK) { button = 0; } if (msg == WM_RBUTTONDOWN || msg == WM_RBUTTONDBLCLK) { button = 1; } if (msg == WM_MBUTTONDOWN || msg == WM_MBUTTONDBLCLK) { button = 2; } if (msg == WM_XBUTTONDOWN || msg == WM_XBUTTONDBLCLK) { button = (GET_XBUTTON_WPARAM(wParam) == XBUTTON1) ? 3 : 4; } if (!ImGui::IsAnyMouseDown() && ::GetCapture() == NULL) { ::SetCapture(hwnd); } io.MouseDown[button] = true; return io.WantCaptureMouse; } case WM_LBUTTONUP: case WM_RBUTTONUP: case WM_MBUTTONUP: case WM_XBUTTONUP: { int button = 0; if (msg == WM_LBUTTONUP) { button = 0; } if (msg == WM_RBUTTONUP) { button = 1; } if (msg == WM_MBUTTONUP) { button = 2; } if (msg == WM_XBUTTONUP) { button = (GET_XBUTTON_WPARAM(wParam) == XBUTTON1) ? 3 : 4; } io.MouseDown[button] = false; if (!ImGui::IsAnyMouseDown() && ::GetCapture() == hwnd) { ::ReleaseCapture(); } return io.WantCaptureMouse; } case WM_MOUSEWHEEL: io.MouseWheel += (float)GET_WHEEL_DELTA_WPARAM(wParam) / (float)WHEEL_DELTA; return io.WantCaptureMouse; case WM_MOUSEHWHEEL: io.MouseWheelH += (float)GET_WHEEL_DELTA_WPARAM(wParam) / (float)WHEEL_DELTA; return io.WantCaptureMouse; case WM_KEYDOWN: case WM_SYSKEYDOWN: if (wParam < 256) { io.KeysDown[wParam] = 1; } return io.WantCaptureKeyboard; case WM_KEYUP: case WM_SYSKEYUP: if (wParam < 256) { io.KeysDown[wParam] = 0; } return io.WantCaptureKeyboard; case WM_CHAR: // You can also use ToAscii()+GetKeyboardState() to retrieve characters. if (wParam > 0 && wParam < 0x10000) { io.AddInputCharacterUTF16((unsigned short)wParam); } return io.WantCaptureKeyboard; case WM_SETCURSOR: if (LOWORD(lParam) == HTCLIENT && ImGui_ImplWin32_UpdateMouseCursor()) { return 1; } return 0; } return 0; } //------------------------------------------------------------------------------------------------------- static LRESULT CALLBACK NewWndProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam) { GLContext *ctx = (GLContext *)GetWindowLongPtr(hwnd, GWLP_USERDATA); #ifdef ENABLE_IMGUI ImGuiContext *oldContext = ImGui::GetCurrentContext(); ImGui::SetCurrentContext(ctx->imGuiContext); if (ImGui_ImplWin32_WndProcHandler(hwnd, uMsg, wParam, lParam)) { ImGui::SetCurrentContext(oldContext); return 0; } ImGui::SetCurrentContext(oldContext); #endif switch (uMsg) { case WM_PAINT: { PAINTSTRUCT ps; HDC hdc = BeginPaint(hwnd, &ps); if (ctx->onDemandDrawing) { rhi.DisplayContext(ctx->handle); } EndPaint(hwnd, &ps); break; } } return ctx->oldWndProc(hwnd, uMsg, wParam, lParam); } RHI::Handle OpenGLRHI::CreateContext(RHI::WindowHandle windowHandle, bool useSharedContext) { GLContext *ctx = new GLContext; int handle = contextList.FindNull(); if (handle == -1) { handle = contextList.Append(ctx); } else { contextList[handle] = ctx; } ctx->handle = (Handle)handle; ctx->hwnd = (HWND)windowHandle; ctx->hdc = GetDC(ctx->hwnd); if (!ctx->hdc) { BE_FATALERROR("get DC: failed"); } SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR)ctx); ctx->oldWndProc = (WNDPROC)SetWindowLongPtr(ctx->hwnd, GWLP_WNDPROC, (LONG_PTR)NewWndProc); // All rendering contexts use an identical pixel format int pixelFormat = GetPixelFormat(mainContext->hdc); if (!SetPixelFormat(ctx->hdc, pixelFormat, nullptr)) { BE_FATALERROR("set pixel format: failed"); } BE_DLOG("set pixel format: ok\n"); if (!useSharedContext) { // main context will be reused ctx->state = mainContext->state; ctx->hrc = mainContext->hrc; ctx->defaultVAO = mainContext->defaultVAO; } else { ctx->state = new GLState; if (gwglCreateContextAttribsARB) { GLContextProfile contextProfile = CoreProfile; int contextMajorVersion = 0; int contextMinorVersion = 0; GetContextVersionFromCommandLine(contextMajorVersion, contextMinorVersion); ctx->hrc = CreateContextAttribs(ctx->hdc, mainContext->hrc, contextProfile, contextMajorVersion, contextMinorVersion); if (!ctx->hrc) { BE_FATALERROR("Couldn't create RC"); } wglMakeCurrent(nullptr, nullptr); } else { ctx->hrc = wglCreateContext(ctx->hdc); if (!ctx->hrc) { BE_FATALERROR("Couldn't create RC"); } // Allow sharing of all display list and texture objects between rendering context // // NOTE: The best time to call wglShareLists is after creating the GL contexts // you want to share, but before you create any objects in either of the contexts. // If you create objects, then there is a chance that wglShareLists will fail. wglShareLists(mainContext->hrc, ctx->hrc); } } #ifdef ENABLE_IMGUI ctx->imGuiContext = mainContext->imGuiContext; ctx->imGuiLastTime = PlatformTime::Seconds(); #endif SetContext((Handle)handle); ctx->defaultFramebuffer = 0; if (useSharedContext) { // Create default VAO for shared context gglGenVertexArrays(1, &ctx->defaultVAO); } SetDefaultState(); return (Handle)handle; } void OpenGLRHI::DestroyContext(Handle ctxHandle) { GLContext *ctx = contextList[ctxHandle]; if (ctx->hrc != mainContext->hrc) { // Delete default VAO for shared context gglDeleteVertexArrays(1, &ctx->defaultVAO); if (!wglDeleteContext(ctx->hrc)) { BE_FATALERROR("deleting RC: failed"); } BE_DLOG("deleting RC: ok\n"); delete ctx->state; } if (currentContext == ctx) { currentContext = mainContext; wglMakeCurrent(mainContext->hdc, mainContext->hrc); } ReleaseDC(ctx->hwnd, ctx->hdc); SetWindowLongPtr(ctx->hwnd, GWLP_WNDPROC, (LONG_PTR)ctx->oldWndProc); delete ctx; contextList[ctxHandle] = nullptr; } void OpenGLRHI::ActivateSurface(Handle ctxHandle, WindowHandle windowHandle) { GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; } void OpenGLRHI::DeactivateSurface(Handle ctxHandle) { GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; } void OpenGLRHI::SetContext(Handle ctxHandle) { HDC currentDC = wglGetCurrentDC(); GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; if (currentDC != ctx->hdc) { if (currentDC) { gglFlush(); } wglMakeCurrent(currentContext->hdc, nullptr); } if (!wglMakeCurrent(ctx->hdc, ctx->hrc)) { BE_FATALERROR("OpenGLRHI::SetContext: Couldn't make current context"); } this->currentContext = ctx; #ifdef ENABLE_IMGUI ImGui::SetCurrentContext(ctx->imGuiContext); #endif } void OpenGLRHI::SetContextDisplayFunc(Handle ctxHandle, DisplayContextFunc displayFunc, void *displayFuncDataPtr, bool onDemandDrawing) { GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; ctx->displayFunc = displayFunc; ctx->displayFuncDataPtr = displayFuncDataPtr; ctx->onDemandDrawing = onDemandDrawing; } void OpenGLRHI::DisplayContext(Handle ctxHandle) { GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; ctx->displayFunc(ctxHandle, ctx->displayFuncDataPtr); } RHI::WindowHandle OpenGLRHI::GetWindowHandleFromContext(Handle ctxHandle) { const GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; return (WindowHandle)ctx->hwnd; } void OpenGLRHI::GetDisplayMetrics(Handle ctxHandle, DisplayMetrics *displayMetrics) const { const GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; #if 0 //DPI_AWARENESS dpiAwareness = GetAwarenessFromDpiAwarenessContext(GetWindowDpiAwarenessContext(ctx->hwnd)); // Number of pixels per logical inch along the screen size. // In a system with multiple display monitors, this value is the same for all monitors. //int dpi = GetDpiForWindow(ctx->hwnd); int dpi = GetDeviceCaps(ctx->hdc, LOGPIXELSX); float dpiScale = Math::Round(dpi / 96.0f); #else float dpiScale = 1.0f; #endif RECT rc; GetClientRect(ctx->hwnd, &rc); displayMetrics->screenWidth = rc.right / dpiScale; displayMetrics->screenHeight = rc.bottom / dpiScale; displayMetrics->backingWidth = rc.right; displayMetrics->backingHeight = rc.bottom; displayMetrics->safeAreaInsets.Set(0, 0, 0, 0); } bool OpenGLRHI::IsFullscreen() const { return deviceFullscreen; } bool OpenGLRHI::SetFullscreen(Handle ctxHandle, int width, int height) { HDC hdc; BE_LOG("Changing display setting...\n"); DEVMODE dm; memset(&dm, 0, sizeof(dm)); dm.dmSize = sizeof(dm); dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT; dm.dmPelsWidth = width; dm.dmPelsHeight = height; int bpp = 0; int hz = 0; if (bpp > 0) { dm.dmFields |= DM_BITSPERPEL; dm.dmBitsPerPel = bpp; } else { hdc = GetDC(GetDesktopWindow()); bpp = GetDeviceCaps(hdc, BITSPIXEL); ReleaseDC(nullptr, hdc); if (bpp < 16) { dm.dmFields |= DM_BITSPERPEL; dm.dmBitsPerPel = 16; BE_LOG("using color bits of %i\n", 16); } else { BE_LOG("using desktop display depth of %i\n", bpp); } } if (hz > 0) { dm.dmDisplayFrequency = hz; dm.dmFields |= DM_DISPLAYFREQUENCY; } if (ChangeDisplaySettings(&dm, CDS_FULLSCREEN) != DISP_CHANGE_SUCCESSFUL) { BE_WARNLOG("Can't change fullscreen mode"); return false; } deviceFullscreen = true; hdc = GetDC(GetDesktopWindow()); deviceBpp = GetDeviceCaps(hdc, BITSPIXEL); deviceHz = GetDeviceCaps(hdc, VREFRESH); ReleaseDC(nullptr, hdc); BE_LOG("set fullscreen mode: %ix%i %ibpp %ihz\n", width, height, deviceBpp, deviceHz); return true; } void OpenGLRHI::ResetFullscreen(Handle ctxHandle) { if (!deviceFullscreen) { return; } deviceFullscreen = false; BE_LOG("resetting display setting: "); if (ChangeDisplaySettings(nullptr, 0) == DISP_CHANGE_SUCCESSFUL) { BE_LOG("ok\n"); } else { BE_LOG("failed\n"); } HDC hdc = GetDC(nullptr); deviceBpp = GetDeviceCaps(hdc, BITSPIXEL); deviceHz = GetDeviceCaps(hdc, VREFRESH); ReleaseDC(nullptr, hdc); BE_LOG("set window mode: %ibpp %ihz\n", deviceBpp, deviceHz); } void OpenGLRHI::GetGammaRamp(unsigned short ramp[768]) const { ::GetDeviceGammaRamp(currentContext->hdc, ramp); } void OpenGLRHI::SetGammaRamp(unsigned short ramp[768]) const { ::SetDeviceGammaRamp(currentContext->hdc, ramp); } bool OpenGLRHI::SwapBuffers() { BE_PROFILE_CPU_SCOPE_STATIC("OpenGLRHI::SwapBuffers"); BE_PROFILE_GPU_SCOPE_STATIC("OpenGLRHI::SwapBuffers"); if (!gl_ignoreError.GetBool()) { CheckError("OpenGLRHI::SwapBuffers"); } if (gl_finish.GetBool()) { gglFinish(); } BOOL succeeded = ::SwapBuffers(currentContext->hdc); if (!succeeded) { Str lastErrorText = PlatformWinProcess::GetLastErrorText(); BE_WARNLOG("Failed to SwapBuffers : %s", lastErrorText.c_str()); return false; } if (gl_debug.IsModified()) { gl_debug.ClearModified(); if (gglext._GL_ARB_debug_output) { if (gl_debug.GetBool()) { gglDebugMessageCallbackARB(OpenGL::DebugCallback, nullptr); gglEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB); } else { gglDebugMessageCallbackARB(nullptr, nullptr); gglDisable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB); } } else { ggl_rebind(gl_debug.GetBool()); gwgl_rebind(gl_debug.GetBool()); } } return true; } void OpenGLRHI::SwapInterval(int interval) const { gwglSwapIntervalEXT(interval); } void OpenGLRHI::ImGuiCreateContext(GLContext *ctx) { // Setup Dear ImGui context ctx->imGuiContext = ImGui::CreateContext(); ImGui::SetCurrentContext(ctx->imGuiContext); ImGuiIO &io = ImGui::GetIO(); //io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls //io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad; // Enable Gamepad Controls // Setup Dear ImGui style ImGui::StyleColorsDark(); //ImGui::StyleColorsClassic(); io.IniFilename = nullptr; // Setup back-end capabilities flags io.BackendFlags |= ImGuiBackendFlags_HasMouseCursors; // We can honor GetMouseCursor() values (optional) //io.BackendFlags |= ImGuiBackendFlags_HasSetMousePos; // We can honor io.WantSetMousePos requests (optional, rarely used) io.BackendPlatformName = "OpenGLRHI-Windows"; io.ImeWindowHandle = ctx->hwnd; // Keyboard mapping. ImGui will use those indices to peek into the io.KeysDown[] array that we will update during the application lifetime. io.KeyMap[ImGuiKey_Tab] = VK_TAB; io.KeyMap[ImGuiKey_LeftArrow] = VK_LEFT; io.KeyMap[ImGuiKey_RightArrow] = VK_RIGHT; io.KeyMap[ImGuiKey_UpArrow] = VK_UP; io.KeyMap[ImGuiKey_DownArrow] = VK_DOWN; io.KeyMap[ImGuiKey_PageUp] = VK_PRIOR; io.KeyMap[ImGuiKey_PageDown] = VK_NEXT; io.KeyMap[ImGuiKey_Home] = VK_HOME; io.KeyMap[ImGuiKey_End] = VK_END; io.KeyMap[ImGuiKey_Insert] = VK_INSERT; io.KeyMap[ImGuiKey_Delete] = VK_DELETE; io.KeyMap[ImGuiKey_Backspace] = VK_BACK; io.KeyMap[ImGuiKey_Space] = VK_SPACE; io.KeyMap[ImGuiKey_Enter] = VK_RETURN; io.KeyMap[ImGuiKey_Escape] = VK_ESCAPE; io.KeyMap[ImGuiKey_KeyPadEnter] = VK_RETURN; io.KeyMap[ImGuiKey_A] = 'A'; io.KeyMap[ImGuiKey_C] = 'C'; io.KeyMap[ImGuiKey_V] = 'V'; io.KeyMap[ImGuiKey_X] = 'X'; io.KeyMap[ImGuiKey_Y] = 'Y'; io.KeyMap[ImGuiKey_Z] = 'Z'; ImGui_ImplOpenGL_Init("#version 130"); // Load Fonts // - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use ImGui::PushFont()/PopFont() to select them. // - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple. // - If the file cannot be loaded, the function will return NULL. Please handle those errors in your application (e.g. use an assertion, or display an error and quit). // - The fonts will be rasterized at a given size (w/ oversampling) and stored into a texture when calling ImFontAtlas::Build()/GetTexDataAsXXXX(), which ImGui_ImplXXXX_NewFrame below will call. // - Read 'docs/FONTS.txt' for more instructions and details. // - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ ! //io.Fonts->AddFontDefault(); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf", 16.0f); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf", 15.0f); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf", 16.0f); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/ProggyTiny.ttf", 10.0f); //ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf", 18.0f, NULL, io.Fonts->GetGlyphRangesJapanese()); //IM_ASSERT(font != NULL); //io.Fonts->AddFontFromFileTTF("../../../Data/EngineFonts/consola.ttf", 13); } void OpenGLRHI::ImGuiDestroyContext(GLContext *ctx) { ImGui_ImplOpenGL_Shutdown(); ImGui::DestroyContext(ctx->imGuiContext); } void OpenGLRHI::ImGuiBeginFrame(Handle ctxHandle) { BE_PROFILE_CPU_SCOPE_STATIC("OpenGLRHI::ImGuiBeginFrame"); ImGui_ImplOpenGL_ValidateFrame(); GLContext *ctx = ctxHandle == NullContext ? mainContext : contextList[ctxHandle]; DisplayMetrics dm; GetDisplayMetrics(ctxHandle, &dm); // Setup display size (every frame to accommodate for window resizing) ImGuiIO &io = ImGui::GetIO(); io.DisplaySize = ImVec2(dm.screenWidth, dm.screenHeight); // Setup time step double currentTime = PlatformTime::Seconds(); io.DeltaTime = currentTime - ctx->imGuiLastTime; ctx->imGuiLastTime = currentTime; // Read keyboard modifiers inputs io.KeyCtrl = (::GetKeyState(VK_CONTROL) & 0x8000) != 0; io.KeyShift = (::GetKeyState(VK_SHIFT) & 0x8000) != 0; io.KeyAlt = (::GetKeyState(VK_MENU) & 0x8000) != 0; io.KeySuper = false; // io.KeysDown[], io.MousePos, io.MouseDown[], io.MouseWheel: filled by the WndProc handler below. // Update OS mouse position ImGui_ImplWin32_UpdateMousePos(ctx->hwnd); // Update OS mouse cursor with the cursor requested by imgui ImGuiMouseCursor mouse_cursor = io.MouseDrawCursor ? ImGuiMouseCursor_None : ImGui::GetMouseCursor(); if (g_LastMouseCursor != mouse_cursor) { g_LastMouseCursor = mouse_cursor; ImGui_ImplWin32_UpdateMouseCursor(); } ImGui::NewFrame(); } void OpenGLRHI::ImGuiRender() { BE_PROFILE_CPU_SCOPE_STATIC("OpenGLRHI::ImGuiRender"); BE_PROFILE_GPU_SCOPE_STATIC("OpenGLRHI::ImGuiRender"); ImGui::Render(); bool sRGBWriteEnabled = OpenGL::SupportsFrameBufferSRGB() && IsSRGBWriteEnabled(); if (sRGBWriteEnabled) { SetSRGBWrite(false); } ImGui_ImplOpenGL_RenderDrawData(ImGui::GetDrawData()); if (sRGBWriteEnabled) { SetSRGBWrite(true); } } void OpenGLRHI::ImGuiEndFrame() { BE_PROFILE_CPU_SCOPE_STATIC("OpenGLRHI::ImGuiEndFrame"); ImGui::EndFrame(); } BE_NAMESPACE_END
18,768
5,169
{ "name": "ISUActionMenu", "version": "0.1.3", "summary": "An interactive action menu button with gesture.", "homepage": "https://github.com/citysite102", "license": "MIT", "authors": { "Samuel": "<EMAIL>" }, "source": { "git": "https://github.com/citysite102/ISUActionMenu.git", "tag": "0.1.3" }, "social_media_url": "https://twitter.com/citysite102", "platforms": { "ios": "8.0" }, "ios": { "frameworks": [ "UIKit", "QuartzCore" ] }, "source_files": "ISUActionMenu/Classes/**/*", "resource_bundles": { "ISUActionMenu": [ "ISUActionMenu/Assets/*.png" ] } }
286
713
<gh_stars>100-1000 package org.infinispan.notifications.cachemanagerlistener.event; import java.util.List; import org.infinispan.remoting.transport.Address; /** * This event is passed in to any method annotated with {@link org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged}. * It represents a JGroups view change event. * * @author <a href="mailto:<EMAIL>"><NAME></a> * @since 4.0 */ public interface ViewChangedEvent extends Event { /** * Gets the current list of members. * * @return the new view associated with this view change. List cannot be null. */ List<Address> getNewMembers(); /** * Gets the previous list of members. * * @return the old view associated with this view change. List cannot be null. */ List<Address> getOldMembers(); Address getLocalAddress(); /** * Get JGroups view id. * @return */ int getViewId(); boolean isMergeView(); }
315
417
/******************************************************************************* * Copyright 2019 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files(the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and / or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions : * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. ******************************************************************************/ #include <gmock/gmock.h> #include <gtest/gtest.h> #include <cstdlib> #include "gts/platform/Atomic.h" #include "gts/analysis/Trace.h" #include "gts/micro_scheduler/WorkerPool.h" #include "gts/micro_scheduler/MicroScheduler.h" #include "SchedulerTestsCommon.h" using namespace gts; namespace testing { //////////////////////////////////////////////////////////////////////////////// struct SpawnedTaskCounter : public Task { SpawnedTaskCounter(gts::Atomic<uint32_t>* taskCountByThreadIdx) : taskCountByThreadIdx(taskCountByThreadIdx) {} //-------------------------------------------------------------------------- // Count the task when its executed. Task* execute(TaskContext const& ctx) { taskCountByThreadIdx[ctx.workerId.localId()].fetch_add(1, memory_order::release); return nullptr; } gts::Atomic<uint32_t>* taskCountByThreadIdx; }; //////////////////////////////////////////////////////////////////////////////// struct SpawnedTaskCounterGenerator : public Task { //-------------------------------------------------------------------------- // Root task for TestQueueTask Task* execute(TaskContext const& ctx) { addRef(numTasks + 1); for (uint32_t ii = 0; ii < numTasks; ++ii) { Task* pTask = ctx.pMicroScheduler->allocateTask<SpawnedTaskCounter>(taskCountByThreadIdx); addChildTaskWithoutRef(pTask); ctx.pMicroScheduler->spawnTask(pTask); } waitForAll(); return nullptr; } uint32_t numTasks; gts::Atomic<uint32_t>* taskCountByThreadIdx; }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // SPAWN TASK TESTS: //------------------------------------------------------------------------------ void TestSpawnTask(const uint32_t numTasks, const uint32_t threadCount) { WorkerPool workerPool; workerPool.initialize(threadCount); MicroScheduler taskScheduler; taskScheduler.initialize(&workerPool); // Create a counter per thread. std::vector<gts::Atomic<uint32_t>> taskCountByThreadIdx(threadCount); for (auto& counter : taskCountByThreadIdx) { counter.store(0, memory_order::release); } SpawnedTaskCounterGenerator* pRootTask = taskScheduler.allocateTask<SpawnedTaskCounterGenerator>(); pRootTask->numTasks = numTasks; pRootTask->taskCountByThreadIdx = taskCountByThreadIdx.data(); taskScheduler.spawnTaskAndWait(pRootTask); // Total up the counters uint32_t numTasksCompleted = 0; for (auto& counter : taskCountByThreadIdx) { numTasksCompleted += counter.load(memory_order::acquire); } // Verify all the tasks ran. ASSERT_EQ(numTasks, numTasksCompleted); taskScheduler.shutdown(); } //------------------------------------------------------------------------------ TEST(MicroScheduler, spawnOneTask) { for (uint32_t ii = 0; ii < ITERATIONS; ++ii) { GTS_TRACE_FRAME_MARK(gts::analysis::CaptureMask::ALL); TestSpawnTask(1, 1); } } //------------------------------------------------------------------------------ TEST(MicroScheduler, spawnTaskSingleThreaded) { for (uint32_t ii = 0; ii < ITERATIONS; ++ii) { GTS_TRACE_FRAME_MARK(gts::analysis::CaptureMask::ALL); TestSpawnTask(TEST_DEPTH, 1); } } //------------------------------------------------------------------------------ TEST(MicroScheduler, spawnTaskMultiThreaded) { for (uint32_t ii = 0; ii < ITERATIONS_CONCUR; ++ii) { GTS_TRACE_FRAME_MARK(gts::analysis::CaptureMask::ALL); TestSpawnTask(TEST_DEPTH, gts::Thread::getHardwareThreadCount()); } } } // namespace testing
1,615
344
<filename>modules/audio_processing/test/api_call_statistics.h /* * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #ifndef MODULES_AUDIO_PROCESSING_TEST_API_CALL_STATISTICS_H_ #define MODULES_AUDIO_PROCESSING_TEST_API_CALL_STATISTICS_H_ #include <string> #include <vector> namespace webrtc { namespace test { // Collects statistics about the API call durations. class ApiCallStatistics { public: enum class CallType { kRender, kCapture }; // Adds a new datapoint. void Add(int64_t duration_nanos, CallType call_type); // Prints out a report of the statistics. void PrintReport() const; // Writes the call information to a file. void WriteReportToFile(const std::string& filename) const; private: struct CallData { CallData(int64_t duration_nanos, CallType call_type); int64_t duration_nanos; CallType call_type; }; std::vector<CallData> calls_; }; } // namespace test } // namespace webrtc #endif // MODULES_AUDIO_PROCESSING_TEST_API_CALL_STATISTICS_H_
447
807
<gh_stars>100-1000 {"HandlerID":"4","GameID":"17520","Title":"Synergy Handler","ExeName":"synergy","Dev":"d1maxa","MD5":null,"V":1,"PlatV":9}
56
1,233
<reponame>calvin681/mantis /* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.core; import io.mantisrx.common.WorkerPorts; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.parameter.Parameter; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import java.net.URL; import java.util.LinkedList; import java.util.List; public class ExecuteStageRequest { private final boolean hasJobMaster; private final long subscriptionTimeoutSecs; private final long minRuntimeSecs; private final WorkerPorts workerPorts; private String jobName; private String jobId; private int workerIndex; private int workerNumber; private URL jobJarUrl; private int stage; private int totalNumStages; private int metricsPort; private List<Integer> ports = new LinkedList<Integer>(); private long timeoutToReportStart; private List<Parameter> parameters = new LinkedList<Parameter>(); private SchedulingInfo schedulingInfo; private MantisJobDurationType durationType; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public ExecuteStageRequest(@JsonProperty("jobName") String jobName, @JsonProperty("jobID") String jobId, @JsonProperty("workerIndex") int workerIndex, @JsonProperty("workerNumber") int workerNumber, @JsonProperty("jobJarUrl") URL jobJarUrl, @JsonProperty("stage") int stage, @JsonProperty("totalNumStages") int totalNumStages, @JsonProperty("ports") List<Integer> ports, @JsonProperty("timeoutToReportStart") long timeoutToReportStart, @JsonProperty("metricsPort") int metricsPort, @JsonProperty("parameters") List<Parameter> parameters, @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo, @JsonProperty("durationType") MantisJobDurationType durationType, @JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs, @JsonProperty("minRuntimeSecs") long minRuntimeSecs, @JsonProperty("workerPorts") WorkerPorts workerPorts ) { this.jobName = jobName; this.jobId = jobId; this.workerIndex = workerIndex; this.workerNumber = workerNumber; this.jobJarUrl = jobJarUrl; this.stage = stage; this.totalNumStages = totalNumStages; this.ports.addAll(ports); this.metricsPort = metricsPort; this.timeoutToReportStart = timeoutToReportStart; if (parameters != null) { this.parameters = parameters; } else { this.parameters = new LinkedList<>(); } this.schedulingInfo = schedulingInfo; this.durationType = durationType; hasJobMaster = schedulingInfo != null && schedulingInfo.forStage(0) != null; this.subscriptionTimeoutSecs = subscriptionTimeoutSecs; this.minRuntimeSecs = minRuntimeSecs; this.workerPorts = workerPorts; } public SchedulingInfo getSchedulingInfo() { return schedulingInfo; } public List<Parameter> getParameters() { return parameters; } public int getMetricsPort() { return metricsPort; } public String getJobName() { return jobName; } public String getJobId() { return jobId; } public int getWorkerIndex() { return workerIndex; } public int getWorkerNumber() { return workerNumber; } public URL getJobJarUrl() { return jobJarUrl; } public int getStage() { return stage; } public int getTotalNumStages() { return totalNumStages; } public List<Integer> getPorts() { return ports; } public WorkerPorts getWorkerPorts() { return workerPorts; } public long getTimeoutToReportStart() { return timeoutToReportStart; } public MantisJobDurationType getDurationType() { return durationType; } public boolean getHasJobMaster() { return hasJobMaster; } public long getSubscriptionTimeoutSecs() { return subscriptionTimeoutSecs; } public long getMinRuntimeSecs() { return minRuntimeSecs; } @Override public String toString() { return "ExecuteStageRequest{" + "jobName='" + jobName + '\'' + ", jobId='" + jobId + '\'' + ", workerIndex=" + workerIndex + ", workerNumber=" + workerNumber + ", jobJarUrl=" + jobJarUrl + ", stage=" + stage + ", totalNumStages=" + totalNumStages + ", metricsPort=" + metricsPort + ", ports=" + ports + ", timeoutToReportStart=" + timeoutToReportStart + ", parameters=" + parameters + ", schedulingInfo=" + schedulingInfo + ", durationType=" + durationType + ", hasJobMaster=" + hasJobMaster + ", subscriptionTimeoutSecs=" + subscriptionTimeoutSecs + ", minRuntimeSecs=" + minRuntimeSecs + ", workerPorts=" + workerPorts + '}'; } }
2,672
569
<reponame>aossp/nfcgate package de.tu_darmstadt.seemoo.nfcgate.network.threading; import android.util.Log; import java.io.DataInputStream; import java.io.IOException; import de.tu_darmstadt.seemoo.nfcgate.network.data.NetworkStatus; import de.tu_darmstadt.seemoo.nfcgate.network.ServerConnection; public class ReceiveThread extends BaseThread { private static final String TAG = "ReceiveThread"; // references private DataInputStream mReadStream; /** * Waits on sendQueue and sends the data over the specified stream */ public ReceiveThread(ServerConnection connection) { super(connection); } @Override void initThread() throws IOException { mReadStream = new DataInputStream(mSocket.getInputStream()); } /** * Tries to send one item from the sendQueue. */ @Override void runInternal() throws IOException { // block and wait for the 4 byte length prefix int length = mReadStream.readInt(); // block and wait for actual data byte[] data = new byte[length]; mReadStream.readFully(data); Log.v(TAG, "Got message of " + length + " bytes"); // deliver data mConnection.onReceive(data); } @Override void onError(Exception e) { Log.e(TAG, "Receive onError", e); mConnection.reportStatus(NetworkStatus.ERROR); } }
522
6,210
package org.apache.cassandra.stress.settings; /* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * */ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import com.google.common.collect.ImmutableList; public enum Command { READ(false, "standard1", "Multiple concurrent reads - the cluster must first be populated by a write test", CommandCategory.BASIC ), WRITE(true, "standard1", "insert", "Multiple concurrent writes against the cluster", CommandCategory.BASIC ), MIXED(true, null, "Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test", CommandCategory.MIXED ), COUNTER_WRITE(true, "counter1", "counter_add", "Multiple concurrent updates of counters.", CommandCategory.BASIC ), COUNTER_READ(false, "counter1", "counter_get", "Multiple concurrent reads of counters. The cluster must first be populated by a counterwrite test.", CommandCategory.BASIC ), USER(true, null, "Interleaving of user provided queries, with configurable ratio and distribution", CommandCategory.USER ), HELP(false, null, "-?", "Print help for a command or option", null), PRINT(false, null, "Inspect the output of a distribution definition", null), LEGACY(false, null, "Legacy support mode", null), VERSION(false, null, "Print the version of cassandra stress", null) ; private static final Map<String, Command> LOOKUP; static { final Map<String, Command> lookup = new HashMap<>(); for (Command cmd : values()) { for (String name : cmd.names) lookup.put(name, cmd); } LOOKUP = lookup; } public static Command get(String command) { return LOOKUP.get(command.toLowerCase()); } public final boolean updates; public final CommandCategory category; public final List<String> names; public final String description; public final String table; Command(boolean updates, String table, String description, CommandCategory category) { this(updates, table, null, description, category); } Command(boolean updates, String table, String extra, String description, CommandCategory category) { this.table = table; this.updates = updates; this.category = category; List<String> names = new ArrayList<>(); names.add(this.toString().toLowerCase()); names.add(this.toString().replaceAll("_", "").toLowerCase()); if (extra != null) { names.add(extra.toLowerCase()); names.add(extra.replaceAll("_", "").toLowerCase()); } this.names = ImmutableList.copyOf(names); this.description = description; } public void printHelp() { helpPrinter().run(); } public final Runnable helpPrinter() { switch (this) { case PRINT: return SettingsMisc.printHelpPrinter(); case HELP: return SettingsMisc.helpHelpPrinter(); case LEGACY: return Legacy.helpPrinter(); } switch (category) { case USER: return SettingsCommandUser.helpPrinter(); case BASIC: return SettingsCommandPreDefined.helpPrinter(this); case MIXED: return SettingsCommandPreDefinedMixed.helpPrinter(); } throw new AssertionError(); } }
1,729
1,303
<filename>src/backend/access/brin/brin_tuple.c /* * brin_tuples.c * Method implementations for tuples in BRIN indexes. * * Intended usage is that code outside this file only deals with * BrinMemTuples, and convert to and from the on-disk representation through * functions in this file. * * NOTES * * A BRIN tuple is similar to a heap tuple, with a few key differences. The * first interesting difference is that the tuple header is much simpler, only * containing its total length and a small area for flags. Also, the stored * data does not match the relation tuple descriptor exactly: for each * attribute in the descriptor, the index tuple carries an arbitrary number * of values, depending on the opclass. * * Also, for each column of the index relation there are two null bits: one * (hasnulls) stores whether any tuple within the page range has that column * set to null; the other one (allnulls) stores whether the column values are * all null. If allnulls is true, then the tuple data area does not contain * values for that column at all; whereas it does if the hasnulls is set. * Note the size of the null bitmask may not be the same as that of the * datum array. * * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION * src/backend/access/brin/brin_tuple.c */ #include "postgres.h" #include "access/htup_details.h" #include "access/brin_tuple.h" #include "access/tupdesc.h" #include "access/tupmacs.h" #include "utils/datum.h" #include "utils/memutils.h" static inline void brin_deconstruct_tuple(BrinDesc *brdesc, char *tp, bits8 *nullbits, bool nulls, Datum *values, bool *allnulls, bool *hasnulls); /* * Return a tuple descriptor used for on-disk storage of BRIN tuples. */ static TupleDesc brtuple_disk_tupdesc(BrinDesc *brdesc) { /* We cache these in the BrinDesc */ if (brdesc->bd_disktdesc == NULL) { int i; int j; AttrNumber attno = 1; TupleDesc tupdesc; MemoryContext oldcxt; /* make sure it's in the bdesc's context */ oldcxt = MemoryContextSwitchTo(brdesc->bd_context); tupdesc = CreateTemplateTupleDesc(brdesc->bd_totalstored, false); for (i = 0; i < brdesc->bd_tupdesc->natts; i++) { for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++) TupleDescInitEntry(tupdesc, attno++, NULL, brdesc->bd_info[i]->oi_typcache[j]->type_id, -1, 0); } MemoryContextSwitchTo(oldcxt); brdesc->bd_disktdesc = tupdesc; } return brdesc->bd_disktdesc; } /* * Generate a new on-disk tuple to be inserted in a BRIN index. * * See brin_form_placeholder_tuple if you touch this. */ BrinTuple * brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple, Size *size) {// #lizard forgives Datum *values; bool *nulls; bool anynulls = false; BrinTuple *rettuple; int keyno; int idxattno; uint16 phony_infomask = 0; bits8 *phony_nullbitmap; Size len, hoff, data_len; Assert(brdesc->bd_totalstored > 0); values = (Datum *) palloc(sizeof(Datum) * brdesc->bd_totalstored); nulls = (bool *) palloc0(sizeof(bool) * brdesc->bd_totalstored); phony_nullbitmap = (bits8 *) palloc(sizeof(bits8) * BITMAPLEN(brdesc->bd_totalstored)); /* * Set up the values/nulls arrays for heap_fill_tuple */ idxattno = 0; for (keyno = 0; keyno < brdesc->bd_tupdesc->natts; keyno++) { int datumno; /* * "allnulls" is set when there's no nonnull value in any row in the * column; when this happens, there is no data to store. Thus set the * nullable bits for all data elements of this column and we're done. */ if (tuple->bt_columns[keyno].bv_allnulls) { for (datumno = 0; datumno < brdesc->bd_info[keyno]->oi_nstored; datumno++) nulls[idxattno++] = true; anynulls = true; continue; } /* * The "hasnulls" bit is set when there are some null values in the * data. We still need to store a real value, but the presence of * this means we need a null bitmap. */ if (tuple->bt_columns[keyno].bv_hasnulls) anynulls = true; for (datumno = 0; datumno < brdesc->bd_info[keyno]->oi_nstored; datumno++) values[idxattno++] = tuple->bt_columns[keyno].bv_values[datumno]; } /* Assert we did not overrun temp arrays */ Assert(idxattno <= brdesc->bd_totalstored); /* compute total space needed */ len = SizeOfBrinTuple; if (anynulls) { /* * We need a double-length bitmap on an on-disk BRIN index tuple; the * first half stores the "allnulls" bits, the second stores * "hasnulls". */ len += BITMAPLEN(brdesc->bd_tupdesc->natts * 2); } len = hoff = MAXALIGN(len); data_len = heap_compute_data_size(brtuple_disk_tupdesc(brdesc), values, nulls); len += data_len; len = MAXALIGN(len); rettuple = palloc0(len); rettuple->bt_blkno = blkno; rettuple->bt_info = hoff; /* Assert that hoff fits in the space available */ Assert((rettuple->bt_info & BRIN_OFFSET_MASK) == hoff); /* * The infomask and null bitmap as computed by heap_fill_tuple are useless * to us. However, that function will not accept a null infomask; and we * need to pass a valid null bitmap so that it will correctly skip * outputting null attributes in the data area. */ heap_fill_tuple(brtuple_disk_tupdesc(brdesc), values, nulls, (char *) rettuple + hoff, data_len, &phony_infomask, phony_nullbitmap); /* done with these */ pfree(values); pfree(nulls); pfree(phony_nullbitmap); /* * Now fill in the real null bitmasks. allnulls first. */ if (anynulls) { bits8 *bitP; int bitmask; rettuple->bt_info |= BRIN_NULLS_MASK; /* * Note that we reverse the sense of null bits in this module: we * store a 1 for a null attribute rather than a 0. So we must reverse * the sense of the att_isnull test in br_deconstruct_tuple as well. */ bitP = ((bits8 *) ((char *) rettuple + SizeOfBrinTuple)) - 1; bitmask = HIGHBIT; for (keyno = 0; keyno < brdesc->bd_tupdesc->natts; keyno++) { if (bitmask != HIGHBIT) bitmask <<= 1; else { bitP += 1; *bitP = 0x0; bitmask = 1; } if (!tuple->bt_columns[keyno].bv_allnulls) continue; *bitP |= bitmask; } /* hasnulls bits follow */ for (keyno = 0; keyno < brdesc->bd_tupdesc->natts; keyno++) { if (bitmask != HIGHBIT) bitmask <<= 1; else { bitP += 1; *bitP = 0x0; bitmask = 1; } if (!tuple->bt_columns[keyno].bv_hasnulls) continue; *bitP |= bitmask; } bitP = ((bits8 *) (rettuple + SizeOfBrinTuple)) - 1; } if (tuple->bt_placeholder) rettuple->bt_info |= BRIN_PLACEHOLDER_MASK; *size = len; return rettuple; } /* * Generate a new on-disk tuple with no data values, marked as placeholder. * * This is a cut-down version of brin_form_tuple. */ BrinTuple * brin_form_placeholder_tuple(BrinDesc *brdesc, BlockNumber blkno, Size *size) { Size len; Size hoff; BrinTuple *rettuple; int keyno; bits8 *bitP; int bitmask; /* compute total space needed: always add nulls */ len = SizeOfBrinTuple; len += BITMAPLEN(brdesc->bd_tupdesc->natts * 2); len = hoff = MAXALIGN(len); rettuple = palloc0(len); rettuple->bt_blkno = blkno; rettuple->bt_info = hoff; rettuple->bt_info |= BRIN_NULLS_MASK | BRIN_PLACEHOLDER_MASK; bitP = ((bits8 *) ((char *) rettuple + SizeOfBrinTuple)) - 1; bitmask = HIGHBIT; /* set allnulls true for all attributes */ for (keyno = 0; keyno < brdesc->bd_tupdesc->natts; keyno++) { if (bitmask != HIGHBIT) bitmask <<= 1; else { bitP += 1; *bitP = 0x0; bitmask = 1; } *bitP |= bitmask; } /* no need to set hasnulls */ *size = len; return rettuple; } /* * Free a tuple created by brin_form_tuple */ void brin_free_tuple(BrinTuple *tuple) { pfree(tuple); } /* * Given a brin tuple of size len, create a copy of it. If 'dest' is not * NULL, its size is destsz, and can be used as output buffer; if the tuple * to be copied does not fit, it is enlarged by repalloc, and the size is * updated to match. This avoids palloc/free cycles when many brin tuples * are being processed in loops. */ BrinTuple * brin_copy_tuple(BrinTuple *tuple, Size len, BrinTuple *dest, Size *destsz) { if (!destsz || *destsz == 0) dest = palloc(len); else if (len > *destsz) { dest = repalloc(dest, len); *destsz = len; } memcpy(dest, tuple, len); return dest; } /* * Return whether two BrinTuples are bitwise identical. */ bool brin_tuples_equal(const BrinTuple *a, Size alen, const BrinTuple *b, Size blen) { if (alen != blen) return false; if (memcmp(a, b, alen) != 0) return false; return true; } /* * Create a new BrinMemTuple from scratch, and initialize it to an empty * state. * * Note: we don't provide any means to free a deformed tuple, so make sure to * use a temporary memory context. */ BrinMemTuple * brin_new_memtuple(BrinDesc *brdesc) { BrinMemTuple *dtup; long basesize; basesize = MAXALIGN(sizeof(BrinMemTuple) + sizeof(BrinValues) * brdesc->bd_tupdesc->natts); dtup = palloc0(basesize + sizeof(Datum) * brdesc->bd_totalstored); dtup->bt_values = palloc(sizeof(Datum) * brdesc->bd_totalstored); dtup->bt_allnulls = palloc(sizeof(bool) * brdesc->bd_tupdesc->natts); dtup->bt_hasnulls = palloc(sizeof(bool) * brdesc->bd_tupdesc->natts); dtup->bt_context = AllocSetContextCreate(CurrentMemoryContext, "brin dtuple", ALLOCSET_DEFAULT_SIZES); brin_memtuple_initialize(dtup, brdesc); return dtup; } /* * Reset a BrinMemTuple to initial state. We return the same tuple, for * notational convenience. */ BrinMemTuple * brin_memtuple_initialize(BrinMemTuple *dtuple, BrinDesc *brdesc) { int i; char *currdatum; MemoryContextReset(dtuple->bt_context); currdatum = (char *) dtuple + MAXALIGN(sizeof(BrinMemTuple) + sizeof(BrinValues) * brdesc->bd_tupdesc->natts); for (i = 0; i < brdesc->bd_tupdesc->natts; i++) { dtuple->bt_columns[i].bv_allnulls = true; dtuple->bt_columns[i].bv_hasnulls = false; dtuple->bt_columns[i].bv_attno = i + 1; dtuple->bt_columns[i].bv_allnulls = true; dtuple->bt_columns[i].bv_hasnulls = false; dtuple->bt_columns[i].bv_values = (Datum *) currdatum; currdatum += sizeof(Datum) * brdesc->bd_info[i]->oi_nstored; } return dtuple; } /* * Convert a BrinTuple back to a BrinMemTuple. This is the reverse of * brin_form_tuple. * * As an optimization, the caller can pass a previously allocated 'dMemtuple'. * This avoids having to allocate it here, which can be useful when this * function is called many times in a loop. It is caller's responsibility * that the given BrinMemTuple matches what we need here. * * Note we don't need the "on disk tupdesc" here; we rely on our own routine to * deconstruct the tuple from the on-disk format. */ BrinMemTuple * brin_deform_tuple(BrinDesc *brdesc, BrinTuple *tuple, BrinMemTuple *dMemtuple) { BrinMemTuple *dtup; Datum *values; bool *allnulls; bool *hasnulls; char *tp; bits8 *nullbits; int keyno; int valueno; MemoryContext oldcxt; dtup = dMemtuple ? brin_memtuple_initialize(dMemtuple, brdesc) : brin_new_memtuple(brdesc); if (BrinTupleIsPlaceholder(tuple)) dtup->bt_placeholder = true; dtup->bt_blkno = tuple->bt_blkno; values = dtup->bt_values; allnulls = dtup->bt_allnulls; hasnulls = dtup->bt_hasnulls; tp = (char *) tuple + BrinTupleDataOffset(tuple); if (BrinTupleHasNulls(tuple)) nullbits = (bits8 *) ((char *) tuple + SizeOfBrinTuple); else nullbits = NULL; brin_deconstruct_tuple(brdesc, tp, nullbits, BrinTupleHasNulls(tuple), values, allnulls, hasnulls); /* * Iterate to assign each of the values to the corresponding item in the * values array of each column. The copies occur in the tuple's context. */ oldcxt = MemoryContextSwitchTo(dtup->bt_context); for (valueno = 0, keyno = 0; keyno < brdesc->bd_tupdesc->natts; keyno++) { int i; if (allnulls[keyno]) { valueno += brdesc->bd_info[keyno]->oi_nstored; continue; } /* * We would like to skip datumCopy'ing the values datum in some cases, * caller permitting ... */ for (i = 0; i < brdesc->bd_info[keyno]->oi_nstored; i++) dtup->bt_columns[keyno].bv_values[i] = datumCopy(values[valueno++], brdesc->bd_info[keyno]->oi_typcache[i]->typbyval, brdesc->bd_info[keyno]->oi_typcache[i]->typlen); dtup->bt_columns[keyno].bv_hasnulls = hasnulls[keyno]; dtup->bt_columns[keyno].bv_allnulls = false; } MemoryContextSwitchTo(oldcxt); return dtup; } /* * brin_deconstruct_tuple * Guts of attribute extraction from an on-disk BRIN tuple. * * Its arguments are: * brdesc BRIN descriptor for the stored tuple * tp pointer to the tuple data area * nullbits pointer to the tuple nulls bitmask * nulls "has nulls" bit in tuple infomask * values output values, array of size brdesc->bd_totalstored * allnulls output "allnulls", size brdesc->bd_tupdesc->natts * hasnulls output "hasnulls", size brdesc->bd_tupdesc->natts * * Output arrays must have been allocated by caller. */ static inline void brin_deconstruct_tuple(BrinDesc *brdesc, char *tp, bits8 *nullbits, bool nulls, Datum *values, bool *allnulls, bool *hasnulls) {// #lizard forgives int attnum; int stored; TupleDesc diskdsc; long off; /* * First iterate to natts to obtain both null flags for each attribute. * Note that we reverse the sense of the att_isnull test, because we store * 1 for a null value (rather than a 1 for a not null value as is the * att_isnull convention used elsewhere.) See brin_form_tuple. */ for (attnum = 0; attnum < brdesc->bd_tupdesc->natts; attnum++) { /* * the "all nulls" bit means that all values in the page range for * this column are nulls. Therefore there are no values in the tuple * data area. */ allnulls[attnum] = nulls && !att_isnull(attnum, nullbits); /* * the "has nulls" bit means that some tuples have nulls, but others * have not-null values. Therefore we know the tuple contains data * for this column. * * The hasnulls bits follow the allnulls bits in the same bitmask. */ hasnulls[attnum] = nulls && !att_isnull(brdesc->bd_tupdesc->natts + attnum, nullbits); } /* * Iterate to obtain each attribute's stored values. Note that since we * may reuse attribute entries for more than one column, we cannot cache * offsets here. */ diskdsc = brtuple_disk_tupdesc(brdesc); stored = 0; off = 0; for (attnum = 0; attnum < brdesc->bd_tupdesc->natts; attnum++) { int datumno; if (allnulls[attnum]) { stored += brdesc->bd_info[attnum]->oi_nstored; continue; } for (datumno = 0; datumno < brdesc->bd_info[attnum]->oi_nstored; datumno++) { Form_pg_attribute thisatt = diskdsc->attrs[stored]; if (thisatt->attlen == -1) { off = att_align_pointer(off, thisatt->attalign, -1, tp + off); } else { /* not varlena, so safe to use att_align_nominal */ off = att_align_nominal(off, thisatt->attalign); } values[stored++] = fetchatt(thisatt, tp + off); off = att_addlength_pointer(off, thisatt->attlen, tp + off); } } }
8,443
1,248
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.atlas.model.typedef; import org.apache.atlas.model.ModelTestUtil; import org.apache.atlas.type.AtlasType; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.assertFalse; public class TestAtlasClassificationDef { @Test public void testClassificationDefSerDeEmpty() { AtlasClassificationDef classificationDef = new AtlasClassificationDef("emptyClassificationDef"); String jsonString = AtlasType.toJson(classificationDef); AtlasClassificationDef classificationDef2 = AtlasType.fromJson(jsonString, AtlasClassificationDef.class); assertEquals(classificationDef2, classificationDef, "Incorrect serialization/deserialization of AtlasClassificationDef"); } @Test public void testClassificationDefSerDe() { AtlasClassificationDef classificationDef = ModelTestUtil.getClassificationDef(); String jsonString = AtlasType.toJson(classificationDef); AtlasClassificationDef classificationDef2 = AtlasType.fromJson(jsonString, AtlasClassificationDef.class); assertEquals(classificationDef2, classificationDef, "Incorrect serialization/deserialization of AtlasClassificationDef"); } @Test public void testClassificationDefSerDeWithSuperType() { AtlasClassificationDef classificationDef = ModelTestUtil.getClassificationDefWithSuperType(); String jsonString = AtlasType.toJson(classificationDef); AtlasClassificationDef classificationDef2 = AtlasType.fromJson(jsonString, AtlasClassificationDef.class); assertEquals(classificationDef2, classificationDef, "Incorrect serialization/deserialization of AtlasClassificationDef with superType"); } @Test public void testClassificationDefSerDeWithSuperTypes() { AtlasClassificationDef classificationDef = ModelTestUtil.getClassificationDefWithSuperTypes(); String jsonString = AtlasType.toJson(classificationDef); AtlasClassificationDef classificationDef2 = AtlasType.fromJson(jsonString, AtlasClassificationDef.class); assertEquals(classificationDef2, classificationDef, "Incorrect serialization/deserialization of AtlasClassificationDef with superTypes"); } @Test public void testClassificationDefHasSuperTypeWithNoSuperType() { AtlasClassificationDef classificationDef = ModelTestUtil.getClassificationDef(); for (String superType : classificationDef.getSuperTypes()) { assertTrue(classificationDef.hasSuperType(superType)); } assertFalse(classificationDef.hasSuperType("01234-xyzabc-;''-)(")); } @Test public void testClassificationDefHasSuperTypeWithSuperType() { AtlasClassificationDef classificationDef = ModelTestUtil.getClassificationDefWithSuperTypes(); for (String superType : classificationDef.getSuperTypes()) { assertTrue(classificationDef.hasSuperType(superType)); } assertFalse(classificationDef.hasSuperType("01234-xyzabc-;''-)(")); } }
1,268
28,056
<gh_stars>1000+ package com.alibaba.fastjson.deserializer.issues3796.bean; import java.util.List; public class ObjectF2 { private int a; private int b; private List<Integer> c; private boolean d; public int getA() { return a; } public void setA(int a) { this.a = a; } public int getB() { return b; } public void setB(int b) { this.b = b; } public List<Integer> getC() { return c; } public void setC(List<Integer> c) { this.c = c; } public boolean isD() { return d; } public void setD(boolean d) { this.d = d; } }
249
454
<filename>vertx-pin/zero-crud/src/main/java/io/vertx/tp/crud/uca/op/AgonicImport.java package io.vertx.tp.crud.uca.op; import io.vertx.core.Future; import io.vertx.core.json.JsonArray; import io.vertx.tp.crud.init.IxPin; import io.vertx.tp.crud.refine.Ix; import io.vertx.tp.crud.uca.desk.IxMod; import io.vertx.tp.crud.uca.input.Pre; import io.vertx.tp.ke.atom.specification.KField; import io.vertx.up.eon.em.ChangeFlag; import io.vertx.up.uca.jooq.UxJooq; import io.vertx.up.unity.Ux; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ConcurrentMap; /** * @author <a href="http://www.origin-x.cn">Lang</a> */ class AgonicImport implements Agonic { @Override public Future<JsonArray> runAAsync(final JsonArray input, final IxMod in) { final KField fieldConfig = in.module().getField(); final JsonArray matrix = Ix.onMatrix(fieldConfig); /* * Header And Compress */ final UxJooq jooq = IxPin.jooq(in); return Ix.passion(input, in, Pre.head()::inAAsync, /* Header Value */ this::runCompress /* Compress */ ).compose(processed -> Pre.qUk().inAJAsync(processed, in) .compose(jooq::fetchJAsync) /* Compared Data */ .compose(original -> Ux.future(Ux.compareJ(original, processed, matrix))) ).compose(compared -> this.runSave(compared, in)); } private Future<JsonArray> runSave(final ConcurrentMap<ChangeFlag, JsonArray> compared, final IxMod in) { final List<Future<JsonArray>> combine = new ArrayList<>(); final JsonArray inserted = compared.getOrDefault(ChangeFlag.ADD, new JsonArray()); if (!inserted.isEmpty()) { combine.add(Agonic.write(ChangeFlag.ADD).runAAsync(inserted, in)); } final JsonArray updated = compared.getOrDefault(ChangeFlag.UPDATE, new JsonArray()); if (!updated.isEmpty()) { combine.add(Agonic.write(ChangeFlag.UPDATE).runAAsync(updated, in)); } return Ux.thenCombineArray(combine); } private Future<JsonArray> runCompress(final JsonArray source, final IxMod in) { final KField fieldConfig = in.module().getField(); final JsonArray matrix = fieldConfig.getUnique(); // Here the `key` should be ignored final JsonArray normalized = Ux.ruleJReduce(source, matrix); return Ux.future(normalized); } }
1,087
2,144
<filename>pinot-segment-local/src/test/java/org/apache/pinot/segment/local/realtime/impl/invertedindex/NativeMutableTextIndexReaderWriterTest.java<gh_stars>1000+ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pinot.segment.local.realtime.impl.invertedindex; import java.io.IOException; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; public class NativeMutableTextIndexReaderWriterTest { @Test public void testIndexWriterReader() throws IOException { String[] uniqueValues = new String[4]; uniqueValues[0] = "hello-world"; uniqueValues[1] = "hello-world123"; uniqueValues[2] = "still"; uniqueValues[3] = "zoobar"; try (NativeMutableTextIndex textIndex = new NativeMutableTextIndex("testFSTColumn")) { for (int i = 0; i < 4; i++) { textIndex.add(uniqueValues[i]); } int[] matchedDocIds = textIndex.getDocIds("hello.*").toArray(); assertEquals(2, matchedDocIds.length); assertEquals(0, matchedDocIds[0]); assertEquals(1, matchedDocIds[1]); matchedDocIds = textIndex.getDocIds(".*llo").toArray(); assertEquals(2, matchedDocIds.length); assertEquals(0, matchedDocIds[0]); assertEquals(1, matchedDocIds[1]); matchedDocIds = textIndex.getDocIds("wor.*").toArray(); assertEquals(2, matchedDocIds.length); assertEquals(0, matchedDocIds[0]); assertEquals(1, matchedDocIds[1]); matchedDocIds = textIndex.getDocIds("zoo.*").toArray(); assertEquals(1, matchedDocIds.length); assertEquals(3, matchedDocIds[0]); } } }
817
17,085
<gh_stars>1000+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import six from distutils.sysconfig import get_python_lib from paddle.utils.cpp_extension.extension_utils import IS_WINDOWS IS_MAC = sys.platform.startswith('darwin') site_packages_path = get_python_lib() # Note(Aurelius84): We use `add_test` in Cmake to config how to run unittest in CI. # `PYTHONPATH` will be set as `build/python/paddle` that will make no way to find # paddle include directory. Because the following path is generated after insalling # PaddlePaddle whl. So here we specific `include_dirs` to avoid errors in CI. paddle_includes = [ os.path.join(site_packages_path, 'paddle', 'include'), os.path.join(site_packages_path, 'paddle', 'include', 'third_party') ] # Test for extra compile args extra_cc_args = ['-w', '-g'] if not IS_WINDOWS else ['/w'] extra_nvcc_args = ['-O3'] extra_compile_args = {'cc': extra_cc_args, 'nvcc': extra_nvcc_args}
477
492
package com.github.fakemongo.test.beans; import com.google.common.base.Objects; /** * @author <NAME> * @created 4/23/2016 */ public class TestChildBean extends TestParentBean { private String parentId; public String getParentId() { return parentId; } public void setParentId(String parentId) { this.parentId = parentId; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } if (!super.equals(o)) { return false; } TestChildBean that = (TestChildBean) o; return Objects.equal(parentId, that.parentId); } @Override public int hashCode() { return Objects.hashCode(super.hashCode(), parentId); } @Override public String toString() { final StringBuilder sb = new StringBuilder("InheritedAttributes {"); final String parentStr = super.toString(); sb.append(parentStr).append("}, ChildAttributes:{"); sb.append("parentId='").append(parentId).append('\''); sb.append('}'); return sb.toString(); } }
519
1,750
<reponame>literator/Typhoon // // SingletonC.h // Tests // // Created by <NAME> on 10/08/13. // // #import <Foundation/Foundation.h> @class SingletonA; @interface NotSingletonA : NSObject @property(nonatomic, strong) SingletonA *dependencyOnA; - (id)initWithSingletonA:(SingletonA *)singletonA; @end
122
386
package com.qq.tars.server.core; import com.qq.tars.common.AbstractFilterChain; import com.qq.tars.common.Filter; import com.qq.tars.common.FilterKind; import com.qq.tars.net.core.Request; import com.qq.tars.net.core.Response; import com.qq.tars.rpc.protocol.tars.TarsServantRequest; import com.qq.tars.rpc.protocol.tars.TarsServantResponse; import java.util.List; public class TarsServerFilterChain extends AbstractFilterChain<ServantHomeSkeleton> { public TarsServerFilterChain(List<Filter> filters, String servant, FilterKind kind, ServantHomeSkeleton target) { super(filters, servant, kind, target); } @Override protected void doRealInvoke(Request request, Response response) throws Throwable { if (request instanceof TarsServantRequest && target != null) { TarsServantRequest tarsServantRequest = (TarsServantRequest)request; Object value = target.invoke(tarsServantRequest.getMethodInfo().getMethod(), tarsServantRequest.getMethodParameters()); TarsServantResponse tarsServantResponse = (TarsServantResponse) response; tarsServantResponse.setResult(value); } } }
372
1,253
<gh_stars>1000+ package it.cosenonjaviste.daggermock.providesannotatedfields; import it.cosenonjaviste.daggermock.simple.MyService; public class MyServiceImpl extends MyService { String name; public MyServiceImpl(String name) { this.name = name; } }
104
348
{"nom":"Saint-Cyr-sur-Loire","circ":"5ème circonscription","dpt":"Indre-et-Loire","inscrits":12631,"abs":6317,"votants":6314,"blancs":425,"nuls":108,"exp":5781,"res":[{"nuance":"REM","nom":"<NAME>","voix":3131},{"nuance":"LR","nom":"<NAME>","voix":2650}]}
105
348
{"nom":"Lancrans","circ":"3ème circonscription","dpt":"Ain","inscrits":702,"abs":414,"votants":288,"blancs":25,"nuls":8,"exp":255,"res":[{"nuance":"REM","nom":"M<NAME>","voix":133},{"nuance":"LR","nom":"Mme <NAME>","voix":122}]}
91
468
{ "name": "purrr", "author": "rstudio", "license": "CC0", "raster": "http://hexb.in/hexagons/purrr.png", "vector": "http://hexb.in/vector/purrr.svg", "description": "purrr: Functional Programming Tools.", "order_online_url": "https://www.stickermule.com/marketplace/13594-rstudio-hex-purrr" }
123
819
<reponame>Asteur/vrhelper<filename>seurat/baker/framework/ray_bundle.cc /* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "seurat/baker/framework/ray_bundle.h" #include "ion/math/transformutils.h" #include "seurat/base/status.h" #include "seurat/base/util.h" namespace seurat { namespace baker { using base::Color4f; using ion::math::Matrix4f; using ion::math::Point2f; using ion::math::Point2i; using ion::math::Point3f; using ion::math::Vector2f; using ion::math::Vector2f; using ion::math::Vector3f; ViewGroupRayBundle::ViewGroupRayBundle( std::vector<std::shared_ptr<base::Camera>> cameras, std::vector<image::Ldi4f> ldis) : cameras_(std::move(cameras)), ldis_(std::move(ldis)) { CHECK_EQ(cameras_.size(), ldis_.size()); num_views_ = cameras_.size(); if (num_views_ == 0) { image_size_ = {0, 0}; } else { image_size_ = cameras_.front()->GetImageSize(); for (int i = 0; i < num_views_; ++i) { CHECK_EQ(image_size_, cameras_[i]->GetImageSize()); CHECK_EQ(image_size_, ldis_[i].GetSize()); } } } int ViewGroupRayBundle::GetRayCount() const { return image_size_[0] * image_size_[1] * num_views_; } Point3f ViewGroupRayBundle::GetOrigin(int ray_index) const { int view_index; Point2i pixel_coords; std::tie(view_index, pixel_coords) = PixelCoordFromRayIndex(ray_index); return cameras_[view_index]->RayOrigin(pixel_coords); } Vector3f ViewGroupRayBundle::GetDirection(int ray_index) const { // Rays originate from the near clipping plane. int view_index; Point2i pixel_coords; std::tie(view_index, pixel_coords) = PixelCoordFromRayIndex(ray_index); return cameras_[view_index]->RayDirection(pixel_coords); } int ViewGroupRayBundle::GetIntersectionCount(int ray_index) const { int view_index; Point2i pixel_coords; std::tie(view_index, pixel_coords) = PixelCoordFromRayIndex(ray_index); return ldis_[view_index].GetSampleCount(pixel_coords); } Point3f ViewGroupRayBundle::GetIntersectionPoint(int ray_index, int intersection) const { int view_index; Point2i pixel_coords; std::tie(view_index, pixel_coords) = PixelCoordFromRayIndex(ray_index); float depth = ldis_[view_index].GetDepths(pixel_coords)[intersection]; return cameras_[view_index]->RayEnd(pixel_coords, depth); } Color4f ViewGroupRayBundle::GetIntersectionColor(int ray_index, int intersection) const { int view_index; Point2i pixel_coords; std::tie(view_index, pixel_coords) = PixelCoordFromRayIndex(ray_index); return ldis_[view_index].GetColors(pixel_coords)[intersection]; } std::tuple<int, Point2i> ViewGroupRayBundle::PixelCoordFromRayIndex( int ray_index) const { int rays_per_image = image_size_[0] * image_size_[1]; int ray_index_in_image = ray_index % rays_per_image; return std::make_tuple(ray_index / rays_per_image, Point2i(ray_index_in_image % image_size_[0], ray_index_in_image / image_size_[0])); } } // namespace baker } // namespace seurat
1,405
678
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #import "UISlider.h" @class UIColor; @interface WCNetworkMediaSlider : UISlider { double m_cacheTime; _Bool _showCacheProgress; UIColor *_frontgroundColor; UIColor *_backgroundColor; double _sliderHeight; } @property(nonatomic) _Bool showCacheProgress; // @synthesize showCacheProgress=_showCacheProgress; @property(nonatomic) double sliderHeight; // @synthesize sliderHeight=_sliderHeight; @property(retain, nonatomic) UIColor *backgroundColor; // @synthesize backgroundColor=_backgroundColor; @property(retain, nonatomic) UIColor *frontgroundColor; // @synthesize frontgroundColor=_frontgroundColor; - (void).cxx_destruct; - (id)getFrontgroundImage; - (id)getBackgroundImage; - (void)resizeCacheProgress; - (void)setCacheProgress:(double)arg1; - (struct CGRect)minimumValueImageRectForBounds:(struct CGRect)arg1; - (void)initView; - (id)init; @end
363
841
package org.jboss.resteasy.core; import java.util.concurrent.CompletionStage; /** * Need to distinguish from a param or property that is actually a CompletionStage from * an async injection * */ public class CompletionStageHolder { private final CompletionStage stage; public CompletionStageHolder(final CompletionStage stage) { this.stage = stage; } public CompletionStage getStage() { return stage; } public static Object resolve(Object injectedObject) { if (injectedObject != null && injectedObject instanceof CompletionStageHolder) { return ((CompletionStageHolder)injectedObject).getStage(); } return injectedObject; } }
237
397
<filename>capsule-core/src/main/java/io/usethesource/capsule/core/PersistentTrieSet.java /** * Copyright (c) <NAME> <Centrum Wiskunde & Informatica> and Contributors. * All rights reserved. * * This file is licensed under the BSD 2-Clause License, which accompanies this project * and is available under https://opensource.org/licenses/BSD-2-Clause. */ package io.usethesource.capsule.core; import java.text.DecimalFormat; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Deque; import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Optional; import java.util.Spliterator; import java.util.Spliterators; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Stream; import java.util.stream.StreamSupport; import io.usethesource.capsule.Set; import io.usethesource.capsule.core.trie.ArrayView; import io.usethesource.capsule.core.trie.SetNode; import io.usethesource.capsule.core.trie.SetNodeResult; import io.usethesource.capsule.util.EqualityComparator; public class PersistentTrieSet<K> implements Set.Immutable<K>, java.io.Serializable { private static final long serialVersionUID = 42L; private static final CompactSetNode EMPTY_NODE = new BitmapIndexedSetNode<>(null, 0, 0, new Object[]{}); private static final PersistentTrieSet EMPTY_SET = new PersistentTrieSet(EMPTY_NODE, 0, 0); private static final boolean DEBUG = false; private final AbstractSetNode<K> rootNode; private final int cachedHashCode; private final int cachedSize; PersistentTrieSet(AbstractSetNode<K> rootNode, int cachedHashCode, int cachedSize) { this.rootNode = rootNode; this.cachedHashCode = cachedHashCode; this.cachedSize = cachedSize; if (DEBUG) { assert checkHashCodeAndSize(cachedHashCode, cachedSize); } } public static final <K> Set.Immutable<K> of() { return PersistentTrieSet.EMPTY_SET; } public static final <K> Set.Immutable<K> of(K key0) { final int keyHash0 = key0.hashCode(); final int dataMap = CompactSetNode.bitpos(CompactSetNode.mask(keyHash0, 0)); final CompactSetNode<K> newRootNode = CompactSetNode.nodeOf(null, dataMap, key0, keyHash0); return new PersistentTrieSet<K>(newRootNode, keyHash0, 1); } public static final <K> Set.Immutable<K> of(K key0, K key1) { assert !Objects.equals(key0, key1); final int keyHash0 = key0.hashCode(); final int keyHash1 = key1.hashCode(); CompactSetNode<K> newRootNode = CompactSetNode.mergeTwoKeyValPairs(key0, keyHash0, key1, keyHash1, 0); return new PersistentTrieSet<K>(newRootNode, keyHash0 + keyHash1, 2); } public static final <K> Set.Immutable<K> of(K... keys) { Set.Immutable<K> result = PersistentTrieSet.EMPTY_SET; for (final K key : keys) { result = result.__insert(key); } return result; } public static final <K> Set.Transient<K> transientOf() { return PersistentTrieSet.EMPTY_SET.asTransient(); } public static final <K> Set.Transient<K> transientOf(K... keys) { final Set.Transient<K> result = PersistentTrieSet.EMPTY_SET.asTransient(); for (final K key : keys) { result.__insert(key); } return result; } private static <K> int hashCode(AbstractSetNode<K> rootNode) { int hash = 0; for (Iterator<K> it = new SetKeyIterator<>(rootNode); it.hasNext(); ) { hash += it.next().hashCode(); } return hash; } private static <K> int size(AbstractSetNode<K> rootNode) { int size = 0; for (Iterator<K> it = new SetKeyIterator<>(rootNode); it.hasNext(); it.next()) { size += 1; } return size; } private boolean checkHashCodeAndSize(final int targetHash, final int targetSize) { int hash = 0; int size = 0; for (Iterator<K> it = keyIterator(); it.hasNext(); ) { final K key = it.next(); hash += key.hashCode(); size += 1; } return hash == targetHash && size == targetSize; } public static final int transformHashCode(final int hash) { return hash; } @Override public boolean contains(final Object o) { return containsEquivalent(o, Object::equals); } @Override public boolean containsEquivalent(final Object o, final EqualityComparator<Object> cmp) { try { final K key = (K) o; return rootNode.contains(key, transformHashCode(key.hashCode()), 0, cmp); } catch (ClassCastException unused) { return false; } } @Override public K get(final Object o) { return getEquivalent(o, Object::equals); } @Override public K getEquivalent(final Object o, final EqualityComparator<Object> cmp) { try { final K key = (K) o; final Optional<K> result = rootNode.findByKey(key, transformHashCode(key.hashCode()), 0, cmp); if (result.isPresent()) { return result.get(); } else { return null; } } catch (ClassCastException unused) { return null; } } @Override public Set.Immutable<K> __insert(final K key) { return __insertEquivalent(key, Object::equals); } @Override public Set.Immutable<K> __insertEquivalent(final K key, final EqualityComparator<Object> cmp) { final int keyHash = key.hashCode(); final SetNodeResult<K> details = SetNodeResult.unchanged(); final AbstractSetNode<K> newRootNode = rootNode.updated(null, key, transformHashCode(keyHash), 0, details, cmp); if (details.isModified()) { return new PersistentTrieSet<K>(newRootNode, cachedHashCode + keyHash, cachedSize + 1); } return this; } @Override public Set.Immutable<K> __insertAll(final java.util.Set<? extends K> set) { return __insertAllEquivalent(set, Object::equals); } @Override public Set.Immutable<K> __insertAllEquivalent(final java.util.Set<? extends K> set, final EqualityComparator<Object> cmp) { final Set.Transient<K> tmpTransient = this.asTransient(); tmpTransient.__insertAllEquivalent(set, cmp); return tmpTransient.freeze(); } @Override public Set.Immutable<K> __remove(final K key) { return __removeEquivalent(key, Object::equals); } @Override public Set.Immutable<K> __removeEquivalent(final K key, final EqualityComparator<Object> cmp) { final int keyHash = key.hashCode(); final SetNodeResult<K> details = SetNodeResult.unchanged(); final AbstractSetNode<K> newRootNode = rootNode.removed(null, key, transformHashCode(keyHash), 0, details, cmp); if (details.isModified()) { return new PersistentTrieSet<K>(newRootNode, cachedHashCode - keyHash, cachedSize - 1); } return this; } @Override public Set.Immutable<K> __removeAll(final java.util.Set<? extends K> set) { return __removeAllEquivalent(set, Object::equals); } @Override public Set.Immutable<K> __removeAllEquivalent(final java.util.Set<? extends K> set, final EqualityComparator<Object> cmp) { final Set.Transient<K> tmpTransient = this.asTransient(); tmpTransient.__removeAllEquivalent(set, cmp); return tmpTransient.freeze(); } @Override public Set.Immutable<K> __retainAll(final java.util.Set<? extends K> set) { final Set.Transient<K> tmpTransient = this.asTransient(); tmpTransient.__retainAll(set); return tmpTransient.freeze(); } @Override public Set.Immutable<K> __retainAllEquivalent(final Set.Transient<? extends K> transientSet, final EqualityComparator<Object> cmp) { final Set.Transient<K> tmpTransient = this.asTransient(); tmpTransient.__retainAllEquivalent(transientSet, cmp); return tmpTransient.freeze(); } @Override public boolean add(final K key) { throw new UnsupportedOperationException(); } @Override public boolean addAll(final Collection<? extends K> c) { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public boolean remove(final Object key) { throw new UnsupportedOperationException(); } @Override public boolean removeAll(final Collection<?> c) { throw new UnsupportedOperationException(); } @Override public boolean retainAll(final Collection<?> c) { throw new UnsupportedOperationException(); } @Override public boolean containsAll(final Collection<?> c) { return containsAllEquivalent(c, Object::equals); } @Override public boolean containsAllEquivalent(final Collection<?> c, final EqualityComparator<Object> cmp) { for (Object item : c) { if (!containsEquivalent(item, cmp)) { return false; } } return true; } @Override public int size() { return cachedSize; } @Override public boolean isEmpty() { return cachedSize == 0; } @Override public Iterator<K> iterator() { return keyIterator(); } @Override public Iterator<K> keyIterator() { return new SetKeyIterator<>(rootNode); } @Override public Object[] toArray() { Object[] array = new Object[cachedSize]; int idx = 0; for (K key : this) { array[idx++] = key; } return array; } @Override public <T> T[] toArray(final T[] a) { List<K> list = new ArrayList<K>(cachedSize); for (K key : this) { list.add(key); } return list.toArray(a); } @Override public boolean equals(final Object other) { return equivalent(other, Object::equals); } @Override public boolean equivalent(final Object other, final EqualityComparator<Object> cmp) { if (other == this) { return true; } if (other == null) { return false; } if (other instanceof PersistentTrieSet) { PersistentTrieSet<?> that = (PersistentTrieSet<?>) other; if (this.cachedSize != that.cachedSize) { return false; } if (this.cachedHashCode != that.cachedHashCode) { return false; } return rootNode.equivalent(that.rootNode, cmp); } else if (other instanceof java.util.Set) { java.util.Set that = (java.util.Set) other; if (this.size() != that.size()) { return false; } return containsAllEquivalent(that, cmp); } return false; } @Override public int hashCode() { return cachedHashCode; } @Override public String toString() { String body = stream().map(k -> k.toString()).reduce((o1, o2) -> String.join(", ", o1, o2)) .orElse(""); return String.format("{%s}", body); } @Override public boolean isTransientSupported() { return true; } @Override public Set.Transient<K> asTransient() { return new TransientTrieSet<K>(this); } protected AbstractSetNode<K> getRootNode() { return rootNode; } /* * For analysis purposes only. */ protected Iterator<AbstractSetNode<K>> nodeIterator() { return new TrieSetNodeIterator<>(rootNode); } /* * For analysis purposes only. */ protected int getNodeCount() { final Iterator<AbstractSetNode<K>> it = nodeIterator(); int sumNodes = 0; for (; it.hasNext(); it.next()) { sumNodes += 1; } return sumNodes; } /* * For analysis purposes only. Payload X Node */ protected int[][] arityCombinationsHistogram() { final Iterator<AbstractSetNode<K>> it = nodeIterator(); final int[][] sumArityCombinations = new int[33][33]; while (it.hasNext()) { final AbstractSetNode<K> node = it.next(); sumArityCombinations[node.payloadArity()][node.nodeArity()] += 1; } return sumArityCombinations; } /* * For analysis purposes only. */ protected int[] arityHistogram() { final int[][] sumArityCombinations = arityCombinationsHistogram(); final int[] sumArity = new int[33]; final int maxArity = 32; // TODO: factor out constant for (int j = 0; j <= maxArity; j++) { for (int maxRestArity = maxArity - j, k = 0; k <= maxRestArity - j; k++) { sumArity[j + k] += sumArityCombinations[j][k]; } } return sumArity; } /* * For analysis purposes only. */ public void printStatistics() { final int[][] sumArityCombinations = arityCombinationsHistogram(); final int[] sumArity = arityHistogram(); final int sumNodes = getNodeCount(); final int[] cumsumArity = new int[33]; for (int cumsum = 0, i = 0; i < 33; i++) { cumsum += sumArity[i]; cumsumArity[i] = cumsum; } final float threshhold = 0.01f; // for printing results for (int i = 0; i < 33; i++) { float arityPercentage = (float) (sumArity[i]) / sumNodes; float cumsumArityPercentage = (float) (cumsumArity[i]) / sumNodes; if (arityPercentage != 0 && arityPercentage >= threshhold) { // details per level StringBuilder bldr = new StringBuilder(); int max = i; for (int j = 0; j <= max; j++) { for (int k = max - j; k <= max - j; k++) { float arityCombinationsPercentage = (float) (sumArityCombinations[j][k]) / sumNodes; if (arityCombinationsPercentage != 0 && arityCombinationsPercentage >= threshhold) { bldr.append(String.format("%d/%d: %s, ", j, k, new DecimalFormat("0.00%").format(arityCombinationsPercentage))); } } } final String detailPercentages = bldr.toString(); // overview System.out.println(String.format("%2d: %s\t[cumsum = %s]\t%s", i, new DecimalFormat("0.00%").format(arityPercentage), new DecimalFormat("0.00%").format(cumsumArityPercentage), detailPercentages)); } } } protected static abstract class AbstractSetNode<K> implements SetNode<K, AbstractSetNode<K>>, Iterable<K>, java.io.Serializable { private static final long serialVersionUID = 42L; static final int TUPLE_LENGTH = 1; static final <T> boolean isAllowedToEdit(AtomicReference<?> x, AtomicReference<?> y) { return x != null && y != null && (x == y || x.get() == y.get()); } @Override public <T> ArrayView<T> dataArray(final int category, final int component) { if (category == 0 && component == 0) { return categoryArrayView0(); } else { throw new IllegalArgumentException("Category %i is not supported."); } } private <T> ArrayView<T> categoryArrayView0() { return new ArrayView<T>() { @Override public int size() { return payloadArity(); } @Override public T get(int index) { return (T) getKey(index); } }; } @Override public abstract ArrayView<AbstractSetNode<K>> nodeArray(); abstract boolean hasNodes(); abstract int nodeArity(); abstract AbstractSetNode<K> getNode(final int index); @Deprecated Iterator<? extends AbstractSetNode<K>> nodeIterator() { return new Iterator<AbstractSetNode<K>>() { int nextIndex = 0; final int nodeArity = AbstractSetNode.this.nodeArity(); @Override public void remove() { throw new UnsupportedOperationException(); } @Override public AbstractSetNode<K> next() { if (!hasNext()) { throw new NoSuchElementException(); } return AbstractSetNode.this.getNode(nextIndex++); } @Override public boolean hasNext() { return nextIndex < nodeArity; } }; } // abstract boolean hasPayload(); // // abstract int payloadArity(); // // abstract K getKey(final int index); @Deprecated abstract boolean hasSlots(); abstract int slotArity(); abstract Object getSlot(final int index); /** * The arity of this trie node (i.e. number of values and nodes stored on this level). * * @return sum of nodes and values stored within */ int arity() { return payloadArity() + nodeArity(); } @Override public int size() { final Iterator<? extends AbstractSetNode<K>> it = new TrieSetNodeIterator(this); int size = 0; while (it.hasNext()) { final AbstractSetNode<K> node = it.next(); size += node.payloadArity(); } return size; } abstract int localPayloadHashCode(); @Override public int recursivePayloadHashCode() { final Iterator<? extends AbstractSetNode<K>> it = new TrieSetNodeIterator(this); int hashCode = 0; while (it.hasNext()) { final AbstractSetNode<K> node = it.next(); hashCode += node.localPayloadHashCode(); } return hashCode; } @Override public Iterator<K> iterator() { return new SetKeyIterator<>(this); } @Override public Spliterator<K> spliterator() { return Spliterators.spliteratorUnknownSize(iterator(), Spliterator.DISTINCT); } public Stream<K> stream() { return StreamSupport.stream(spliterator(), false); } } protected static abstract class CompactSetNode<K> extends AbstractSetNode<K> { static final int HASH_CODE_LENGTH = 32; static final int BIT_PARTITION_SIZE = 5; static final int BIT_PARTITION_MASK = 0b11111; static final int mask(final int keyHash, final int shift) { return (keyHash >>> shift) & BIT_PARTITION_MASK; } static final int bitpos(final int mask) { return 1 << mask; } abstract int nodeMap(); abstract int dataMap(); @Override abstract CompactSetNode<K> getNode(final int index); boolean nodeInvariant() { boolean inv1 = (size() - payloadArity() >= 2 * (arity() - payloadArity())); boolean inv2 = (this.arity() == 0) ? sizePredicate() == SIZE_EMPTY : true; boolean inv3 = (this.arity() == 1 && payloadArity() == 1) ? sizePredicate() == SIZE_ONE : true; boolean inv4 = (this.arity() >= 2) ? sizePredicate() == SIZE_MORE_THAN_ONE : true; boolean inv5 = (this.nodeArity() >= 0) && (this.payloadArity() >= 0) && ((this.payloadArity() + this.nodeArity()) == this.arity()); return inv1 && inv2 && inv3 && inv4 && inv5; } abstract CompactSetNode<K> copyAndInsertValue(final AtomicReference<Thread> mutator, final int bitpos, final K key); abstract CompactSetNode<K> copyAndRemoveValue(final AtomicReference<Thread> mutator, final int bitpos); abstract CompactSetNode<K> copyAndSetNode(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node); abstract CompactSetNode<K> copyAndMigrateFromInlineToNode(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node); abstract CompactSetNode<K> copyAndMigrateFromNodeToInline(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node); static final <K> CompactSetNode<K> mergeTwoKeyValPairs(final K key0, final int keyHash0, final K key1, final int keyHash1, final int shift) { assert !(key0.equals(key1)); if (shift >= HASH_CODE_LENGTH) { // throw new // IllegalStateException("Hash collision not yet fixed."); return new HashCollisionSetNode<>(keyHash0, (K[]) new Object[]{key0, key1}); } final int mask0 = mask(keyHash0, shift); final int mask1 = mask(keyHash1, shift); if (mask0 != mask1) { // both nodes fit on same level final int dataMap = bitpos(mask0) | bitpos(mask1); if (mask0 < mask1) { return nodeOf(null, dataMap, key0, keyHash0, key1, keyHash1); } else { return nodeOf(null, dataMap, key1, keyHash1, key0, keyHash0); } } else { final CompactSetNode<K> node = mergeTwoKeyValPairs(key0, keyHash0, key1, keyHash1, shift + BIT_PARTITION_SIZE); // values fit on next level final int nodeMap = bitpos(mask0); return nodeOf(null, nodeMap, node); } } static final <K> CompactSetNode<K> nodeOf(final AtomicReference<Thread> mutator, final int nodeMap, final int dataMap, final Object[] nodes) { return new BitmapIndexedSetNode<>(mutator, nodeMap, dataMap, nodes); } static final <K> CompactSetNode<K> nodeOf(AtomicReference<Thread> mutator) { return EMPTY_NODE; } static final <K> CompactSetNode<K> nodeOf(AtomicReference<Thread> mutator, final int dataMap, final K key, final int keyHash) { return nodeOf(mutator, 0, dataMap, new Object[]{key}); } static final <K> CompactSetNode<K> nodeOf(AtomicReference<Thread> mutator, final int dataMap, final K key0, final int keyHash0, final K key1, final int keyHash1) { return nodeOf(mutator, 0, dataMap, new Object[]{key0, key1}); } static final <K> CompactSetNode<K> nodeOf(AtomicReference<Thread> mutator, final int nodeMap, final AbstractSetNode<K> node) { return nodeOf(mutator, nodeMap, 0, new Object[]{node}); } static final int index(final int bitmap, final int bitpos) { return java.lang.Integer.bitCount(bitmap & (bitpos - 1)); } static final int index(final int bitmap, final int mask, final int bitpos) { return (bitmap == -1) ? mask : index(bitmap, bitpos); } int dataIndex(final int bitpos) { return java.lang.Integer.bitCount(dataMap() & (bitpos - 1)); } int nodeIndex(final int bitpos) { return java.lang.Integer.bitCount(nodeMap() & (bitpos - 1)); } CompactSetNode<K> nodeAt(final int bitpos) { return getNode(nodeIndex(bitpos)); } @Override public boolean contains(final K key, final int keyHash, final int shift, final EqualityComparator<Object> cmp) { final int mask = mask(keyHash, shift); final int bitpos = bitpos(mask); final int dataMap = dataMap(); if ((dataMap & bitpos) != 0) { final int index = index(dataMap, mask, bitpos); return cmp.equals(getKey(index), key); } final int nodeMap = nodeMap(); if ((nodeMap & bitpos) != 0) { final int index = index(nodeMap, mask, bitpos); return getNode(index).contains(key, keyHash, shift + BIT_PARTITION_SIZE, cmp); } return false; } @Override public Optional<K> findByKey(final K key, final int keyHash, final int shift, final EqualityComparator<Object> cmp) { final int mask = mask(keyHash, shift); final int bitpos = bitpos(mask); if ((dataMap() & bitpos) != 0) { // inplace value final int index = dataIndex(bitpos); if (cmp.equals(getKey(index), key)) { return Optional.of(getKey(index)); } return Optional.empty(); } if ((nodeMap() & bitpos) != 0) { // node (not value) final AbstractSetNode<K> subNode = nodeAt(bitpos); return subNode.findByKey(key, keyHash, shift + BIT_PARTITION_SIZE, cmp); } return Optional.empty(); } @Override public AbstractSetNode<K> updated(final AtomicReference<Thread> mutator, final K key, final int keyHash, final int shift, final SetNodeResult<K> details, final EqualityComparator<Object> cmp) { final int mask = mask(keyHash, shift); final int bitpos = bitpos(mask); if ((dataMap() & bitpos) != 0) { // inplace value final int dataIndex = dataIndex(bitpos); final K currentKey = getKey(dataIndex); if (cmp.equals(currentKey, key)) { return this; } else { final AbstractSetNode<K> subNodeNew = mergeTwoKeyValPairs(currentKey, transformHashCode(currentKey.hashCode()), key, keyHash, shift + BIT_PARTITION_SIZE); details.modified(); details.updateDeltaSize(1); details.updateDeltaHashCode(keyHash); return copyAndMigrateFromInlineToNode(mutator, bitpos, subNodeNew); } } else if ((nodeMap() & bitpos) != 0) { // node (not value) final AbstractSetNode<K> subNode = nodeAt(bitpos); final AbstractSetNode<K> subNodeNew = subNode.updated(mutator, key, keyHash, shift + BIT_PARTITION_SIZE, details, cmp); if (details.isModified()) { /* * NOTE: subNode and subNodeNew may be referential equal if updated transiently in-place. * Therefore diffing nodes is not an option. Changes to content and meta-data need to be * explicitly tracked and passed when descending from recursion (i.e., {@code details}). */ return copyAndSetNode(mutator, bitpos, subNodeNew); } else { return this; } } else { // no value details.modified(); details.updateDeltaSize(1); details.updateDeltaHashCode(keyHash); return copyAndInsertValue(mutator, bitpos, key); } } @Override public AbstractSetNode<K> removed(final AtomicReference<Thread> mutator, final K key, final int keyHash, final int shift, final SetNodeResult<K> details, final EqualityComparator<Object> cmp) { final int mask = mask(keyHash, shift); final int bitpos = bitpos(mask); if ((dataMap() & bitpos) != 0) { // inplace value final int dataIndex = dataIndex(bitpos); if (cmp.equals(getKey(dataIndex), key)) { details.modified(); details.updateDeltaSize(-1); details.updateDeltaHashCode(-keyHash); if (this.payloadArity() == 2 && this.nodeArity() == 0) { /* * Create new node with remaining pair. The new node will a) either become the new root * returned, or b) unwrapped and inlined during returning. */ final int newDataMap = (shift == 0) ? (int) (dataMap() ^ bitpos) : bitpos(mask(keyHash, 0)); if (dataIndex == 0) { return CompactSetNode.<K>nodeOf(mutator, newDataMap, getKey(1), getKeyHash(1)); } else { return CompactSetNode.<K>nodeOf(mutator, newDataMap, getKey(0), getKeyHash(0)); } } else { return copyAndRemoveValue(mutator, bitpos); } } else { return this; } } else if ((nodeMap() & bitpos) != 0) { // node (not value) final AbstractSetNode<K> subNode = nodeAt(bitpos); final AbstractSetNode<K> subNodeNew = subNode.removed(mutator, key, keyHash, shift + BIT_PARTITION_SIZE, details, cmp); if (!details.isModified()) { return this; } switch (subNodeNew.sizePredicate()) { case 0: { throw new IllegalStateException("Sub-node must have at least one element."); } case 1: { if (this.payloadArity() == 0 && this.nodeArity() == 1) { // escalate (singleton or empty) result return subNodeNew; } else { // inline value (move to front) return copyAndMigrateFromNodeToInline(mutator, bitpos, subNodeNew); } } default: { // modify current node (set replacement node) return copyAndSetNode(mutator, bitpos, subNodeNew); } } } return this; } /** * @return 0 <= mask <= 2^BIT_PARTITION_SIZE - 1 */ static byte recoverMask(int map, byte i_th) { assert 1 <= i_th && i_th <= 32; byte cnt1 = 0; byte mask = 0; while (mask < 32) { if ((map & 0x01) == 0x01) { cnt1 += 1; if (cnt1 == i_th) { return mask; } } map = map >> 1; mask += 1; } assert cnt1 != i_th; throw new RuntimeException("Called with invalid arguments."); } @Override public String toString() { final StringBuilder bldr = new StringBuilder(); bldr.append('['); for (byte i = 0; i < payloadArity(); i++) { final byte pos = recoverMask(dataMap(), (byte) (i + 1)); bldr.append(String.format("@%d<#%d>", pos, Objects.hashCode(getKey(i)))); if (!((i + 1) == payloadArity())) { bldr.append(", "); } } if (payloadArity() > 0 && nodeArity() > 0) { bldr.append(", "); } for (byte i = 0; i < nodeArity(); i++) { final byte pos = recoverMask(nodeMap(), (byte) (i + 1)); bldr.append(String.format("@%d: %s", pos, getNode(i))); if (!((i + 1) == nodeArity())) { bldr.append(", "); } } bldr.append(']'); return bldr.toString(); } } protected static abstract class CompactMixedSetNode<K> extends CompactSetNode<K> { private final int nodeMap; private final int dataMap; CompactMixedSetNode(final AtomicReference<Thread> mutator, final int nodeMap, final int dataMap) { this.nodeMap = nodeMap; this.dataMap = dataMap; } @Override final int nodeMap() { return nodeMap; } @Override final int dataMap() { return dataMap; } } private static final class BitmapIndexedSetNode<K> extends CompactMixedSetNode<K> { transient final AtomicReference<Thread> mutator; final Object[] nodes; private BitmapIndexedSetNode(final AtomicReference<Thread> mutator, final int nodeMap, final int dataMap, final Object[] nodes) { super(mutator, nodeMap, dataMap); this.mutator = mutator; this.nodes = nodes; if (DEBUG) { assert (TUPLE_LENGTH * java.lang.Integer.bitCount(dataMap) + java.lang.Integer.bitCount(nodeMap) == nodes.length); for (int i = 0; i < TUPLE_LENGTH * payloadArity(); i++) { assert ((nodes[i] instanceof CompactSetNode) == false); } for (int i = TUPLE_LENGTH * payloadArity(); i < nodes.length; i++) { assert ((nodes[i] instanceof CompactSetNode) == true); } assert nodeInvariant(); } } @Override public ArrayView<AbstractSetNode<K>> nodeArray() { return new ArrayView<AbstractSetNode<K>>() { @Override public int size() { return BitmapIndexedSetNode.this.nodeArity(); } @Override public AbstractSetNode<K> get(int index) { return BitmapIndexedSetNode.this.getNode(index); } /** * TODO: replace with {{@link #set(int, AbstractSetNode, AtomicReference)}} */ @Override public void set(int index, AbstractSetNode<K> item) { // if (!isAllowedToEdit(BitmapIndexedSetNode.this.mutator, writeCapabilityToken)) { // throw new IllegalStateException(); // } nodes[nodes.length - 1 - index] = item; } @Override public void set(int index, AbstractSetNode<K> item, AtomicReference<?> writeCapabilityToken) { if (!isAllowedToEdit(BitmapIndexedSetNode.this.mutator, writeCapabilityToken)) { throw new IllegalStateException(); } nodes[nodes.length - 1 - index] = item; } }; } @Override public K getKey(final int index) { return (K) nodes[TUPLE_LENGTH * index]; } @Override public int getKeyHash(int index) { return getKey(index).hashCode(); } @Override CompactSetNode<K> getNode(final int index) { return (CompactSetNode<K>) nodes[nodes.length - 1 - index]; } // @Override // public void setNode(final AtomicReference<Thread> mutator, final int index, // final AbstractSetNode<K> node) { // if (isAllowedToEdit(this.mutator, mutator)) { // nodes[nodes.length - 1 - index] = node; // } else { // throw new IllegalStateException(); // } // } @Override public boolean hasPayload() { return dataMap() != 0; } @Override public int payloadArity() { return java.lang.Integer.bitCount(dataMap()); } @Override boolean hasNodes() { return nodeMap() != 0; } @Override int nodeArity() { return java.lang.Integer.bitCount(nodeMap()); } @Override Object getSlot(final int index) { return nodes[index]; } @Override boolean hasSlots() { return nodes.length != 0; } @Override int slotArity() { return nodes.length; } @Override int localPayloadHashCode() { final Stream<K> keyStream = StreamSupport.stream(this.<K>dataArray(0, 0).spliterator(), false); return keyStream.mapToInt(Object::hashCode).sum(); } @Override public int hashCode() { final int prime = 31; int result = 0; result = prime * result + (nodeMap()); result = prime * result + (dataMap()); result = prime * result + Arrays.hashCode(nodes); return result; } @Override public boolean equals(final Object other) { return equivalent(other, Object::equals); } @Override public boolean equivalent(final Object other, EqualityComparator<Object> cmp) { if (null == other) { return false; } if (this == other) { return true; } if (getClass() != other.getClass()) { return false; } BitmapIndexedSetNode<?> that = (BitmapIndexedSetNode<?>) other; if (nodeMap() != that.nodeMap()) { return false; } if (dataMap() != that.dataMap()) { return false; } if (!deepContentEquality(nodes, that.nodes, payloadArity(), slotArity(), cmp)) { return false; } return true; } private final boolean deepContentEquality( /* @NotNull */ Object[] a1, /* @NotNull */ Object[] a2, int splitAt, int length, EqualityComparator<Object> cmp) { // assert a1 != null && a2 != null; // assert a1.length == a2.length; if (a1 == a2) { return true; } // compare local payload for (int i = 0; i < splitAt; i++) { Object o1 = a1[i]; Object o2 = a2[i]; if (!EqualityComparator.equals(o1, o2, cmp::equals)) { return false; } } // recursively compare nested nodes for (int i = splitAt; i < length; i++) { AbstractSetNode o1 = (AbstractSetNode) a1[i]; AbstractSetNode o2 = (AbstractSetNode) a2[i]; if (!EqualityComparator.equals(o1, o2, (a, b) -> a.equivalent(b, cmp))) { return false; } } return true; } @Override public byte sizePredicate() { if (this.nodeArity() == 0) { switch (this.payloadArity()) { case 0: return SIZE_EMPTY; case 1: return SIZE_ONE; default: return SIZE_MORE_THAN_ONE; } } else { return SIZE_MORE_THAN_ONE; } } @Override public final int size() { return super.size(); } @Override public int recursivePayloadHashCode() { return super.recursivePayloadHashCode(); } @Override CompactSetNode<K> copyAndSetNode(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> newNode) { final int nodeIndex = nodeIndex(bitpos); final AbstractSetNode<K> node = getNode(nodeIndex); final int newCachedHashCode; final int newCachedSize; final int idx = this.nodes.length - 1 - nodeIndex; if (isAllowedToEdit(this.mutator, mutator)) { // no copying if already editable this.nodes[idx] = newNode; return this; } else { final Object[] src = this.nodes; final Object[] dst = new Object[src.length]; // copy 'src' and set 1 element(s) at position 'idx' System.arraycopy(src, 0, dst, 0, src.length); dst[idx + 0] = newNode; return nodeOf(mutator, nodeMap(), dataMap(), dst); } } @Override CompactSetNode<K> copyAndInsertValue(final AtomicReference<Thread> mutator, final int bitpos, final K key) { final int idx = TUPLE_LENGTH * dataIndex(bitpos); final Object[] src = this.nodes; final Object[] dst = new Object[src.length + 1]; // copy 'src' and insert 1 element(s) at position 'idx' System.arraycopy(src, 0, dst, 0, idx); dst[idx + 0] = key; System.arraycopy(src, idx, dst, idx + 1, src.length - idx); return nodeOf(mutator, nodeMap(), dataMap() | bitpos, dst); } @Override CompactSetNode<K> copyAndRemoveValue(final AtomicReference<Thread> mutator, final int bitpos) { final int idx = TUPLE_LENGTH * dataIndex(bitpos); final Object[] src = this.nodes; final Object[] dst = new Object[src.length - 1]; // copy 'src' and remove 1 element(s) at position 'idx' System.arraycopy(src, 0, dst, 0, idx); System.arraycopy(src, idx + 1, dst, idx, src.length - idx - 1); return nodeOf(mutator, nodeMap(), dataMap() ^ bitpos, dst); } @Override CompactSetNode<K> copyAndMigrateFromInlineToNode(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node) { final int idxOld = TUPLE_LENGTH * dataIndex(bitpos); final int idxNew = this.nodes.length - TUPLE_LENGTH - nodeIndex(bitpos); final Object[] src = this.nodes; final Object[] dst = new Object[src.length - 1 + 1]; // copy 'src' and remove 1 element(s) at position 'idxOld' and // insert 1 element(s) at position 'idxNew' (TODO: carefully test) assert idxOld <= idxNew; System.arraycopy(src, 0, dst, 0, idxOld); System.arraycopy(src, idxOld + 1, dst, idxOld, idxNew - idxOld); dst[idxNew + 0] = node; System.arraycopy(src, idxNew + 1, dst, idxNew + 1, src.length - idxNew - 1); return nodeOf(mutator, nodeMap() | bitpos, dataMap() ^ bitpos, dst); } @Override CompactSetNode<K> copyAndMigrateFromNodeToInline(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node) { final int idxOld = this.nodes.length - 1 - nodeIndex(bitpos); final int idxNew = TUPLE_LENGTH * dataIndex(bitpos); final Object[] src = this.nodes; final Object[] dst = new Object[src.length - 1 + 1]; // copy 'src' and remove 1 element(s) at position 'idxOld' and // insert 1 element(s) at position 'idxNew' (TODO: carefully test) assert idxOld >= idxNew; System.arraycopy(src, 0, dst, 0, idxNew); dst[idxNew + 0] = node.getKey(0); System.arraycopy(src, idxNew, dst, idxNew + 1, idxOld - idxNew); System.arraycopy(src, idxOld + 1, dst, idxOld + 1, src.length - idxOld - 1); return nodeOf(mutator, nodeMap() ^ bitpos, dataMap() | bitpos, dst); } } private static final class HashCollisionSetNode<K> extends CompactSetNode<K> { private final K[] keys; private final int hash; HashCollisionSetNode(final int hash, final K[] keys) { this.keys = keys; this.hash = hash; assert payloadArity() >= 2; } @Override public ArrayView<AbstractSetNode<K>> nodeArray() { return ArrayView.empty(); } @Override public boolean contains(final K key, final int keyHash, final int shift, final EqualityComparator<Object> cmp) { if (this.hash == keyHash) { for (K k : keys) { if (cmp.equals(k, key)) { return true; } } } return false; } @Override public Optional<K> findByKey(final K key, final int keyHash, final int shift, final EqualityComparator<Object> cmp) { for (int i = 0; i < keys.length; i++) { final K _key = keys[i]; if (cmp.equals(key, _key)) { return Optional.of(_key); } } return Optional.empty(); } @Override public AbstractSetNode<K> updated(final AtomicReference<Thread> mutator, final K key, final int keyHash, final int shift, final SetNodeResult<K> details, final EqualityComparator<Object> cmp) { assert this.hash == keyHash; for (int idx = 0; idx < keys.length; idx++) { if (cmp.equals(keys[idx], key)) { return this; } } final K[] keysNew = (K[]) new Object[this.keys.length + 1]; // copy 'this.keys' and insert 1 element(s) at position // 'keys.length' System.arraycopy(this.keys, 0, keysNew, 0, keys.length); keysNew[keys.length + 0] = key; System.arraycopy(this.keys, keys.length, keysNew, keys.length + 1, this.keys.length - keys.length); details.modified(); details.updateDeltaSize(1); details.updateDeltaHashCode(keyHash); return new HashCollisionSetNode<>(keyHash, keysNew); } @Override public AbstractSetNode<K> removed(final AtomicReference<Thread> mutator, final K key, final int keyHash, final int shift, final SetNodeResult<K> details, final EqualityComparator<Object> cmp) { for (int idx = 0; idx < keys.length; idx++) { if (cmp.equals(keys[idx], key)) { details.modified(); details.updateDeltaSize(-1); details.updateDeltaHashCode(-keyHash); if (this.arity() == 1) { return nodeOf(mutator); } else if (this.arity() == 2) { /* * Create root node with singleton element. This node will be a) either be the new root * returned, or b) unwrapped and inlined. */ final K theOtherKey = (idx == 0) ? keys[1] : keys[0]; return CompactSetNode.<K>nodeOf(mutator).updated(mutator, theOtherKey, keyHash, 0, SetNodeResult.unchanged(), cmp); } else { final K[] keysNew = (K[]) new Object[this.keys.length - 1]; // copy 'this.keys' and remove 1 element(s) at position // 'idx' System.arraycopy(this.keys, 0, keysNew, 0, idx); System.arraycopy(this.keys, idx + 1, keysNew, idx, this.keys.length - idx - 1); return new HashCollisionSetNode<>(keyHash, keysNew); } } } return this; } @Override public boolean hasPayload() { return true; } @Override public int payloadArity() { return keys.length; } @Override boolean hasNodes() { return false; } @Override int nodeArity() { return 0; } @Override int arity() { return payloadArity(); } @Override public byte sizePredicate() { return SIZE_MORE_THAN_ONE; } @Override public K getKey(final int index) { return keys[index]; } @Override public int getKeyHash(int index) { return getKey(index).hashCode(); } @Override public CompactSetNode<K> getNode(int index) { throw new IllegalStateException("Is leaf node."); } // @Override // public void setNode(AtomicReference<Thread> mutator, int index, AbstractSetNode<K> node) { // throw new IllegalStateException("Is leaf node."); // } @Override Object getSlot(final int index) { throw new UnsupportedOperationException(); } @Override boolean hasSlots() { throw new UnsupportedOperationException(); } @Override int slotArity() { throw new UnsupportedOperationException(); } @Override int localPayloadHashCode() { return hash * keys.length; } @Override public int hashCode() { final int prime = 31; int result = 0; result = prime * result + hash; result = prime * result + Arrays.hashCode(keys); return result; } @Override public boolean equals(final Object other) { return equivalent(other, Object::equals); } @Override public boolean equivalent(Object other, EqualityComparator<Object> cmp) { if (null == other) { return false; } if (this == other) { return true; } if (getClass() != other.getClass()) { return false; } HashCollisionSetNode<?> that = (HashCollisionSetNode<?>) other; if (hash != that.hash) { return false; } if (arity() != that.arity()) { return false; } /* * Linear scan for each key, because of arbitrary element order. */ outerLoop: for (int i = 0; i < that.payloadArity(); i++) { final Object otherKey = that.getKey(i); for (int j = 0; j < keys.length; j++) { final K key = keys[j]; if (cmp.equals(key, otherKey)) { continue outerLoop; } } return false; } return true; } @Override CompactSetNode<K> copyAndInsertValue(final AtomicReference<Thread> mutator, final int bitpos, final K key) { throw new UnsupportedOperationException(); } @Override CompactSetNode<K> copyAndRemoveValue(final AtomicReference<Thread> mutator, final int bitpos) { throw new UnsupportedOperationException(); } @Override CompactSetNode<K> copyAndSetNode(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node) { throw new UnsupportedOperationException(); } @Override CompactSetNode<K> copyAndMigrateFromInlineToNode(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node) { throw new UnsupportedOperationException(); } @Override CompactSetNode<K> copyAndMigrateFromNodeToInline(final AtomicReference<Thread> mutator, final int bitpos, final AbstractSetNode<K> node) { throw new UnsupportedOperationException(); } @Override final int nodeMap() { throw new UnsupportedOperationException(); } @Override final int dataMap() { throw new UnsupportedOperationException(); } } /** * Iterator skeleton that uses a fixed stack in depth. */ private static abstract class AbstractSetIterator<K> { private static final int MAX_DEPTH = 7; protected int currentValueCursor; protected int currentValueLength; protected AbstractSetNode<K> currentValueNode; private int currentStackLevel = -1; private final int[] nodeCursorsAndLengths = new int[MAX_DEPTH * 2]; AbstractSetNode<K>[] nodes = new AbstractSetNode[MAX_DEPTH]; AbstractSetIterator(AbstractSetNode<K> rootNode) { if (rootNode.hasNodes()) { currentStackLevel = 0; nodes[0] = rootNode; nodeCursorsAndLengths[0] = 0; nodeCursorsAndLengths[1] = rootNode.nodeArity(); } if (rootNode.hasPayload()) { currentValueNode = rootNode; currentValueCursor = 0; currentValueLength = rootNode.payloadArity(); } } /* * search for next node that contains values */ private boolean searchNextValueNode() { while (currentStackLevel >= 0) { final int currentCursorIndex = currentStackLevel * 2; final int currentLengthIndex = currentCursorIndex + 1; final int nodeCursor = nodeCursorsAndLengths[currentCursorIndex]; final int nodeLength = nodeCursorsAndLengths[currentLengthIndex]; if (nodeCursor < nodeLength) { final AbstractSetNode<K> nextNode = nodes[currentStackLevel].getNode(nodeCursor); nodeCursorsAndLengths[currentCursorIndex]++; if (nextNode.hasNodes()) { /* * put node on next stack level for depth-first traversal */ final int nextStackLevel = ++currentStackLevel; final int nextCursorIndex = nextStackLevel * 2; final int nextLengthIndex = nextCursorIndex + 1; nodes[nextStackLevel] = nextNode; nodeCursorsAndLengths[nextCursorIndex] = 0; nodeCursorsAndLengths[nextLengthIndex] = nextNode.nodeArity(); } if (nextNode.hasPayload()) { /* * found next node that contains values */ currentValueNode = nextNode; currentValueCursor = 0; currentValueLength = nextNode.payloadArity(); return true; } } else { currentStackLevel--; } } return false; } public boolean hasNext() { if (currentValueCursor < currentValueLength) { return true; } else { return searchNextValueNode(); } } public void remove() { throw new UnsupportedOperationException(); } } protected static class SetKeyIterator<K> extends AbstractSetIterator<K> implements Iterator<K> { SetKeyIterator(AbstractSetNode<K> rootNode) { super(rootNode); } @Override public K next() { if (!hasNext()) { throw new NoSuchElementException(); } else { return currentValueNode.getKey(currentValueCursor++); } } } /** * Iterator that first iterates over inlined-values and then continues depth first recursively. */ private static class TrieSetNodeIterator<K> implements Iterator<AbstractSetNode<K>> { final Deque<Iterator<? extends AbstractSetNode<K>>> nodeIteratorStack; TrieSetNodeIterator(AbstractSetNode<K> rootNode) { nodeIteratorStack = new ArrayDeque<>(); nodeIteratorStack.push(Collections.singleton(rootNode).iterator()); } @Override public boolean hasNext() { while (true) { if (nodeIteratorStack.isEmpty()) { return false; } else { if (nodeIteratorStack.peek().hasNext()) { return true; } else { nodeIteratorStack.pop(); continue; } } } } @Override public AbstractSetNode<K> next() { if (!hasNext()) { throw new NoSuchElementException(); } AbstractSetNode<K> innerNode = nodeIteratorStack.peek().next(); if (innerNode.hasNodes()) { nodeIteratorStack.push(innerNode.nodeIterator()); } return innerNode; } @Override public void remove() { throw new UnsupportedOperationException(); } } static abstract class AbstractTransientTrieSet<K> implements Set.Transient<K> { protected AbstractSetNode<K> rootNode; protected int cachedHashCode; protected int cachedSize; AbstractTransientTrieSet(PersistentTrieSet<K> trieSet) { this.rootNode = trieSet.rootNode; this.cachedHashCode = trieSet.cachedHashCode; this.cachedSize = trieSet.cachedSize; if (DEBUG) { assert checkHashCodeAndSize(cachedHashCode, cachedSize); } } private boolean checkHashCodeAndSize(final int targetHash, final int targetSize) { int hash = 0; int size = 0; for (Iterator<K> it = keyIterator(); it.hasNext(); ) { final K key = it.next(); hash += key.hashCode(); size += 1; } return hash == targetHash && size == targetSize; } @Override public boolean add(final K key) { throw new UnsupportedOperationException(); } @Override public boolean addAll(final Collection<? extends K> c) { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public boolean remove(final Object key) { throw new UnsupportedOperationException(); } @Override public boolean removeAll(final Collection<?> c) { throw new UnsupportedOperationException(); } @Override public boolean retainAll(final Collection<?> c) { throw new UnsupportedOperationException(); } @Override public boolean contains(final Object o) { return containsEquivalent(o, Object::equals); } @Override public boolean containsEquivalent(final Object o, final EqualityComparator<Object> cmp) { try { final K key = (K) o; return rootNode.contains(key, transformHashCode(key.hashCode()), 0, cmp); } catch (ClassCastException unused) { return false; } } @Override public K get(final Object o) { return getEquivalent(o, Object::equals); } @Override public K getEquivalent(final Object o, final EqualityComparator<Object> cmp) { try { final K key = (K) o; final Optional<K> result = rootNode.findByKey(key, transformHashCode(key.hashCode()), 0, cmp); if (result.isPresent()) { return result.get(); } else { return null; } } catch (ClassCastException unused) { return null; } } protected boolean __insertWithCapability(AtomicReference<Thread> mutator, K key) { return __insertEquivalentWithCapability(mutator, key, Object::equals); } protected boolean __insertEquivalentWithCapability(AtomicReference<Thread> mutator, final K key, final EqualityComparator<Object> cmp) { if (mutator.get() == null) { throw new IllegalStateException("Transient already frozen."); } final int keyHash = key.hashCode(); final SetNodeResult<K> details = SetNodeResult.unchanged(); final AbstractSetNode<K> newRootNode = rootNode.updated(mutator, key, transformHashCode(keyHash), 0, details, cmp); if (details.isModified()) { rootNode = newRootNode; cachedHashCode += keyHash; cachedSize += 1; if (DEBUG) { assert checkHashCodeAndSize(cachedHashCode, cachedSize); } return true; } if (DEBUG) { assert checkHashCodeAndSize(cachedHashCode, cachedSize); } return false; } @Override public boolean __insertAll(final java.util.Set<? extends K> set) { return __insertAllEquivalent(set, Object::equals); } @Override public boolean __insertAllEquivalent(final java.util.Set<? extends K> set, final EqualityComparator<Object> cmp) { boolean modified = false; for (final K key : set) { modified |= this.__insertEquivalent(key, cmp); } return modified; } protected boolean __removeWithCapability(AtomicReference<Thread> mutator, final K key) { return __removeEquivalentWithCapability(mutator, key, Object::equals); } protected boolean __removeEquivalentWithCapability(AtomicReference<Thread> mutator, final K key, final EqualityComparator<Object> cmp) { if (mutator.get() == null) { throw new IllegalStateException("Transient already frozen."); } final int keyHash = key.hashCode(); final SetNodeResult<K> details = SetNodeResult.unchanged(); final AbstractSetNode<K> newRootNode = rootNode.removed(mutator, key, transformHashCode(keyHash), 0, details, cmp); if (details.isModified()) { rootNode = newRootNode; cachedHashCode = cachedHashCode - keyHash; cachedSize = cachedSize - 1; if (DEBUG) { assert checkHashCodeAndSize(cachedHashCode, cachedSize); } return true; } if (DEBUG) { assert checkHashCodeAndSize(cachedHashCode, cachedSize); } return false; } @Override public boolean __removeAll(final java.util.Set<? extends K> set) { return __removeAllEquivalent(set, Object::equals); } @Override public boolean __removeAllEquivalent(final java.util.Set<? extends K> set, final EqualityComparator<Object> cmp) { boolean modified = false; for (final K key : set) { modified |= this.__removeEquivalent(key, cmp); } return modified; } @Override public boolean __retainAll(final java.util.Set<? extends K> set) { boolean modified = false; Iterator<K> thisIterator = iterator(); while (thisIterator.hasNext()) { if (!set.contains(thisIterator.next())) { thisIterator.remove(); modified = true; } } return modified; } @Override public boolean __retainAllEquivalent(final Set.Transient<? extends K> transientSet, final EqualityComparator<Object> cmp) { boolean modified = false; Iterator<K> thisIterator = iterator(); while (thisIterator.hasNext()) { if (!transientSet.containsEquivalent(thisIterator.next(), cmp)) { thisIterator.remove(); modified = true; } } return modified; } @Override public boolean containsAll(Collection<?> c) { return containsAllEquivalent(c, Object::equals); } @Override public boolean containsAllEquivalent(Collection<?> c, EqualityComparator<Object> cmp) { for (Object item : c) { if (!containsEquivalent(item, cmp)) { return false; } } return true; } @Override public int size() { return cachedSize; } @Override public boolean isEmpty() { return cachedSize == 0; } @Override public Iterator<K> iterator() { return keyIterator(); } @Override public Iterator<K> keyIterator() { return new TransientSetKeyIterator<>(this); } public static class TransientSetKeyIterator<K> extends SetKeyIterator<K> { final AbstractTransientTrieSet<K> collection; K lastKey; public TransientSetKeyIterator(final AbstractTransientTrieSet<K> collection) { super(collection.rootNode); this.collection = collection; } @Override public K next() { return lastKey = super.next(); } @Override public void remove() { // TODO: test removal at iteration rigorously collection.__remove(lastKey); } } @Override public Object[] toArray() { Object[] array = new Object[cachedSize]; int idx = 0; for (K key : this) { array[idx++] = key; } return array; } @Override public <T> T[] toArray(final T[] a) { List<K> list = new ArrayList<K>(cachedSize); for (K key : this) { list.add(key); } return list.toArray(a); } @Override public boolean equals(final Object other) { return equivalent(other, Object::equals); } @Override public boolean equivalent(final Object other, final EqualityComparator<Object> cmp) { if (other == this) { return true; } if (other == null) { return false; } if (other instanceof PersistentTrieSet.AbstractTransientTrieSet) { AbstractTransientTrieSet<?> that = (AbstractTransientTrieSet<?>) other; if (this.cachedSize != that.cachedSize) { return false; } if (this.cachedHashCode != that.cachedHashCode) { return false; } return rootNode.equivalent(that.rootNode, cmp); } else if (other instanceof java.util.Set) { java.util.Set that = (java.util.Set) other; if (this.size() != that.size()) { return false; } return containsAllEquivalent(that, cmp); } return false; } @Override public int hashCode() { return cachedHashCode; } } static final class TransientTrieSet<K> extends AbstractTransientTrieSet<K> { final private AtomicReference<Thread> mutator; TransientTrieSet(PersistentTrieSet<K> trieSet) { super(trieSet); this.mutator = new AtomicReference<Thread>(Thread.currentThread()); } @Override public boolean __insert(final K key) { return __insertWithCapability(this.mutator, key); } @Override public boolean __insertEquivalent(final K key, final EqualityComparator<Object> cmp) { return __insertEquivalentWithCapability(this.mutator, key, cmp); } @Override public boolean __remove(final K key) { return __removeWithCapability(this.mutator, key); } @Override public boolean __removeEquivalent(final K key, final EqualityComparator<Object> cmp) { return __removeEquivalentWithCapability(this.mutator, key, cmp); } @Override public Set.Immutable<K> freeze() { if (mutator.get() == null) { throw new IllegalStateException("Transient already frozen."); } mutator.set(null); return new PersistentTrieSet<K>(rootNode, cachedHashCode, cachedSize); } } }
24,631
406
<reponame>rhowe/igv<filename>src/main/java/org/broad/igv/ui/panel/TrackPanelComponent.java /* * The MIT License (MIT) * * Copyright (c) 2007-2015 Broad Institute * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /* * To change this template, choose Tools | Templates * and open the template in the editor. */ package org.broad.igv.ui.panel; //~--- non-JDK imports -------------------------------------------------------- import org.apache.log4j.Logger; import org.broad.igv.Globals; import org.broad.igv.track.Track; import org.broad.igv.track.TrackClickEvent; import org.broad.igv.track.TrackMenuUtils; import org.broad.igv.ui.IGV; import javax.swing.*; import java.awt.*; import java.awt.event.ActionEvent; import java.awt.event.KeyEvent; import java.awt.event.MouseEvent; import java.util.ArrayList; import java.util.Collection; import java.util.List; /** * @author eflakes */ abstract public class TrackPanelComponent extends JPanel { private static Logger log = Logger.getLogger(TrackPanelComponent.class); List<MouseableRegion> mouseRegions; private TrackPanel trackPanel; /** * A scheduler is used to distinguish a click from a double click. */ protected ClickTaskScheduler clickScheduler = new ClickTaskScheduler(); public TrackPanelComponent(TrackPanel trackPanel) { this.trackPanel = trackPanel; setFocusable(true); mouseRegions = new ArrayList(); initKeyDispatcher(); } private void initKeyDispatcher() { final Action delTracksAction = new AbstractAction() { @Override public void actionPerformed(ActionEvent e) { TrackMenuUtils.removeTracksAction(IGV.getInstance().getSelectedTracks()); } }; if (Globals.isDevelopment()) { final KeyStroke delKey = KeyStroke.getKeyStroke(KeyEvent.VK_DELETE, 0, false); final KeyStroke backspaceKey = KeyStroke.getKeyStroke(KeyEvent.VK_BACK_SPACE, 0, false); getInputMap().put(delKey, "deleteTracks"); getInputMap().put(backspaceKey, "deleteTracks"); getActionMap().put("deleteTracks", delTracksAction); } } public TrackPanel getTrackPanel() { if (trackPanel == null) { trackPanel = (TrackPanel) getParent(); } return trackPanel; } public String getTrackSetID() { return getTrackPanel().getName(); } protected void addMousableRegion(MouseableRegion region) { mouseRegions.add(region); } protected void removeMousableRegions() { mouseRegions.clear(); } protected List<MouseableRegion> getMouseRegions() { return mouseRegions; } public boolean scrollTo(String trackName) { Track t = findNextTrackMatching(trackName); if (t != null) { IGV.getInstance().clearSelections(); t.setSelected(true); if (trackPanel.getScrollPane().getVerticalScrollBar().isShowing()) { trackPanel.getScrollPane().getVerticalScrollBar().setValue(t.getY()); } return true; } return false; } int searchIdx = 0; private synchronized Track findNextTrackMatching(String trackName) { List<Track> tracks = getAllTracks(); searchIdx = Math.min(searchIdx, tracks.size()); for (int i = searchIdx; i < tracks.size(); i++) { Track t = tracks.get(i); if (t.getName().toUpperCase().contains(trackName.toUpperCase())) { searchIdx = i + 1; return t; } } for (int i = 0; i < searchIdx; i++) { Track t = tracks.get(i); if (t.getName().toUpperCase().contains(trackName.toUpperCase())) { searchIdx = i + 1; return t; } } return null; } public String getPopupMenuTitle(int x, int y) { Collection<Track> tracks = getSelectedTracks(); String popupTitle; if (tracks.size() == 1) { popupTitle = tracks.iterator().next().getName(); } else { popupTitle = "Total Tracks Selected: " + tracks.size(); } return popupTitle; } protected Collection<Track> getSelectedTracks() { return IGV.getInstance().getSelectedTracks(); } public List<Track> getAllTracks() { TrackPanel dataTrackView = (TrackPanel) getParent(); return dataTrackView.getTracks(); } protected void openPopupMenu(TrackClickEvent te) { openPopupMenu(te, null); } protected void openPopupMenu(TrackClickEvent te, List<Component> extraItems) { MouseEvent e = te.getMouseEvent(); final Collection<Track> selectedTracks = getSelectedTracks(); if (selectedTracks.size() == 0) { return; } IGVPopupMenu menu = null; // If a single track is selected, give it an opportunity to provide the popup menu if (selectedTracks.size() == 1) { Track track = selectedTracks.iterator().next(); menu = track.getPopupMenu(te); } // If still no menu, create a generic one with common items if (menu == null) { String title = getPopupMenuTitle(e.getX(), e.getY()); menu = TrackMenuUtils.getPopupMenu(selectedTracks, title, te); } // Add additional items, if any if (extraItems != null) { menu.addSeparator(); for (Component item : extraItems) { menu.add(item); } } if (menu.includeStandardItems()) { TrackMenuUtils.addPluginItems(menu, selectedTracks, te); // Add saveImage items menu.addSeparator(); JMenuItem savePng = new JMenuItem("Save PNG image..."); savePng.addActionListener(e1 -> saveImage("png")); menu.add(savePng); JMenuItem saveSvg = new JMenuItem("Save SVG image..."); saveSvg.addActionListener(e1 -> saveImage("svg")); menu.add(saveSvg); // Add export features ReferenceFrame frame = FrameManager.getDefaultFrame(); JMenuItem exportFeats = TrackMenuUtils.getExportFeatures(selectedTracks, frame); if (exportFeats != null) menu.add(exportFeats); JMenuItem exportNames = new JMenuItem("Export track names..."); exportNames.addActionListener(e12 -> TrackMenuUtils.exportTrackNames(selectedTracks)); menu.add(exportNames); menu.addSeparator(); menu.add(TrackMenuUtils.getRemoveMenuItem(selectedTracks)); } menu.show(e.getComponent(), e.getX(), e.getY()); } protected void toggleTrackSelections(MouseEvent e) { for (MouseableRegion mouseRegion : mouseRegions) { if (mouseRegion.containsPoint(e.getX(), e.getY())) { IGV.getInstance().toggleTrackSelections(mouseRegion.getTracks()); return; } } } protected void clearTrackSelections() { IGV.getInstance().clearSelections(); IGV.getMainFrame().repaint(); } protected void selectTracks(MouseEvent e) { for (MouseableRegion mouseRegion : mouseRegions) { if (mouseRegion.containsPoint(e.getX(), e.getY())) { IGV.getInstance().setTrackSelections(mouseRegion.getTracks()); return; } } } protected boolean isTrackSelected(MouseEvent e) { for (MouseableRegion mouseRegion : mouseRegions) { if (mouseRegion.containsPoint(e.getX(), e.getY())) { for (Track t : mouseRegion.getTracks()) { if (t.isSelected()) { return true; } } } } return false; } public void saveImage(String extension) { IGV.getInstance().saveImage(getTrackPanel().getScrollPane(), "igv_panel", extension); } }
3,798
679
<filename>main/xmloff/source/core/xmltkmap.cxx /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ // MARKER(update_precomp.py): autogen include statement, do not remove #include "precompiled_xmloff.hxx" #include <tools/debug.hxx> #include <rtl/ustring.hxx> #include <svl/svarray.hxx> #include <xmloff/xmltkmap.hxx> #include <xmloff/xmltoken.hxx> using namespace rtl; using namespace ::xmloff::token; class SvXMLTokenMapEntry_Impl { sal_uInt16 nPrefixKey; OUString sLocalName; sal_uInt16 nToken; public: sal_uInt16 GetToken() const { return nToken; } SvXMLTokenMapEntry_Impl( sal_uInt16 nPrefix, const OUString& rLName, sal_uInt16 nTok=XML_TOK_UNKNOWN ) : nPrefixKey( nPrefix ), sLocalName( rLName ), nToken( nTok ) {} SvXMLTokenMapEntry_Impl( const SvXMLTokenMapEntry& rEntry ) : nPrefixKey( rEntry.nPrefixKey ), sLocalName( GetXMLToken( rEntry.eLocalName ) ), nToken( rEntry.nToken ) {} sal_Bool operator==( const SvXMLTokenMapEntry_Impl& r ) const { return nPrefixKey == r.nPrefixKey && sLocalName == r.sLocalName; } sal_Bool operator<( const SvXMLTokenMapEntry_Impl& r ) const { return nPrefixKey < r.nPrefixKey || ( nPrefixKey == r.nPrefixKey && sLocalName < r.sLocalName); } }; typedef SvXMLTokenMapEntry_Impl *SvXMLTokenMapEntry_ImplPtr; SV_DECL_PTRARR_SORT_DEL( SvXMLTokenMap_Impl, SvXMLTokenMapEntry_ImplPtr, 5, 5 ) SV_IMPL_OP_PTRARR_SORT( SvXMLTokenMap_Impl, SvXMLTokenMapEntry_ImplPtr ) // --------------------------------------------------------------------- SvXMLTokenMapEntry_Impl *SvXMLTokenMap::_Find( sal_uInt16 nKeyPrefix, const OUString& rLName ) const { SvXMLTokenMapEntry_Impl *pRet = 0; SvXMLTokenMapEntry_Impl aTst( nKeyPrefix, rLName ); sal_uInt16 nPos; if( pImpl->Seek_Entry( &aTst, &nPos ) ) { pRet = (*pImpl)[nPos]; } return pRet; } SvXMLTokenMap::SvXMLTokenMap( const SvXMLTokenMapEntry *pMap ) : pImpl( new SvXMLTokenMap_Impl ) { while( pMap->eLocalName != XML_TOKEN_INVALID ) { pImpl->Insert( new SvXMLTokenMapEntry_Impl( *pMap ) ); pMap++; } } SvXMLTokenMap::~SvXMLTokenMap() { delete pImpl; } sal_uInt16 SvXMLTokenMap::Get( sal_uInt16 nKeyPrefix, const OUString& rLName ) const { SvXMLTokenMapEntry_Impl *pEntry = _Find( nKeyPrefix, rLName ); if( pEntry ) return pEntry->GetToken(); else return XML_TOK_UNKNOWN; }
1,228
739
package org.fxmisc.richtext.mouse; import javafx.geometry.Point2D; import javafx.geometry.Pos; import javafx.scene.control.ContextMenu; import javafx.scene.control.MenuItem; import javafx.scene.input.KeyCode; import javafx.scene.input.MouseButton; import javafx.stage.Stage; import org.fxmisc.richtext.InlineCssTextAreaAppTest; import org.junit.After; import org.junit.Test; import static junit.framework.TestCase.assertFalse; import static junit.framework.TestCase.assertTrue; public class ContextMenuTests extends InlineCssTextAreaAppTest { private ContextMenu menu; // offset needs to be 5+ to prevent test failures private double offset = 30; @Override public void start(Stage stage) throws Exception { super.start(stage); menu = new ContextMenu(new MenuItem("A menu item")); area.setContextMenu(menu); area.setContextMenuXOffset(offset); area.setContextMenuYOffset(offset); } @After public void cleanup() { interact(menu::hide); } @Test public void clicking_secondary_shows_context_menu() { // Linux passes; Mac fails; Windows untested // so for now, only run on Linux // TODO: See if tests pass on Windows run_only_on_linux(); // when rightClickOnFirstLine(); // then assertTrue(area.getContextMenu().isShowing()); } @Test public void pressing_secondary_shows_context_menu() { // Linux passes; Mac fails; Windows untested // so for now, only run on Linux // TODO: See if tests pass on Windows run_only_on_linux(); // when moveTo(firstLineOfArea()).press(MouseButton.SECONDARY); // then assertTrue(area.getContextMenu().isShowing()); } @Test public void pressing_primary_mouse_button_hides_context_menu() { // given menu is showing showContextMenuAt(); moveTo(firstLineOfArea()).press(MouseButton.PRIMARY); assertFalse(area.getContextMenu().isShowing()); } @Test public void pressing_middle_mouse_button_hides_context_menu() { // given menu is showing showContextMenuAt(); moveTo(firstLineOfArea()).press(MouseButton.MIDDLE); assertFalse(area.getContextMenu().isShowing()); } @Test public void requesting_context_nenu_via_keyboard_works_on_windows() { run_only_on_windows(); leftClickOnFirstLine(); push(KeyCode.CONTEXT_MENU); assertTrue(area.getContextMenu().isShowing()); } private void showContextMenuAt() { Point2D screenPoint = position(Pos.TOP_LEFT, offset, offset).query(); interact(() -> area.getContextMenu().show(area, screenPoint.getX(), screenPoint.getY())); } }
1,092
547
[ { "id": 0, "src": "images/proexpress-cover.jpg", "title": "Pro Express.js", "url": "http://amzn.to/1D6qiqk" }, { "id": 1, "src": "images/practicalnode-cover.jpeg", "title": "Practical Node.js", "url": "http://amzn.to/NuQ0fM" }, { "id": 2, "src": "images/expressapiref-cover.jpg", "title": "Express API Reference", "url": "http://amzn.to/1xcHanf" }, { "id": 3, "src": "images/reactquickly-cover.jpg", "title": "React Quickly", "url": "https://www.manning.com/books/react-quickly"}, { "id": 4, "src": "images/fullstack-cover.png", "title": "Full Stack JavaScript", "url": "http://www.apress.com/9781484217504"} ]
252
14,668
<filename>ui/base/ime/text_input_action.h // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_BASE_IME_TEXT_INPUT_ACTION_H_ #define UI_BASE_IME_TEXT_INPUT_ACTION_H_ namespace ui { // This mode corresponds to enterkeyhint // https://html.spec.whatwg.org/multipage/interaction.html#input-modalities:-the-enterkeyhint-attribute // // A Java counterpart will be generated for this enum. // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.ui.base.ime enum class TextInputAction { kDefault, kEnter, kDone, kGo, kNext, kPrevious, kSearch, kSend, kMaxValue = kSend, }; } // namespace ui #endif // UI_BASE_IME_TEXT_INPUT_ACTION_H_
282
4,640
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """CMSIS-NN integration tests: extract_constants pass""" import numpy as np import pytest import tvm import tvm.testing from tvm import relay tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__) class CheckFunctionsForConstants(tvm.relay.ExprVisitor): """Provides methods to test number of constants present in a function""" def __init__(self): super().__init__() self.num_constants_ = 0 def visit_call(self, call): super().visit_call(call) for arg in call.args: if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0: self.num_constants_ += 1 def check_num_constants(self): assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls" def set_external_func_attr(func, compiler, ext_symbol): func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1)) func = func.with_attr("Compiler", compiler) func = func.with_attr("global_symbol", ext_symbol) return func def set_composite_func_attr(func, name): func = func.with_attr("Composite", name) return func @tvm.testing.requires_cmsisnn def test_external_function(): """Tests the pass ExternConstants when the function is a global function""" input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32") input0 = relay.var("input0", shape=(8, 8)) input1_const = relay.const(input1_data, "float32") binary_op = input0 + input1_const extern_func = relay.Function([input0], binary_op, relay.TensorType((8, 8), "float32")) global_var = relay.GlobalVar("external_function") extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint) arg = relay.var("arg", shape=(8, 8)) call_extern_func = relay.Call(global_var, [arg]) main_func = relay.Function([arg], call_extern_func, relay.TensorType((8, 8), "float32")) main_var = relay.GlobalVar("main") mod = tvm.IRModule() mod[global_var] = extern_func mod[main_var] = main_func mod = ExtractConstantsFromPartitionedFunction()(mod) constant_verifier = CheckFunctionsForConstants() constant_verifier.visit_function(mod[global_var]) constant_verifier.check_num_constants() relay.transform.InferType()(mod) @tvm.testing.requires_cmsisnn def test_nested_function(): """Tests the pass ExternConstants when a composite function is present within global function """ input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32") input0 = relay.var("input0", shape=(8, 8)) input1_const = relay.const(input1_data, "float32") binary_op0 = input0 + input1_const binary_op1 = binary_op0 * relay.const(5.0, "float32") local_func = relay.Function([input0], binary_op1, relay.TensorType((8, 8), "float32")) local_func = set_composite_func_attr(local_func, "cmsis-nn") arg = relay.var("arg", shape=(8, 8)) call_local_func = relay.Call(local_func, [arg]) extern_func = relay.Function([arg], call_local_func, relay.TensorType((8, 8), "float32")) global_arg = relay.var("garg", shape=(8, 8)) global_var = relay.GlobalVar("external_function") extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint) call_extern_func = relay.Call(global_var, [global_arg]) main_func = relay.Function([global_arg], call_extern_func, relay.TensorType((8, 8), "float32")) main_var = relay.GlobalVar("main") mod = tvm.IRModule() mod[global_var] = extern_func mod[main_var] = main_func mod = ExtractConstantsFromPartitionedFunction()(mod) constant_verifier = CheckFunctionsForConstants() constant_verifier.visit_function(mod[global_var]) constant_verifier.check_num_constants() relay.transform.InferType()(mod) @tvm.testing.requires_cmsisnn def test_internal_function_with_duplicate_arguments(): """Tests the pass ExternConstants when a composite function is present within global function with repeating arguments to one of the binary ops. """ input0 = relay.var("input0", shape=(8, 8)) binary_op0 = input0 + input0 binary_op1 = binary_op0 * relay.const(5.0, "float32") local_func = relay.Function([input0], binary_op1, relay.TensorType((8, 8), "float32")) local_func = set_composite_func_attr(local_func, "cmsis-nn") arg = relay.var("arg", shape=(8, 8)) call_local_func = relay.Call(local_func, [arg]) extern_func = relay.Function([arg], call_local_func, relay.TensorType((8, 8), "float32")) global_arg = relay.var("global_var", shape=(8, 8)) global_var = relay.GlobalVar("external_function") extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint) call_extern_func = relay.Call(global_var, [global_arg]) main_func = relay.Function([global_arg], call_extern_func, relay.TensorType((8, 8), "float32")) main_var = relay.GlobalVar("main") mod = tvm.IRModule() mod[global_var] = extern_func mod[main_var] = main_func mod = ExtractConstantsFromPartitionedFunction()(mod) constant_verifier = CheckFunctionsForConstants() constant_verifier.visit_function(mod[global_var]) constant_verifier.check_num_constants() relay.transform.InferType()(mod) @tvm.testing.requires_cmsisnn def test_multiple_functions(): """Tests the pass ExternConstants when global function contains multiple composite functions inside it """ f0_input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32") f0_input0 = relay.var("f0_in0", shape=(8, 8)) f0_input1_const = relay.const(f0_input1_data, "float32") f0_binary_op = f0_input0 + f0_input1_const f0_func = relay.Function([f0_input0], f0_binary_op, relay.TensorType((8, 8), "float32")) f0_func = set_composite_func_attr(f0_func, "cmsis-nn") f1_input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32") f1_input0 = relay.var("f1_in0", shape=(8, 8)) f1_input1_const = relay.const(f1_input1_data, "float32") f1_binary_op = f1_input0 + f1_input1_const f1_func = relay.Function([f1_input0], f1_binary_op, relay.TensorType((8, 8), "float32")) f1_func = set_composite_func_attr(f1_func, "cmsis-nn") arg0 = relay.var("arg0", shape=(8, 8)) call_local_func0 = relay.Call(f0_func, [arg0]) call_local_func1 = relay.Call(f1_func, [call_local_func0]) extern_func = relay.Function([arg0], call_local_func1, relay.TensorType((8, 8), "float32")) input0 = relay.var("input0", shape=(8, 8)) global_var = relay.GlobalVar("cmsis-nn") extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint) call_extern_func = relay.Call(global_var, [input0]) main_func = relay.Function([input0], call_extern_func, relay.TensorType((8, 8), "float32")) main_var = relay.GlobalVar("main") mod = tvm.IRModule() mod[global_var] = extern_func mod[main_var] = main_func mod = ExtractConstantsFromPartitionedFunction()(mod) constant_verifier = CheckFunctionsForConstants() constant_verifier.visit_function(mod[global_var]) constant_verifier.check_num_constants() relay.transform.InferType()(mod) @tvm.testing.requires_cmsisnn def test_main_function(): """Tests the pass ExternConstants on main function""" input0 = relay.var("input0", shape=(8, 8)) input1 = relay.var("input1", shape=(8, 8)) binary_op = input0 + input1 extern_func = relay.Function([input0, input1], binary_op, relay.TensorType((8, 8), "float32")) global_var = relay.GlobalVar("external_function") extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint) arg = relay.var("arg", shape=(8, 8)) input_data = np.random.uniform(0, 1, (8, 8)).astype("float32") input_const = relay.const(input_data, "float32") binary_op = arg + input_const call_extern_func = relay.Call(global_var, [arg, binary_op]) main_func = relay.Function([arg], call_extern_func, relay.TensorType((8, 8), "float32")) main_var = relay.GlobalVar("main") mod = tvm.IRModule() mod[global_var] = extern_func mod[main_var] = main_func mod = ExtractConstantsFromPartitionedFunction()(mod) check_for_constants = CheckFunctionsForConstants() check_for_constants.visit_call(mod[main_var].body) assert ( check_for_constants.num_constants_ == 1 ), "main() should have same number of arguments as before" @tvm.testing.requires_cmsisnn @pytest.mark.parametrize("external_compiler", ["cmsis-nn", "other_compiler"]) def test_multiple_functions_non_cmsisnn_compiler(external_compiler): """Tests the pass ExternConstants on non CMSIS-NN targets""" y20_data = np.random.uniform(0, 1, (8, 8)).astype("float32") x20 = relay.var("x20", shape=(8, 8)) y20_const = relay.const(y20_data, "float32") z20 = x20 + y20_const f20 = relay.Function([x20], z20, relay.TensorType((8, 8), "float32")) f20 = set_composite_func_attr(f20, "cmsis-nn.qnn_op_1") x10 = relay.var("x10", shape=(8, 8)) call_local_func0 = relay.Call(f20, [x10]) extern_func0 = relay.Function([x10], call_local_func0, relay.TensorType((8, 8), "float32")) y21_data = np.random.uniform(0, 1, (8, 8)).astype("float32") x21 = relay.var("x21", shape=(8, 8)) y21_const = relay.const(y21_data, "float32") z21 = x21 + y21_const f21 = relay.Function([x21], z21, relay.TensorType((8, 8), "float32")) f21 = set_composite_func_attr(f21, "cmsis-nn.qnn_op_2") x11 = relay.var("x11", shape=(8, 8)) call_local_func1 = relay.Call(f21, [x11]) extern_func1 = relay.Function([x11], call_local_func1, relay.TensorType((8, 8), "float32")) input0 = relay.var("input0", shape=(8, 8)) global_var0 = relay.GlobalVar("external_function_0") extern_func0 = set_external_func_attr(extern_func0, external_compiler, global_var0.name_hint) call_extern_func0 = relay.Call(global_var0, [input0]) global_var1 = relay.GlobalVar("external_function_1") extern_func1 = set_external_func_attr(extern_func1, external_compiler, global_var1.name_hint) call_extern_func1 = relay.Call(global_var1, [call_extern_func0]) main_func = relay.Function([input0], call_extern_func1, relay.TensorType((8, 8), "float32")) main_var = relay.GlobalVar("main") mod = tvm.IRModule() mod[global_var0] = extern_func0 mod[global_var1] = extern_func1 mod[main_var] = main_func mod = ExtractConstantsFromPartitionedFunction()(mod) check_for_constants = CheckFunctionsForConstants() check_for_constants.visit_call(mod[main_var].body) num_extracted_constants = 0 if external_compiler == "cmsis-nn": num_extracted_constants = 2 assert ( check_for_constants.num_constants_ == num_extracted_constants ), "main() should have same number of arguments as before" if __name__ == "__main__": tvm.testing.main()
4,512
348
<filename>docs/data/leg-t2/036/03602229.json<gh_stars>100-1000 {"nom":"Val-Fouzon","circ":"2ème circonscription","dpt":"Indre","inscrits":820,"abs":451,"votants":369,"blancs":28,"nuls":22,"exp":319,"res":[{"nuance":"LR","nom":"<NAME>","voix":201},{"nuance":"MDM","nom":"<NAME>","voix":118}]}
120
1,176
<reponame>zuzhi/rssant # Generated by Django 2.2.6 on 2019-10-27 05:17 from django.db import migrations, models import ool class Migration(migrations.Migration): dependencies = [ ('rssant_api', '0012_auto_20191025_1526'), ] operations = [ migrations.CreateModel( name='ImageInfo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('_version', ool.VersionField(default=0)), ('_created', models.DateTimeField(auto_now_add=True, help_text='创建时间')), ('_updated', models.DateTimeField(auto_now=True, help_text='更新时间')), ('url_root', models.CharField(help_text='eg: https://image.example.com/root-path', max_length=120)), ('sample_url', models.CharField(blank=True, help_text='sample image url', max_length=240, null=True)), ('user_agent', models.CharField(blank=True, help_text='the user-agent used to request sample image', max_length=240, null=True)), ('referer', models.CharField(blank=True, help_text='the referer used to request sample image', max_length=240, null=True)), ('status_code', models.IntegerField(blank=True, help_text='the response status code when request sample image', null=True)), ('dt_created', models.DateTimeField(auto_now_add=True, help_text='created datatime')), ], bases=(ool.VersionedMixin, models.Model), ), migrations.AddIndex( model_name='imageinfo', index=models.Index(fields=['url_root', 'dt_created'], name='rssant_api__url_roo_0d7c51_idx'), ), ]
767
5,169
{ "name": "fpingx", "version": "0.1.2", "summary": "A swift wrapper of fping which is a high performance ping tool", "description": "A swift wrapper of fping which is a high performance ping tool", "homepage": "https://github.com/jackymelb/fpingx", "license": { "type": "BSD", "file": "LICENSE" }, "authors": { "jackymelb": "<EMAIL>" }, "source": { "git": "https://github.com/jackymelb/fpingx.git", "tag": "0.1.2" }, "social_media_url": "https://twitter.com/jackymelb", "platforms": { "ios": "8.0", "osx": "10.10" }, "source_files": [ "fpingx/fpingx.swift", "fpingx/fpingx.h", "fping/*" ], "public_header_files": [ "fpingx/fpingx.h", "fping/*.h" ], "frameworks": "Foundation" }
346
337
/** * iptables API * iptables API generated from iptables.yang * * OpenAPI spec version: 1.0.0 * * NOTE: This class is auto generated by the swagger code generator program. * https://github.com/polycube-network/swagger-codegen.git * branch polycube */ /* Do not edit this file manually */ /* * ChainStatsJsonObject.h * * */ #pragma once #include "JsonObjectBase.h" namespace io { namespace swagger { namespace server { namespace model { /// <summary> /// /// </summary> class ChainStatsJsonObject : public JsonObjectBase { public: ChainStatsJsonObject(); ChainStatsJsonObject(const nlohmann::json &json); ~ChainStatsJsonObject() final = default; nlohmann::json toJson() const final; /// <summary> /// Rule Identifier /// </summary> uint32_t getId() const; void setId(uint32_t value); bool idIsSet() const; /// <summary> /// Number of packets matching the rule /// </summary> uint64_t getPkts() const; void setPkts(uint64_t value); bool pktsIsSet() const; void unsetPkts(); /// <summary> /// Number of bytes matching the rule /// </summary> uint64_t getBytes() const; void setBytes(uint64_t value); bool bytesIsSet() const; void unsetBytes(); /// <summary> /// Description /// </summary> std::string getDescription() const; void setDescription(std::string value); bool descriptionIsSet() const; void unsetDescription(); private: uint32_t m_id; bool m_idIsSet; uint64_t m_pkts; bool m_pktsIsSet; uint64_t m_bytes; bool m_bytesIsSet; std::string m_description; bool m_descriptionIsSet; }; } } } }
561
700
<reponame>OakCityLabs/ios_system /* $OpenBSD: mux.c,v 1.86 2020/10/29 02:52:43 djm Exp $ */ /* * Copyright (c) 2002-2008 <NAME> <<EMAIL>> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* ssh session multiplexing support */ #include "includes.h" #include <sys/types.h> #include <sys/stat.h> #include <sys/socket.h> #include <sys/un.h> #include <errno.h> #include <fcntl.h> #include <signal.h> #include <stdarg.h> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #ifdef HAVE_PATHS_H #include <paths.h> #endif #ifdef HAVE_POLL_H #include <poll.h> #else # ifdef HAVE_SYS_POLL_H # include <sys/poll.h> # endif #endif #ifdef HAVE_UTIL_H # include <util.h> #endif #include "openbsd-compat/sys-queue.h" #include "xmalloc.h" #include "log.h" #include "ssh.h" #include "ssh2.h" #include "pathnames.h" #include "misc.h" #include "match.h" #include "sshbuf.h" #include "channels.h" #include "msg.h" #include "packet.h" #include "monitor_fdpass.h" #include "sshpty.h" #include "sshkey.h" #include "readconf.h" #include "clientloop.h" #include "ssherr.h" /* from ssh.c */ extern __thread int tty_flag; extern Options options; extern __thread int stdin_null_flag; extern __thread char *host; extern __thread int subsystem_flag; extern __thread struct sshbuf *command; extern __thread volatile sig_atomic_t quit_pending; /* Context for session open confirmation callback */ struct mux_session_confirm_ctx { u_int want_tty; u_int want_subsys; u_int want_x_fwd; u_int want_agent_fwd; struct sshbuf *cmd; char *term; struct termios tio; char **env; u_int rid; }; /* Context for stdio fwd open confirmation callback */ struct mux_stdio_confirm_ctx { u_int rid; }; /* Context for global channel callback */ struct mux_channel_confirm_ctx { u_int cid; /* channel id */ u_int rid; /* request id */ int fid; /* forward id */ }; /* fd to control socket */ int muxserver_sock = -1; /* client request id */ u_int muxclient_request_id = 0; /* Multiplexing control command */ u_int muxclient_command = 0; /* Set when signalled. */ static volatile sig_atomic_t muxclient_terminate = 0; /* PID of multiplex server */ static u_int muxserver_pid = 0; static Channel *mux_listener_channel = NULL; struct mux_master_state { int hello_rcvd; }; /* mux protocol messages */ #define MUX_MSG_HELLO 0x00000001 #define MUX_C_NEW_SESSION 0x10000002 #define MUX_C_ALIVE_CHECK 0x10000004 #define MUX_C_TERMINATE 0x10000005 #define MUX_C_OPEN_FWD 0x10000006 #define MUX_C_CLOSE_FWD 0x10000007 #define MUX_C_NEW_STDIO_FWD 0x10000008 #define MUX_C_STOP_LISTENING 0x10000009 #define MUX_C_PROXY 0x1000000f #define MUX_S_OK 0x80000001 #define MUX_S_PERMISSION_DENIED 0x80000002 #define MUX_S_FAILURE 0x80000003 #define MUX_S_EXIT_MESSAGE 0x80000004 #define MUX_S_ALIVE 0x80000005 #define MUX_S_SESSION_OPENED 0x80000006 #define MUX_S_REMOTE_PORT 0x80000007 #define MUX_S_TTY_ALLOC_FAIL 0x80000008 #define MUX_S_PROXY 0x8000000f /* type codes for MUX_C_OPEN_FWD and MUX_C_CLOSE_FWD */ #define MUX_FWD_LOCAL 1 #define MUX_FWD_REMOTE 2 #define MUX_FWD_DYNAMIC 3 static void mux_session_confirm(struct ssh *, int, int, void *); static void mux_stdio_confirm(struct ssh *, int, int, void *); static int mux_master_process_hello(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_new_session(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_alive_check(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_terminate(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_open_fwd(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_close_fwd(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_stdio_fwd(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_stop_listening(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static int mux_master_process_proxy(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); static const struct { u_int type; int (*handler)(struct ssh *, u_int, Channel *, struct sshbuf *, struct sshbuf *); } mux_master_handlers[] = { { MUX_MSG_HELLO, mux_master_process_hello }, { MUX_C_NEW_SESSION, mux_master_process_new_session }, { MUX_C_ALIVE_CHECK, mux_master_process_alive_check }, { MUX_C_TERMINATE, mux_master_process_terminate }, { MUX_C_OPEN_FWD, mux_master_process_open_fwd }, { MUX_C_CLOSE_FWD, mux_master_process_close_fwd }, { MUX_C_NEW_STDIO_FWD, mux_master_process_stdio_fwd }, { MUX_C_STOP_LISTENING, mux_master_process_stop_listening }, { MUX_C_PROXY, mux_master_process_proxy }, { 0, NULL } }; /* Cleanup callback fired on closure of mux client _session_ channel */ /* ARGSUSED */ static void mux_master_session_cleanup_cb(struct ssh *ssh, int cid, void *unused) { Channel *cc, *c = channel_by_id(ssh, cid); debug3_f("entering for channel %d", cid); if (c == NULL) fatal_f("channel_by_id(%i) == NULL", cid); if (c->ctl_chan != -1) { if ((cc = channel_by_id(ssh, c->ctl_chan)) == NULL) fatal_f("channel %d missing control channel %d", c->self, c->ctl_chan); c->ctl_chan = -1; cc->remote_id = 0; cc->have_remote_id = 0; chan_rcvd_oclose(ssh, cc); } channel_cancel_cleanup(ssh, c->self); } /* Cleanup callback fired on closure of mux client _control_ channel */ /* ARGSUSED */ static void mux_master_control_cleanup_cb(struct ssh *ssh, int cid, void *unused) { Channel *sc, *c = channel_by_id(ssh, cid); debug3_f("entering for channel %d", cid); if (c == NULL) fatal_f("channel_by_id(%i) == NULL", cid); if (c->have_remote_id) { if ((sc = channel_by_id(ssh, c->remote_id)) == NULL) fatal_f("channel %d missing session channel %u", c->self, c->remote_id); c->remote_id = 0; c->have_remote_id = 0; sc->ctl_chan = -1; if (sc->type != SSH_CHANNEL_OPEN && sc->type != SSH_CHANNEL_OPENING) { debug2_f("channel %d: not open", sc->self); chan_mark_dead(ssh, sc); } else { if (sc->istate == CHAN_INPUT_OPEN) chan_read_failed(ssh, sc); if (sc->ostate == CHAN_OUTPUT_OPEN) chan_write_failed(ssh, sc); } } channel_cancel_cleanup(ssh, c->self); } /* Check mux client environment variables before passing them to mux master. */ static int env_permitted(char *env) { int i, ret; char name[1024], *cp; if ((cp = strchr(env, '=')) == NULL || cp == env) return 0; ret = snprintf(name, sizeof(name), "%.*s", (int)(cp - env), env); if (ret <= 0 || (size_t)ret >= sizeof(name)) { error_f("name '%.100s...' too long", env); return 0; } for (i = 0; i < options.num_send_env; i++) if (match_pattern(name, options.send_env[i])) return 1; return 0; } /* Mux master protocol message handlers */ static int mux_master_process_hello(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { u_int ver; struct mux_master_state *state = (struct mux_master_state *)c->mux_ctx; int r; if (state == NULL) fatal_f("channel %d: c->mux_ctx == NULL", c->self); if (state->hello_rcvd) { error_f("HELLO received twice"); return -1; } if ((r = sshbuf_get_u32(m, &ver)) != 0) { error_fr(r, "parse"); return -1; } if (ver != SSHMUX_VER) { error_f("unsupported multiplexing protocol version %u " "(expected %u)", ver, SSHMUX_VER); return -1; } debug2_f("channel %d client version %u", c->self, ver); /* No extensions are presently defined */ while (sshbuf_len(m) > 0) { char *name = NULL; size_t value_len = 0; if ((r = sshbuf_get_cstring(m, &name, NULL)) != 0 || (r = sshbuf_get_string_direct(m, NULL, &value_len)) != 0) { error_fr(r, "parse extension"); return -1; } debug2_f("Unrecognised extension \"%s\" length %zu", name, value_len); free(name); } state->hello_rcvd = 1; return 0; } /* Enqueue a "ok" response to the reply buffer */ static void reply_ok(struct sshbuf *reply, u_int rid) { int r; if ((r = sshbuf_put_u32(reply, MUX_S_OK)) != 0 || (r = sshbuf_put_u32(reply, rid)) != 0) fatal_fr(r, "reply"); } /* Enqueue an error response to the reply buffer */ static void reply_error(struct sshbuf *reply, u_int type, u_int rid, const char *msg) { int r; if ((r = sshbuf_put_u32(reply, type)) != 0 || (r = sshbuf_put_u32(reply, rid)) != 0 || (r = sshbuf_put_cstring(reply, msg)) != 0) fatal_fr(r, "reply"); } static int mux_master_process_new_session(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { Channel *nc; struct mux_session_confirm_ctx *cctx; char *cmd, *cp; u_int i, j, env_len, escape_char, window, packetmax; int r, new_fd[3]; /* Reply for SSHMUX_COMMAND_OPEN */ cctx = xcalloc(1, sizeof(*cctx)); cctx->term = NULL; cctx->rid = rid; cmd = NULL; cctx->env = NULL; env_len = 0; if ((r = sshbuf_skip_string(m)) != 0 || /* reserved */ (r = sshbuf_get_u32(m, &cctx->want_tty)) != 0 || (r = sshbuf_get_u32(m, &cctx->want_x_fwd)) != 0 || (r = sshbuf_get_u32(m, &cctx->want_agent_fwd)) != 0 || (r = sshbuf_get_u32(m, &cctx->want_subsys)) != 0 || (r = sshbuf_get_u32(m, &escape_char)) != 0 || (r = sshbuf_get_cstring(m, &cctx->term, NULL)) != 0 || (r = sshbuf_get_cstring(m, &cmd, NULL)) != 0) { malf: free(cmd); for (j = 0; j < env_len; j++) free(cctx->env[j]); free(cctx->env); free(cctx->term); free(cctx); error_f("malformed message"); return -1; } #define MUX_MAX_ENV_VARS 4096 while (sshbuf_len(m) > 0) { if ((r = sshbuf_get_cstring(m, &cp, NULL)) != 0) goto malf; if (!env_permitted(cp)) { free(cp); continue; } cctx->env = xreallocarray(cctx->env, env_len + 2, sizeof(*cctx->env)); cctx->env[env_len++] = cp; cctx->env[env_len] = NULL; if (env_len > MUX_MAX_ENV_VARS) { error_f(">%d environment variables received, " "ignoring additional", MUX_MAX_ENV_VARS); break; } } debug2_f("channel %d: request tty %d, X %d, agent %d, subsys %d, " "term \"%s\", cmd \"%s\", env %u", c->self, cctx->want_tty, cctx->want_x_fwd, cctx->want_agent_fwd, cctx->want_subsys, cctx->term, cmd, env_len); if ((cctx->cmd = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put(cctx->cmd, cmd, strlen(cmd))) != 0) fatal_fr(r, "sshbuf_put"); free(cmd); cmd = NULL; /* Gather fds from client */ for(i = 0; i < 3; i++) { if ((new_fd[i] = mm_receive_fd(c->sock)) == -1) { error_f("failed to receive fd %d from client", i); for (j = 0; j < i; j++) close(new_fd[j]); for (j = 0; j < env_len; j++) free(cctx->env[j]); free(cctx->env); free(cctx->term); sshbuf_free(cctx->cmd); free(cctx); reply_error(reply, MUX_S_FAILURE, rid, "did not receive file descriptors"); return -1; } } debug3_f("got fds stdin %d, stdout %d, stderr %d", new_fd[0], new_fd[1], new_fd[2]); /* XXX support multiple child sessions in future */ if (c->have_remote_id) { debug2_f("session already open"); reply_error(reply, MUX_S_FAILURE, rid, "Multiple sessions not supported"); cleanup: close(new_fd[0]); close(new_fd[1]); close(new_fd[2]); free(cctx->term); if (env_len != 0) { for (i = 0; i < env_len; i++) free(cctx->env[i]); free(cctx->env); } sshbuf_free(cctx->cmd); free(cctx); return 0; } if (options.control_master == SSHCTL_MASTER_ASK || options.control_master == SSHCTL_MASTER_AUTO_ASK) { if (!ask_permission("Allow shared connection to %s? ", host)) { debug2_f("session refused by user"); reply_error(reply, MUX_S_PERMISSION_DENIED, rid, "Permission denied"); goto cleanup; } } /* Try to pick up ttymodes from client before it goes raw */ if (cctx->want_tty && tcgetattr(new_fd[0], &cctx->tio) == -1) error_f("tcgetattr: %s", strerror(errno)); /* enable nonblocking unless tty */ if (!isatty(new_fd[0])) set_nonblock(new_fd[0]); if (!isatty(new_fd[1])) set_nonblock(new_fd[1]); if (!isatty(new_fd[2])) set_nonblock(new_fd[2]); window = CHAN_SES_WINDOW_DEFAULT; packetmax = CHAN_SES_PACKET_DEFAULT; if (cctx->want_tty) { window >>= 1; packetmax >>= 1; } nc = channel_new(ssh, "session", SSH_CHANNEL_OPENING, new_fd[0], new_fd[1], new_fd[2], window, packetmax, CHAN_EXTENDED_WRITE, "client-session", /*nonblock*/0); nc->ctl_chan = c->self; /* link session -> control channel */ c->remote_id = nc->self; /* link control -> session channel */ c->have_remote_id = 1; if (cctx->want_tty && escape_char != 0xffffffff) { channel_register_filter(ssh, nc->self, client_simple_escape_filter, NULL, client_filter_cleanup, client_new_escape_filter_ctx((int)escape_char)); } debug2_f("channel_new: %d linked to control channel %d", nc->self, nc->ctl_chan); channel_send_open(ssh, nc->self); channel_register_open_confirm(ssh, nc->self, mux_session_confirm, cctx); c->mux_pause = 1; /* stop handling messages until open_confirm done */ channel_register_cleanup(ssh, nc->self, mux_master_session_cleanup_cb, 1); /* reply is deferred, sent by mux_session_confirm */ return 0; } static int mux_master_process_alive_check(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { int r; debug2_f("channel %d: alive check", c->self); /* prepare reply */ if ((r = sshbuf_put_u32(reply, MUX_S_ALIVE)) != 0 || (r = sshbuf_put_u32(reply, rid)) != 0 || (r = sshbuf_put_u32(reply, (u_int)getpid())) != 0) fatal_fr(r, "reply"); return 0; } static int mux_master_process_terminate(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { debug2_f("channel %d: terminate request", c->self); if (options.control_master == SSHCTL_MASTER_ASK || options.control_master == SSHCTL_MASTER_AUTO_ASK) { if (!ask_permission("Terminate shared connection to %s? ", host)) { debug2_f("termination refused by user"); reply_error(reply, MUX_S_PERMISSION_DENIED, rid, "Permission denied"); return 0; } } quit_pending = 1; reply_ok(reply, rid); /* XXX exit happens too soon - message never makes it to client */ return 0; } static char * format_forward(u_int ftype, struct Forward *fwd) { char *ret; switch (ftype) { case MUX_FWD_LOCAL: xasprintf(&ret, "local forward %.200s:%d -> %.200s:%d", (fwd->listen_path != NULL) ? fwd->listen_path : (fwd->listen_host == NULL) ? (options.fwd_opts.gateway_ports ? "*" : "LOCALHOST") : fwd->listen_host, fwd->listen_port, (fwd->connect_path != NULL) ? fwd->connect_path : fwd->connect_host, fwd->connect_port); break; case MUX_FWD_DYNAMIC: xasprintf(&ret, "dynamic forward %.200s:%d -> *", (fwd->listen_host == NULL) ? (options.fwd_opts.gateway_ports ? "*" : "LOCALHOST") : fwd->listen_host, fwd->listen_port); break; case MUX_FWD_REMOTE: xasprintf(&ret, "remote forward %.200s:%d -> %.200s:%d", (fwd->listen_path != NULL) ? fwd->listen_path : (fwd->listen_host == NULL) ? "LOCALHOST" : fwd->listen_host, fwd->listen_port, (fwd->connect_path != NULL) ? fwd->connect_path : fwd->connect_host, fwd->connect_port); break; default: fatal_f("unknown forward type %u", ftype); } return ret; } static int compare_host(const char *a, const char *b) { if (a == NULL && b == NULL) return 1; if (a == NULL || b == NULL) return 0; return strcmp(a, b) == 0; } static int compare_forward(struct Forward *a, struct Forward *b) { if (!compare_host(a->listen_host, b->listen_host)) return 0; if (!compare_host(a->listen_path, b->listen_path)) return 0; if (a->listen_port != b->listen_port) return 0; if (!compare_host(a->connect_host, b->connect_host)) return 0; if (!compare_host(a->connect_path, b->connect_path)) return 0; if (a->connect_port != b->connect_port) return 0; return 1; } static void mux_confirm_remote_forward(struct ssh *ssh, int type, u_int32_t seq, void *ctxt) { struct mux_channel_confirm_ctx *fctx = ctxt; char *failmsg = NULL; struct Forward *rfwd; Channel *c; struct sshbuf *out; u_int port; int r; if ((c = channel_by_id(ssh, fctx->cid)) == NULL) { /* no channel for reply */ error_f("unknown channel"); return; } if ((out = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if (fctx->fid >= options.num_remote_forwards || (options.remote_forwards[fctx->fid].connect_path == NULL && options.remote_forwards[fctx->fid].connect_host == NULL)) { xasprintf(&failmsg, "unknown forwarding id %d", fctx->fid); goto fail; } rfwd = &options.remote_forwards[fctx->fid]; debug_f("%s for: listen %d, connect %s:%d", type == SSH2_MSG_REQUEST_SUCCESS ? "success" : "failure", rfwd->listen_port, rfwd->connect_path ? rfwd->connect_path : rfwd->connect_host, rfwd->connect_port); if (type == SSH2_MSG_REQUEST_SUCCESS) { if (rfwd->listen_port == 0) { if ((r = sshpkt_get_u32(ssh, &port)) != 0) fatal_fr(r, "parse port"); if (port > 65535) { fatal("Invalid allocated port %u for " "mux remote forward to %s:%d", port, rfwd->connect_host, rfwd->connect_port); } rfwd->allocated_port = (int)port; debug("Allocated port %u for mux remote forward" " to %s:%d", rfwd->allocated_port, rfwd->connect_host, rfwd->connect_port); if ((r = sshbuf_put_u32(out, MUX_S_REMOTE_PORT)) != 0 || (r = sshbuf_put_u32(out, fctx->rid)) != 0 || (r = sshbuf_put_u32(out, rfwd->allocated_port)) != 0) fatal_fr(r, "reply"); channel_update_permission(ssh, rfwd->handle, rfwd->allocated_port); } else { reply_ok(out, fctx->rid); } goto out; } else { if (rfwd->listen_port == 0) channel_update_permission(ssh, rfwd->handle, -1); if (rfwd->listen_path != NULL) xasprintf(&failmsg, "remote port forwarding failed for " "listen path %s", rfwd->listen_path); else xasprintf(&failmsg, "remote port forwarding failed for " "listen port %d", rfwd->listen_port); debug2_f("clearing registered forwarding for listen %d, " "connect %s:%d", rfwd->listen_port, rfwd->connect_path ? rfwd->connect_path : rfwd->connect_host, rfwd->connect_port); free(rfwd->listen_host); free(rfwd->listen_path); free(rfwd->connect_host); free(rfwd->connect_path); memset(rfwd, 0, sizeof(*rfwd)); } fail: error_f("%s", failmsg); reply_error(out, MUX_S_FAILURE, fctx->rid, failmsg); free(failmsg); out: if ((r = sshbuf_put_stringb(c->output, out)) != 0) fatal_fr(r, "enqueue"); sshbuf_free(out); if (c->mux_pause <= 0) fatal_f("mux_pause %d", c->mux_pause); c->mux_pause = 0; /* start processing messages again */ } static int mux_master_process_open_fwd(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { struct Forward fwd; char *fwd_desc = NULL; char *listen_addr, *connect_addr; u_int ftype; u_int lport, cport; int r, i, ret = 0, freefwd = 1; memset(&fwd, 0, sizeof(fwd)); /* XXX - lport/cport check redundant */ if ((r = sshbuf_get_u32(m, &ftype)) != 0 || (r = sshbuf_get_cstring(m, &listen_addr, NULL)) != 0 || (r = sshbuf_get_u32(m, &lport)) != 0 || (r = sshbuf_get_cstring(m, &connect_addr, NULL)) != 0 || (r = sshbuf_get_u32(m, &cport)) != 0 || (lport != (u_int)PORT_STREAMLOCAL && lport > 65535) || (cport != (u_int)PORT_STREAMLOCAL && cport > 65535)) { error_f("malformed message"); ret = -1; goto out; } if (*listen_addr == '\0') { free(listen_addr); listen_addr = NULL; } if (*connect_addr == '\0') { free(connect_addr); connect_addr = NULL; } memset(&fwd, 0, sizeof(fwd)); fwd.listen_port = lport; if (fwd.listen_port == PORT_STREAMLOCAL) fwd.listen_path = listen_addr; else fwd.listen_host = listen_addr; fwd.connect_port = cport; if (fwd.connect_port == PORT_STREAMLOCAL) fwd.connect_path = connect_addr; else fwd.connect_host = connect_addr; debug2_f("channel %d: request %s", c->self, (fwd_desc = format_forward(ftype, &fwd))); if (ftype != MUX_FWD_LOCAL && ftype != MUX_FWD_REMOTE && ftype != MUX_FWD_DYNAMIC) { logit_f("invalid forwarding type %u", ftype); invalid: free(listen_addr); free(connect_addr); reply_error(reply, MUX_S_FAILURE, rid, "Invalid forwarding request"); return 0; } if (ftype == MUX_FWD_DYNAMIC && fwd.listen_path) { logit_f("streamlocal and dynamic forwards " "are mutually exclusive"); goto invalid; } if (fwd.listen_port != PORT_STREAMLOCAL && fwd.listen_port >= 65536) { logit_f("invalid listen port %u", fwd.listen_port); goto invalid; } if ((fwd.connect_port != PORT_STREAMLOCAL && fwd.connect_port >= 65536) || (ftype != MUX_FWD_DYNAMIC && ftype != MUX_FWD_REMOTE && fwd.connect_port == 0)) { logit_f("invalid connect port %u", fwd.connect_port); goto invalid; } if (ftype != MUX_FWD_DYNAMIC && fwd.connect_host == NULL && fwd.connect_path == NULL) { logit_f("missing connect host"); goto invalid; } /* Skip forwards that have already been requested */ switch (ftype) { case MUX_FWD_LOCAL: case MUX_FWD_DYNAMIC: for (i = 0; i < options.num_local_forwards; i++) { if (compare_forward(&fwd, options.local_forwards + i)) { exists: debug2_f("found existing forwarding"); reply_ok(reply, rid); goto out; } } break; case MUX_FWD_REMOTE: for (i = 0; i < options.num_remote_forwards; i++) { if (!compare_forward(&fwd, options.remote_forwards + i)) continue; if (fwd.listen_port != 0) goto exists; debug2_f("found allocated port"); if ((r = sshbuf_put_u32(reply, MUX_S_REMOTE_PORT)) != 0 || (r = sshbuf_put_u32(reply, rid)) != 0 || (r = sshbuf_put_u32(reply, options.remote_forwards[i].allocated_port)) != 0) fatal_fr(r, "reply FWD_REMOTE"); goto out; } break; } if (options.control_master == SSHCTL_MASTER_ASK || options.control_master == SSHCTL_MASTER_AUTO_ASK) { if (!ask_permission("Open %s on %s?", fwd_desc, host)) { debug2_f("forwarding refused by user"); reply_error(reply, MUX_S_PERMISSION_DENIED, rid, "Permission denied"); goto out; } } if (ftype == MUX_FWD_LOCAL || ftype == MUX_FWD_DYNAMIC) { if (!channel_setup_local_fwd_listener(ssh, &fwd, &options.fwd_opts)) { fail: logit_f("requested %s failed", fwd_desc); reply_error(reply, MUX_S_FAILURE, rid, "Port forwarding failed"); goto out; } add_local_forward(&options, &fwd); freefwd = 0; } else { struct mux_channel_confirm_ctx *fctx; fwd.handle = channel_request_remote_forwarding(ssh, &fwd); if (fwd.handle < 0) goto fail; add_remote_forward(&options, &fwd); fctx = xcalloc(1, sizeof(*fctx)); fctx->cid = c->self; fctx->rid = rid; fctx->fid = options.num_remote_forwards - 1; client_register_global_confirm(mux_confirm_remote_forward, fctx); freefwd = 0; c->mux_pause = 1; /* wait for mux_confirm_remote_forward */ /* delayed reply in mux_confirm_remote_forward */ goto out; } reply_ok(reply, rid); out: free(fwd_desc); if (freefwd) { free(fwd.listen_host); free(fwd.listen_path); free(fwd.connect_host); free(fwd.connect_path); } return ret; } static int mux_master_process_close_fwd(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { struct Forward fwd, *found_fwd; char *fwd_desc = NULL; const char *error_reason = NULL; char *listen_addr = NULL, *connect_addr = NULL; u_int ftype; int r, i, ret = 0; u_int lport, cport; memset(&fwd, 0, sizeof(fwd)); if ((r = sshbuf_get_u32(m, &ftype)) != 0 || (r = sshbuf_get_cstring(m, &listen_addr, NULL)) != 0 || (r = sshbuf_get_u32(m, &lport)) != 0 || (r = sshbuf_get_cstring(m, &connect_addr, NULL)) != 0 || (r = sshbuf_get_u32(m, &cport)) != 0 || (lport != (u_int)PORT_STREAMLOCAL && lport > 65535) || (cport != (u_int)PORT_STREAMLOCAL && cport > 65535)) { error_f("malformed message"); ret = -1; goto out; } if (*listen_addr == '\0') { free(listen_addr); listen_addr = NULL; } if (*connect_addr == '\0') { free(connect_addr); connect_addr = NULL; } memset(&fwd, 0, sizeof(fwd)); fwd.listen_port = lport; if (fwd.listen_port == PORT_STREAMLOCAL) fwd.listen_path = listen_addr; else fwd.listen_host = listen_addr; fwd.connect_port = cport; if (fwd.connect_port == PORT_STREAMLOCAL) fwd.connect_path = connect_addr; else fwd.connect_host = connect_addr; debug2_f("channel %d: request cancel %s", c->self, (fwd_desc = format_forward(ftype, &fwd))); /* make sure this has been requested */ found_fwd = NULL; switch (ftype) { case MUX_FWD_LOCAL: case MUX_FWD_DYNAMIC: for (i = 0; i < options.num_local_forwards; i++) { if (compare_forward(&fwd, options.local_forwards + i)) { found_fwd = options.local_forwards + i; break; } } break; case MUX_FWD_REMOTE: for (i = 0; i < options.num_remote_forwards; i++) { if (compare_forward(&fwd, options.remote_forwards + i)) { found_fwd = options.remote_forwards + i; break; } } break; } if (found_fwd == NULL) error_reason = "port not forwarded"; else if (ftype == MUX_FWD_REMOTE) { /* * This shouldn't fail unless we confused the host/port * between options.remote_forwards and permitted_opens. * However, for dynamic allocated listen ports we need * to use the actual listen port. */ if (channel_request_rforward_cancel(ssh, found_fwd) == -1) error_reason = "port not in permitted opens"; } else { /* local and dynamic forwards */ /* Ditto */ if (channel_cancel_lport_listener(ssh, &fwd, fwd.connect_port, &options.fwd_opts) == -1) error_reason = "port not found"; } if (error_reason != NULL) reply_error(reply, MUX_S_FAILURE, rid, error_reason); else { reply_ok(reply, rid); free(found_fwd->listen_host); free(found_fwd->listen_path); free(found_fwd->connect_host); free(found_fwd->connect_path); found_fwd->listen_host = found_fwd->connect_host = NULL; found_fwd->listen_path = found_fwd->connect_path = NULL; found_fwd->listen_port = found_fwd->connect_port = 0; } out: free(fwd_desc); free(listen_addr); free(connect_addr); return ret; } static int mux_master_process_stdio_fwd(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { Channel *nc; char *chost = NULL; u_int cport, i, j; int r, new_fd[2]; struct mux_stdio_confirm_ctx *cctx; if ((r = sshbuf_skip_string(m)) != 0 || /* reserved */ (r = sshbuf_get_cstring(m, &chost, NULL)) != 0 || (r = sshbuf_get_u32(m, &cport)) != 0) { free(chost); error_f("malformed message"); return -1; } debug2_f("channel %d: stdio fwd to %s:%u", c->self, chost, cport); /* Gather fds from client */ for(i = 0; i < 2; i++) { if ((new_fd[i] = mm_receive_fd(c->sock)) == -1) { error_f("failed to receive fd %d from client", i); for (j = 0; j < i; j++) close(new_fd[j]); free(chost); /* prepare reply */ reply_error(reply, MUX_S_FAILURE, rid, "did not receive file descriptors"); return -1; } } debug3_f("got fds stdin %d, stdout %d", new_fd[0], new_fd[1]); /* XXX support multiple child sessions in future */ if (c->have_remote_id) { debug2_f("session already open"); reply_error(reply, MUX_S_FAILURE, rid, "Multiple sessions not supported"); cleanup: close(new_fd[0]); close(new_fd[1]); free(chost); return 0; } if (options.control_master == SSHCTL_MASTER_ASK || options.control_master == SSHCTL_MASTER_AUTO_ASK) { if (!ask_permission("Allow forward to %s:%u? ", chost, cport)) { debug2_f("stdio fwd refused by user"); reply_error(reply, MUX_S_PERMISSION_DENIED, rid, "Permission denied"); goto cleanup; } } /* enable nonblocking unless tty */ if (!isatty(new_fd[0])) set_nonblock(new_fd[0]); if (!isatty(new_fd[1])) set_nonblock(new_fd[1]); nc = channel_connect_stdio_fwd(ssh, chost, cport, new_fd[0], new_fd[1]); free(chost); nc->ctl_chan = c->self; /* link session -> control channel */ c->remote_id = nc->self; /* link control -> session channel */ c->have_remote_id = 1; debug2_f("channel_new: %d control %d", nc->self, nc->ctl_chan); channel_register_cleanup(ssh, nc->self, mux_master_session_cleanup_cb, 1); cctx = xcalloc(1, sizeof(*cctx)); cctx->rid = rid; channel_register_open_confirm(ssh, nc->self, mux_stdio_confirm, cctx); c->mux_pause = 1; /* stop handling messages until open_confirm done */ /* reply is deferred, sent by mux_session_confirm */ return 0; } /* Callback on open confirmation in mux master for a mux stdio fwd session. */ static void mux_stdio_confirm(struct ssh *ssh, int id, int success, void *arg) { struct mux_stdio_confirm_ctx *cctx = arg; Channel *c, *cc; struct sshbuf *reply; int r; if (cctx == NULL) fatal_f("cctx == NULL"); if ((c = channel_by_id(ssh, id)) == NULL) fatal_f("no channel for id %d", id); if ((cc = channel_by_id(ssh, c->ctl_chan)) == NULL) fatal_f("channel %d lacks control channel %d", id, c->ctl_chan); if ((reply = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if (!success) { debug3_f("sending failure reply"); reply_error(reply, MUX_S_FAILURE, cctx->rid, "Session open refused by peer"); /* prepare reply */ goto done; } debug3_f("sending success reply"); /* prepare reply */ if ((r = sshbuf_put_u32(reply, MUX_S_SESSION_OPENED)) != 0 || (r = sshbuf_put_u32(reply, cctx->rid)) != 0 || (r = sshbuf_put_u32(reply, c->self)) != 0) fatal_fr(r, "reply"); done: /* Send reply */ if ((r = sshbuf_put_stringb(cc->output, reply)) != 0) fatal_fr(r, "enqueue"); sshbuf_free(reply); if (cc->mux_pause <= 0) fatal_f("mux_pause %d", cc->mux_pause); cc->mux_pause = 0; /* start processing messages again */ c->open_confirm_ctx = NULL; free(cctx); } static int mux_master_process_stop_listening(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { debug_f("channel %d: stop listening", c->self); if (options.control_master == SSHCTL_MASTER_ASK || options.control_master == SSHCTL_MASTER_AUTO_ASK) { if (!ask_permission("Disable further multiplexing on shared " "connection to %s? ", host)) { debug2_f("stop listen refused by user"); reply_error(reply, MUX_S_PERMISSION_DENIED, rid, "Permission denied"); return 0; } } if (mux_listener_channel != NULL) { channel_free(ssh, mux_listener_channel); client_stop_mux(); free(options.control_path); options.control_path = NULL; mux_listener_channel = NULL; muxserver_sock = -1; } reply_ok(reply, rid); return 0; } static int mux_master_process_proxy(struct ssh *ssh, u_int rid, Channel *c, struct sshbuf *m, struct sshbuf *reply) { int r; debug_f("channel %d: proxy request", c->self); c->mux_rcb = channel_proxy_downstream; if ((r = sshbuf_put_u32(reply, MUX_S_PROXY)) != 0 || (r = sshbuf_put_u32(reply, rid)) != 0) fatal_fr(r, "reply"); return 0; } /* Channel callbacks fired on read/write from mux client fd */ static int mux_master_read_cb(struct ssh *ssh, Channel *c) { struct mux_master_state *state = (struct mux_master_state *)c->mux_ctx; struct sshbuf *in = NULL, *out = NULL; u_int type, rid, i; int r, ret = -1; if ((out = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); /* Setup ctx and */ if (c->mux_ctx == NULL) { state = xcalloc(1, sizeof(*state)); c->mux_ctx = state; channel_register_cleanup(ssh, c->self, mux_master_control_cleanup_cb, 0); /* Send hello */ if ((r = sshbuf_put_u32(out, MUX_MSG_HELLO)) != 0 || (r = sshbuf_put_u32(out, SSHMUX_VER)) != 0) fatal_fr(r, "reply"); /* no extensions */ if ((r = sshbuf_put_stringb(c->output, out)) != 0) fatal_fr(r, "enqueue"); debug3_f("channel %d: hello sent", c->self); ret = 0; goto out; } /* Channel code ensures that we receive whole packets */ if ((r = sshbuf_froms(c->input, &in)) != 0) { malf: error_f("malformed message"); goto out; } if ((r = sshbuf_get_u32(in, &type)) != 0) goto malf; debug3_f("channel %d packet type 0x%08x len %zu", c->self, type, sshbuf_len(in)); if (type == MUX_MSG_HELLO) rid = 0; else { if (!state->hello_rcvd) { error_f("expected MUX_MSG_HELLO(0x%08x), " "received 0x%08x", MUX_MSG_HELLO, type); goto out; } if ((r = sshbuf_get_u32(in, &rid)) != 0) goto malf; } for (i = 0; mux_master_handlers[i].handler != NULL; i++) { if (type == mux_master_handlers[i].type) { ret = mux_master_handlers[i].handler(ssh, rid, c, in, out); break; } } if (mux_master_handlers[i].handler == NULL) { error_f("unsupported mux message 0x%08x", type); reply_error(out, MUX_S_FAILURE, rid, "unsupported request"); ret = 0; } /* Enqueue reply packet */ if (sshbuf_len(out) != 0 && (r = sshbuf_put_stringb(c->output, out)) != 0) fatal_fr(r, "enqueue"); out: sshbuf_free(in); sshbuf_free(out); return ret; } void mux_exit_message(struct ssh *ssh, Channel *c, int exitval) { struct sshbuf *m; Channel *mux_chan; int r; debug3_f("channel %d: exit message, exitval %d", c->self, exitval); if ((mux_chan = channel_by_id(ssh, c->ctl_chan)) == NULL) fatal_f("channel %d missing mux %d", c->self, c->ctl_chan); /* Append exit message packet to control socket output queue */ if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_S_EXIT_MESSAGE)) != 0 || (r = sshbuf_put_u32(m, c->self)) != 0 || (r = sshbuf_put_u32(m, exitval)) != 0 || (r = sshbuf_put_stringb(mux_chan->output, m)) != 0) fatal_fr(r, "reply"); sshbuf_free(m); } void mux_tty_alloc_failed(struct ssh *ssh, Channel *c) { struct sshbuf *m; Channel *mux_chan; int r; debug3_f("channel %d: TTY alloc failed", c->self); if ((mux_chan = channel_by_id(ssh, c->ctl_chan)) == NULL) fatal_f("channel %d missing mux %d", c->self, c->ctl_chan); /* Append exit message packet to control socket output queue */ if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_S_TTY_ALLOC_FAIL)) != 0 || (r = sshbuf_put_u32(m, c->self)) != 0 || (r = sshbuf_put_stringb(mux_chan->output, m)) != 0) fatal_fr(r, "reply"); sshbuf_free(m); } /* Prepare a mux master to listen on a Unix domain socket. */ void muxserver_listen(struct ssh *ssh) { mode_t old_umask; char *orig_control_path = options.control_path; char rbuf[16+1]; u_int i, r; int oerrno; if (options.control_path == NULL || options.control_master == SSHCTL_MASTER_NO) return; debug("setting up multiplex master socket"); /* * Use a temporary path before listen so we can pseudo-atomically * establish the listening socket in its final location to avoid * other processes racing in between bind() and listen() and hitting * an unready socket. */ for (i = 0; i < sizeof(rbuf) - 1; i++) { r = arc4random_uniform(26+26+10); rbuf[i] = (r < 26) ? 'a' + r : (r < 26*2) ? 'A' + r - 26 : '0' + r - 26 - 26; } rbuf[sizeof(rbuf) - 1] = '\0'; options.control_path = NULL; xasprintf(&options.control_path, "%s.%s", orig_control_path, rbuf); debug3_f("temporary control path %s", options.control_path); old_umask = umask(0177); muxserver_sock = unix_listener(options.control_path, 64, 0); oerrno = errno; umask(old_umask); if (muxserver_sock < 0) { if (oerrno == EINVAL || oerrno == EADDRINUSE) { error("ControlSocket %s already exists, " "disabling multiplexing", options.control_path); disable_mux_master: if (muxserver_sock != -1) { close(muxserver_sock); muxserver_sock = -1; } free(orig_control_path); free(options.control_path); options.control_path = NULL; options.control_master = SSHCTL_MASTER_NO; return; } else { /* unix_listener() logs the error */ cleanup_exit(255); } } /* Now atomically "move" the mux socket into position */ if (link(options.control_path, orig_control_path) != 0) { if (errno != EEXIST) { fatal_f("link mux listener %s => %s: %s", options.control_path, orig_control_path, strerror(errno)); } error("ControlSocket %s already exists, disabling multiplexing", orig_control_path); unlink(options.control_path); goto disable_mux_master; } unlink(options.control_path); free(options.control_path); options.control_path = orig_control_path; set_nonblock(muxserver_sock); mux_listener_channel = channel_new(ssh, "mux listener", SSH_CHANNEL_MUX_LISTENER, muxserver_sock, muxserver_sock, -1, CHAN_TCP_WINDOW_DEFAULT, CHAN_TCP_PACKET_DEFAULT, 0, options.control_path, 1); mux_listener_channel->mux_rcb = mux_master_read_cb; debug3_f("mux listener channel %d fd %d", mux_listener_channel->self, mux_listener_channel->sock); } /* Callback on open confirmation in mux master for a mux client session. */ static void mux_session_confirm(struct ssh *ssh, int id, int success, void *arg) { struct mux_session_confirm_ctx *cctx = arg; const char *display; Channel *c, *cc; int i, r; struct sshbuf *reply; if (cctx == NULL) fatal_f("cctx == NULL"); if ((c = channel_by_id(ssh, id)) == NULL) fatal_f("no channel for id %d", id); if ((cc = channel_by_id(ssh, c->ctl_chan)) == NULL) fatal_f("channel %d lacks control channel %d", id, c->ctl_chan); if ((reply = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if (!success) { debug3_f("sending failure reply"); reply_error(reply, MUX_S_FAILURE, cctx->rid, "Session open refused by peer"); goto done; } display = getenv("DISPLAY"); if (cctx->want_x_fwd && options.forward_x11 && display != NULL) { char *proto, *data; /* Get reasonable local authentication information. */ if (client_x11_get_proto(ssh, display, options.xauth_location, options.forward_x11_trusted, options.forward_x11_timeout, &proto, &data) == 0) { /* Request forwarding with authentication spoofing. */ debug("Requesting X11 forwarding with authentication " "spoofing."); x11_request_forwarding_with_spoofing(ssh, id, display, proto, data, 1); /* XXX exit_on_forward_failure */ client_expect_confirm(ssh, id, "X11 forwarding", CONFIRM_WARN); } } if (cctx->want_agent_fwd && options.forward_agent) { debug("Requesting authentication agent forwarding."); channel_request_start(ssh, id, "<EMAIL>", 0); if ((r = sshpkt_send(ssh)) != 0) fatal_fr(r, "send"); } client_session2_setup(ssh, id, cctx->want_tty, cctx->want_subsys, cctx->term, &cctx->tio, c->rfd, cctx->cmd, cctx->env); debug3_f("sending success reply"); /* prepare reply */ if ((r = sshbuf_put_u32(reply, MUX_S_SESSION_OPENED)) != 0 || (r = sshbuf_put_u32(reply, cctx->rid)) != 0 || (r = sshbuf_put_u32(reply, c->self)) != 0) fatal_fr(r, "reply"); done: /* Send reply */ if ((r = sshbuf_put_stringb(cc->output, reply)) != 0) fatal_fr(r, "enqueue"); sshbuf_free(reply); if (cc->mux_pause <= 0) fatal_f("mux_pause %d", cc->mux_pause); cc->mux_pause = 0; /* start processing messages again */ c->open_confirm_ctx = NULL; sshbuf_free(cctx->cmd); free(cctx->term); if (cctx->env != NULL) { for (i = 0; cctx->env[i] != NULL; i++) free(cctx->env[i]); free(cctx->env); } free(cctx); } /* ** Multiplexing client support */ /* Exit signal handler */ static void control_client_sighandler(int signo) { muxclient_terminate = signo; } /* * Relay signal handler - used to pass some signals from mux client to * mux master. */ static void control_client_sigrelay(int signo) { int save_errno = errno; if (muxserver_pid > 1) kill(muxserver_pid, signo); errno = save_errno; } static int mux_client_read(int fd, struct sshbuf *b, size_t need) { size_t have; ssize_t len; u_char *p; struct pollfd pfd; int r; pfd.fd = fd; pfd.events = POLLIN; if ((r = sshbuf_reserve(b, need, &p)) != 0) fatal_fr(r, "reserve"); for (have = 0; have < need; ) { if (muxclient_terminate) { errno = EINTR; return -1; } len = read(fd, p + have, need - have); if (len == -1) { switch (errno) { #if defined(EWOULDBLOCK) && (EWOULDBLOCK != EAGAIN) case EWOULDBLOCK: #endif case EAGAIN: (void)poll(&pfd, 1, -1); /* FALLTHROUGH */ case EINTR: continue; default: return -1; } } if (len == 0) { errno = EPIPE; return -1; } have += (size_t)len; } return 0; } static int mux_client_write_packet(int fd, struct sshbuf *m) { struct sshbuf *queue; u_int have, need; int r, oerrno, len; const u_char *ptr; struct pollfd pfd; pfd.fd = fd; pfd.events = POLLOUT; if ((queue = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_stringb(queue, m)) != 0) fatal_fr(r, "enqueue"); need = sshbuf_len(queue); ptr = sshbuf_ptr(queue); for (have = 0; have < need; ) { if (muxclient_terminate) { sshbuf_free(queue); errno = EINTR; return -1; } len = write(fd, ptr + have, need - have); if (len == -1) { switch (errno) { #if defined(EWOULDBLOCK) && (EWOULDBLOCK != EAGAIN) case EWOULDBLOCK: #endif case EAGAIN: (void)poll(&pfd, 1, -1); /* FALLTHROUGH */ case EINTR: continue; default: oerrno = errno; sshbuf_free(queue); errno = oerrno; return -1; } } if (len == 0) { sshbuf_free(queue); errno = EPIPE; return -1; } have += (u_int)len; } sshbuf_free(queue); return 0; } static int mux_client_read_packet(int fd, struct sshbuf *m) { struct sshbuf *queue; size_t need, have; const u_char *ptr; int r, oerrno; if ((queue = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if (mux_client_read(fd, queue, 4) != 0) { if ((oerrno = errno) == EPIPE) debug3_f("read header failed: %s", strerror(errno)); sshbuf_free(queue); errno = oerrno; return -1; } need = PEEK_U32(sshbuf_ptr(queue)); if (mux_client_read(fd, queue, need) != 0) { oerrno = errno; debug3_f("read body failed: %s", strerror(errno)); sshbuf_free(queue); errno = oerrno; return -1; } if ((r = sshbuf_get_string_direct(queue, &ptr, &have)) != 0 || (r = sshbuf_put(m, ptr, have)) != 0) fatal_fr(r, "dequeue"); sshbuf_free(queue); return 0; } static int mux_client_hello_exchange(int fd) { struct sshbuf *m; u_int type, ver; int r, ret = -1; if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_MSG_HELLO)) != 0 || (r = sshbuf_put_u32(m, SSHMUX_VER)) != 0) fatal_fr(r, "assemble hello"); /* no extensions */ if (mux_client_write_packet(fd, m) != 0) { debug_f("write packet: %s", strerror(errno)); goto out; } sshbuf_reset(m); /* Read their HELLO */ if (mux_client_read_packet(fd, m) != 0) { debug_f("read packet failed"); goto out; } if ((r = sshbuf_get_u32(m, &type)) != 0) fatal_fr(r, "parse type"); if (type != MUX_MSG_HELLO) { error_f("expected HELLO (%u) got %u", MUX_MSG_HELLO, type); goto out; } if ((r = sshbuf_get_u32(m, &ver)) != 0) fatal_fr(r, "parse version"); if (ver != SSHMUX_VER) { error("Unsupported multiplexing protocol version %d " "(expected %d)", ver, SSHMUX_VER); goto out; } debug2_f("master version %u", ver); /* No extensions are presently defined */ while (sshbuf_len(m) > 0) { char *name = NULL; if ((r = sshbuf_get_cstring(m, &name, NULL)) != 0 || (r = sshbuf_skip_string(m)) != 0) { /* value */ error_fr(r, "parse extension"); goto out; } debug2("Unrecognised master extension \"%s\"", name); free(name); } /* success */ ret = 0; out: sshbuf_free(m); return ret; } static u_int mux_client_request_alive(int fd) { struct sshbuf *m; char *e; u_int pid, type, rid; int r; debug3_f("entering"); if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_C_ALIVE_CHECK)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0) fatal_fr(r, "assemble"); if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); sshbuf_reset(m); /* Read their reply */ if (mux_client_read_packet(fd, m) != 0) { sshbuf_free(m); return 0; } if ((r = sshbuf_get_u32(m, &type)) != 0) fatal_fr(r, "parse type"); if (type != MUX_S_ALIVE) { if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal_f("master returned error: %s", e); } if ((r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse remote ID"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); if ((r = sshbuf_get_u32(m, &pid)) != 0) fatal_fr(r, "parse PID"); sshbuf_free(m); debug3_f("done pid = %u", pid); muxclient_request_id++; return pid; } static void mux_client_request_terminate(int fd) { struct sshbuf *m; char *e; u_int type, rid; int r; debug3_f("entering"); if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_C_TERMINATE)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0) fatal_fr(r, "request"); if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); sshbuf_reset(m); /* Read their reply */ if (mux_client_read_packet(fd, m) != 0) { /* Remote end exited already */ if (errno == EPIPE) { sshbuf_free(m); return; } fatal_f("read from master failed: %s", strerror(errno)); } if ((r = sshbuf_get_u32(m, &type)) != 0 || (r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); switch (type) { case MUX_S_OK: break; case MUX_S_PERMISSION_DENIED: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal("Master refused termination request: %s", e); case MUX_S_FAILURE: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal_f("termination request failed: %s", e); default: fatal_f("unexpected response from master 0x%08x", type); } sshbuf_free(m); muxclient_request_id++; } static int mux_client_forward(int fd, int cancel_flag, u_int ftype, struct Forward *fwd) { struct sshbuf *m; char *e, *fwd_desc; const char *lhost, *chost; u_int type, rid; int r; fwd_desc = format_forward(ftype, fwd); debug("Requesting %s %s", cancel_flag ? "cancellation of" : "forwarding of", fwd_desc); free(fwd_desc); type = cancel_flag ? MUX_C_CLOSE_FWD : MUX_C_OPEN_FWD; if (fwd->listen_path != NULL) lhost = fwd->listen_path; else if (fwd->listen_host == NULL) lhost = ""; else if (*fwd->listen_host == '\0') lhost = "*"; else lhost = fwd->listen_host; if (fwd->connect_path != NULL) chost = fwd->connect_path; else if (fwd->connect_host == NULL) chost = ""; else chost = fwd->connect_host; if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, type)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0 || (r = sshbuf_put_u32(m, ftype)) != 0 || (r = sshbuf_put_cstring(m, lhost)) != 0 || (r = sshbuf_put_u32(m, fwd->listen_port)) != 0 || (r = sshbuf_put_cstring(m, chost)) != 0 || (r = sshbuf_put_u32(m, fwd->connect_port)) != 0) fatal_fr(r, "request"); if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); sshbuf_reset(m); /* Read their reply */ if (mux_client_read_packet(fd, m) != 0) { sshbuf_free(m); return -1; } if ((r = sshbuf_get_u32(m, &type)) != 0 || (r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); switch (type) { case MUX_S_OK: break; case MUX_S_REMOTE_PORT: if (cancel_flag) fatal_f("got MUX_S_REMOTE_PORT for cancel"); if ((r = sshbuf_get_u32(m, &fwd->allocated_port)) != 0) fatal_fr(r, "parse port"); verbose("Allocated port %u for remote forward to %s:%d", fwd->allocated_port, fwd->connect_host ? fwd->connect_host : "", fwd->connect_port); if (muxclient_command == SSHMUX_COMMAND_FORWARD) fprintf(stdout, "%i\n", fwd->allocated_port); break; case MUX_S_PERMISSION_DENIED: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); sshbuf_free(m); error("Master refused forwarding request: %s", e); return -1; case MUX_S_FAILURE: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); sshbuf_free(m); error_f("forwarding request failed: %s", e); return -1; default: fatal_f("unexpected response from master 0x%08x", type); } sshbuf_free(m); muxclient_request_id++; return 0; } static int mux_client_forwards(int fd, int cancel_flag) { int i, ret = 0; debug3_f("%s forwardings: %d local, %d remote", cancel_flag ? "cancel" : "request", options.num_local_forwards, options.num_remote_forwards); /* XXX ExitOnForwardingFailure */ for (i = 0; i < options.num_local_forwards; i++) { if (mux_client_forward(fd, cancel_flag, options.local_forwards[i].connect_port == 0 ? MUX_FWD_DYNAMIC : MUX_FWD_LOCAL, options.local_forwards + i) != 0) ret = -1; } for (i = 0; i < options.num_remote_forwards; i++) { if (mux_client_forward(fd, cancel_flag, MUX_FWD_REMOTE, options.remote_forwards + i) != 0) ret = -1; } return ret; } static int mux_client_request_session(int fd) { struct sshbuf *m; char *e; const char *term; u_int echar, rid, sid, esid, exitval, type, exitval_seen; extern char **environ; int r, i, rawmode; debug3_f("entering"); if ((muxserver_pid = mux_client_request_alive(fd)) == 0) { error_f("master alive request failed"); return -1; } ssh_signal(SIGPIPE, SIG_IGN); if (stdin_null_flag && stdfd_devnull(1, 0, 0) == -1) fatal_f("stdfd_devnull failed"); if ((term = getenv("TERM")) == NULL) term = ""; echar = 0xffffffff; if (options.escape_char != SSH_ESCAPECHAR_NONE) echar = (u_int)options.escape_char; if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_C_NEW_SESSION)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0 || (r = sshbuf_put_string(m, NULL, 0)) != 0 || /* reserved */ (r = sshbuf_put_u32(m, tty_flag)) != 0 || (r = sshbuf_put_u32(m, options.forward_x11)) != 0 || (r = sshbuf_put_u32(m, options.forward_agent)) != 0 || (r = sshbuf_put_u32(m, subsystem_flag)) != 0 || (r = sshbuf_put_u32(m, echar)) != 0 || (r = sshbuf_put_cstring(m, term)) != 0 || (r = sshbuf_put_stringb(m, command)) != 0) fatal_fr(r, "request"); /* Pass environment */ if (options.num_send_env > 0 && environ != NULL) { for (i = 0; environ[i] != NULL; i++) { if (!env_permitted(environ[i])) continue; if ((r = sshbuf_put_cstring(m, environ[i])) != 0) fatal_fr(r, "request sendenv"); } } for (i = 0; i < options.num_setenv; i++) { if ((r = sshbuf_put_cstring(m, options.setenv[i])) != 0) fatal_fr(r, "request setenv"); } if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); /* Send the stdio file descriptors */ if (mm_send_fd(fd, STDIN_FILENO) == -1 || mm_send_fd(fd, STDOUT_FILENO) == -1 || mm_send_fd(fd, STDERR_FILENO) == -1) fatal_f("send fds failed"); debug3_f("session request sent"); /* Read their reply */ sshbuf_reset(m); if (mux_client_read_packet(fd, m) != 0) { error_f("read from master failed: %s", strerror(errno)); sshbuf_free(m); return -1; } if ((r = sshbuf_get_u32(m, &type)) != 0 || (r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); switch (type) { case MUX_S_SESSION_OPENED: if ((r = sshbuf_get_u32(m, &sid)) != 0) fatal_fr(r, "parse session ID"); debug_f("master session id: %u", sid); break; case MUX_S_PERMISSION_DENIED: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); error("Master refused session request: %s", e); sshbuf_free(m); return -1; case MUX_S_FAILURE: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); error_f("session request failed: %s", e); sshbuf_free(m); return -1; default: sshbuf_free(m); error_f("unexpected response from master 0x%08x", type); return -1; } muxclient_request_id++; if (pledge("stdio proc tty", NULL) == -1) fatal_f("pledge(): %s", strerror(errno)); platform_pledge_mux(); ssh_signal(SIGHUP, control_client_sighandler); ssh_signal(SIGINT, control_client_sighandler); ssh_signal(SIGTERM, control_client_sighandler); ssh_signal(SIGWINCH, control_client_sigrelay); rawmode = tty_flag; if (tty_flag) enter_raw_mode(options.request_tty == REQUEST_TTY_FORCE); /* * Stick around until the controlee closes the client_fd. * Before it does, it is expected to write an exit message. * This process must read the value and wait for the closure of * the client_fd; if this one closes early, the multiplex master will * terminate early too (possibly losing data). */ for (exitval = 255, exitval_seen = 0;;) { sshbuf_reset(m); if (mux_client_read_packet(fd, m) != 0) break; if ((r = sshbuf_get_u32(m, &type)) != 0) fatal_fr(r, "parse type"); switch (type) { case MUX_S_TTY_ALLOC_FAIL: if ((r = sshbuf_get_u32(m, &esid)) != 0) fatal_fr(r, "parse session ID"); if (esid != sid) fatal_f("tty alloc fail on unknown session: " "my id %u theirs %u", sid, esid); leave_raw_mode(options.request_tty == REQUEST_TTY_FORCE); rawmode = 0; continue; case MUX_S_EXIT_MESSAGE: if ((r = sshbuf_get_u32(m, &esid)) != 0) fatal_fr(r, "parse session ID"); if (esid != sid) fatal_f("exit on unknown session: " "my id %u theirs %u", sid, esid); if (exitval_seen) fatal_f("exitval sent twice"); if ((r = sshbuf_get_u32(m, &exitval)) != 0) fatal_fr(r, "parse exitval"); exitval_seen = 1; continue; default: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal_f("master returned error: %s", e); } } close(fd); if (rawmode) leave_raw_mode(options.request_tty == REQUEST_TTY_FORCE); if (muxclient_terminate) { debug2("Exiting on signal: %s", strsignal(muxclient_terminate)); exitval = 255; } else if (!exitval_seen) { debug2("Control master terminated unexpectedly"); exitval = 255; } else debug2("Received exit status from master %d", exitval); if (tty_flag && options.log_level != SYSLOG_LEVEL_QUIET) fprintf(stderr, "Shared connection to %s closed.\r\n", host); exit(exitval); } static int mux_client_proxy(int fd) { struct sshbuf *m; char *e; u_int type, rid; int r; if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_C_PROXY)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0) fatal_fr(r, "request"); if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); sshbuf_reset(m); /* Read their reply */ if (mux_client_read_packet(fd, m) != 0) { sshbuf_free(m); return 0; } if ((r = sshbuf_get_u32(m, &type)) != 0 || (r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); if (type != MUX_S_PROXY) { if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal_f("master returned error: %s", e); } sshbuf_free(m); debug3_f("done"); muxclient_request_id++; return 0; } static int mux_client_request_stdio_fwd(int fd) { struct sshbuf *m; char *e; u_int type, rid, sid; int r; debug3_f("entering"); if ((muxserver_pid = mux_client_request_alive(fd)) == 0) { error_f("master alive request failed"); return -1; } ssh_signal(SIGPIPE, SIG_IGN); if (stdin_null_flag && stdfd_devnull(1, 0, 0) == -1) fatal_f("stdfd_devnull failed"); if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_C_NEW_STDIO_FWD)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0 || (r = sshbuf_put_string(m, NULL, 0)) != 0 || /* reserved */ (r = sshbuf_put_cstring(m, options.stdio_forward_host)) != 0 || (r = sshbuf_put_u32(m, options.stdio_forward_port)) != 0) fatal_fr(r, "request"); if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); /* Send the stdio file descriptors */ if (mm_send_fd(fd, STDIN_FILENO) == -1 || mm_send_fd(fd, STDOUT_FILENO) == -1) fatal_f("send fds failed"); if (pledge("stdio proc tty", NULL) == -1) fatal_f("pledge(): %s", strerror(errno)); platform_pledge_mux(); debug3_f("stdio forward request sent"); /* Read their reply */ sshbuf_reset(m); if (mux_client_read_packet(fd, m) != 0) { error_f("read from master failed: %s", strerror(errno)); sshbuf_free(m); return -1; } if ((r = sshbuf_get_u32(m, &type)) != 0 || (r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); switch (type) { case MUX_S_SESSION_OPENED: if ((r = sshbuf_get_u32(m, &sid)) != 0) fatal_fr(r, "parse session ID"); debug_f("master session id: %u", sid); break; case MUX_S_PERMISSION_DENIED: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); sshbuf_free(m); fatal("Master refused stdio forwarding request: %s", e); case MUX_S_FAILURE: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); sshbuf_free(m); fatal("Stdio forwarding request failed: %s", e); default: sshbuf_free(m); error_f("unexpected response from master 0x%08x", type); return -1; } muxclient_request_id++; ssh_signal(SIGHUP, control_client_sighandler); ssh_signal(SIGINT, control_client_sighandler); ssh_signal(SIGTERM, control_client_sighandler); ssh_signal(SIGWINCH, control_client_sigrelay); /* * Stick around until the controlee closes the client_fd. */ sshbuf_reset(m); if (mux_client_read_packet(fd, m) != 0) { if (errno == EPIPE || (errno == EINTR && muxclient_terminate != 0)) return 0; fatal_f("mux_client_read_packet: %s", strerror(errno)); } fatal_f("master returned unexpected message %u", type); } static void mux_client_request_stop_listening(int fd) { struct sshbuf *m; char *e; u_int type, rid; int r; debug3_f("entering"); if ((m = sshbuf_new()) == NULL) fatal_f("sshbuf_new"); if ((r = sshbuf_put_u32(m, MUX_C_STOP_LISTENING)) != 0 || (r = sshbuf_put_u32(m, muxclient_request_id)) != 0) fatal_fr(r, "request"); if (mux_client_write_packet(fd, m) != 0) fatal_f("write packet: %s", strerror(errno)); sshbuf_reset(m); /* Read their reply */ if (mux_client_read_packet(fd, m) != 0) fatal_f("read from master failed: %s", strerror(errno)); if ((r = sshbuf_get_u32(m, &type)) != 0 || (r = sshbuf_get_u32(m, &rid)) != 0) fatal_fr(r, "parse"); if (rid != muxclient_request_id) fatal_f("out of sequence reply: my id %u theirs %u", muxclient_request_id, rid); switch (type) { case MUX_S_OK: break; case MUX_S_PERMISSION_DENIED: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal("Master refused stop listening request: %s", e); case MUX_S_FAILURE: if ((r = sshbuf_get_cstring(m, &e, NULL)) != 0) fatal_fr(r, "parse error message"); fatal_f("stop listening request failed: %s", e); default: fatal_f("unexpected response from master 0x%08x", type); } sshbuf_free(m); muxclient_request_id++; } /* Multiplex client main loop. */ int muxclient(const char *path) { struct sockaddr_un addr; int sock; u_int pid; if (muxclient_command == 0) { if (options.stdio_forward_host != NULL) muxclient_command = SSHMUX_COMMAND_STDIO_FWD; else muxclient_command = SSHMUX_COMMAND_OPEN; } switch (options.control_master) { case SSHCTL_MASTER_AUTO: case SSHCTL_MASTER_AUTO_ASK: debug("auto-mux: Trying existing master"); /* FALLTHROUGH */ case SSHCTL_MASTER_NO: break; default: return -1; } memset(&addr, '\0', sizeof(addr)); addr.sun_family = AF_UNIX; if (strlcpy(addr.sun_path, path, sizeof(addr.sun_path)) >= sizeof(addr.sun_path)) fatal("ControlPath too long ('%s' >= %u bytes)", path, (unsigned int)sizeof(addr.sun_path)); if ((sock = socket(PF_UNIX, SOCK_STREAM, 0)) == -1) fatal_f("socket(): %s", strerror(errno)); if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) == -1) { switch (muxclient_command) { case SSHMUX_COMMAND_OPEN: case SSHMUX_COMMAND_STDIO_FWD: break; default: fatal("Control socket connect(%.100s): %s", path, strerror(errno)); } if (errno == ECONNREFUSED && options.control_master != SSHCTL_MASTER_NO) { debug("Stale control socket %.100s, unlinking", path); unlink(path); } else if (errno == ENOENT) { debug("Control socket \"%.100s\" does not exist", path); } else { error("Control socket connect(%.100s): %s", path, strerror(errno)); } close(sock); return -1; } set_nonblock(sock); if (mux_client_hello_exchange(sock) != 0) { error_f("master hello exchange failed"); close(sock); return -1; } switch (muxclient_command) { case SSHMUX_COMMAND_ALIVE_CHECK: if ((pid = mux_client_request_alive(sock)) == 0) fatal_f("master alive check failed"); fprintf(stderr, "Master running (pid=%u)\r\n", pid); exit(0); case SSHMUX_COMMAND_TERMINATE: mux_client_request_terminate(sock); if (options.log_level != SYSLOG_LEVEL_QUIET) fprintf(stderr, "Exit request sent.\r\n"); exit(0); case SSHMUX_COMMAND_FORWARD: if (mux_client_forwards(sock, 0) != 0) fatal_f("master forward request failed"); exit(0); case SSHMUX_COMMAND_OPEN: if (mux_client_forwards(sock, 0) != 0) { error_f("master forward request failed"); return -1; } mux_client_request_session(sock); return -1; case SSHMUX_COMMAND_STDIO_FWD: mux_client_request_stdio_fwd(sock); exit(0); case SSHMUX_COMMAND_STOP: mux_client_request_stop_listening(sock); if (options.log_level != SYSLOG_LEVEL_QUIET) fprintf(stderr, "Stop listening request sent.\r\n"); exit(0); case SSHMUX_COMMAND_CANCEL_FWD: if (mux_client_forwards(sock, 1) != 0) error_f("master cancel forward request failed"); exit(0); case SSHMUX_COMMAND_PROXY: mux_client_proxy(sock); return (sock); default: fatal("unrecognised muxclient_command %d", muxclient_command); } }
28,154
457
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Nov 2019 @author: <NAME> <<EMAIL>> """ from typing import Union import numpy as np from scipy import sparse from scipy.sparse.linalg import LinearOperator def diag_pinv(weights: np.ndarray) -> sparse.csr_matrix: """Compute :math:`W^+ = \\text{diag}(w)^+`, the pseudo inverse of the diagonal matrix with diagonal the weights :math:`w`. Parameters ---------- weights: The weights to invert. Returns ------- sparse.csr_matrix :math:`W^+` """ diag: sparse.csr_matrix = sparse.diags(weights, format='csr') diag.data = 1 / diag.data return diag def normalize(matrix: Union[sparse.csr_matrix, np.ndarray, LinearOperator], p=1): """Normalize rows of a matrix. Null rows remain null. Parameters ---------- matrix : Input matrix. p : Order of the norm Returns ------- normalized matrix : same as input """ if p == 1: norm = matrix.dot(np.ones(matrix.shape[1])) elif p == 2: if isinstance(matrix, np.ndarray): norm = np.linalg.norm(matrix, axis=1) elif isinstance(matrix, sparse.csr_matrix): data = matrix.data.copy() matrix.data = data ** 2 norm = np.sqrt(matrix.dot(np.ones(matrix.shape[1]))) matrix.data = data else: raise NotImplementedError('Norm 2 is not available for LinearOperator.') else: raise NotImplementedError('Only norms 1 and 2 are available at the moment.') diag = diag_pinv(norm) if hasattr(matrix, 'left_sparse_dot') and callable(matrix.left_sparse_dot): return matrix.left_sparse_dot(diag) return diag.dot(matrix)
760
6,663
namespace outer { int x = 10; int outer_value = 10; namespace inner { int x = 100; int inner_value = 100; } } namespace A { typedef int A_t; struct S { A_t k; double x; }; A_t A_func(A_t first, A_t second) { return first + second; } }
164
310
{ "name": "CourtSearch", "description": "A service for searching court records.", "url": "https://www.courtsearch.com/" }
40
777
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/website_settings/website_settings_popup_view.h" #include "base/macros.h" #include "base/strings/utf_string_conversions.h" #include "chrome/browser/ui/exclusive_access/exclusive_access_manager.h" #include "chrome/browser/ui/views/website_settings/chosen_object_row.h" #include "chrome/browser/ui/views/website_settings/permission_selector_row.h" #include "chrome/browser/usb/usb_chooser_context.h" #include "chrome/browser/usb/usb_chooser_context_factory.h" #include "chrome/test/base/testing_profile.h" #include "content/public/browser/ssl_status.h" #include "content/public/test/test_browser_thread_bundle.h" #include "content/public/test/test_web_contents_factory.h" #include "device/base/mock_device_client.h" #include "device/usb/mock_usb_device.h" #include "device/usb/mock_usb_service.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/events/event_utils.h" #include "ui/views/controls/button/menu_button.h" #include "ui/views/controls/combobox/combobox.h" #include "ui/views/controls/label.h" #include "ui/views/test/scoped_views_test_helper.h" const char* kUrl = "http://www.example.com/index.html"; namespace test { class WebsiteSettingsPopupViewTestApi { public: WebsiteSettingsPopupViewTestApi(gfx::NativeView parent, Profile* profile, content::WebContents* web_contents) : view_(nullptr), parent_(parent), profile_(profile), web_contents_(web_contents) { CreateView(); } void CreateView() { if (view_) view_->GetWidget()->CloseNow(); security_state::SecurityInfo security_info; views::View* anchor_view = nullptr; view_ = new WebsiteSettingsPopupView(anchor_view, parent_, profile_, web_contents_, GURL(kUrl), security_info); } WebsiteSettingsPopupView* view() { return view_; } views::View* permissions_view() { return view_->permissions_view_; } PermissionSelectorRow* GetPermissionSelectorAt(int index) { return static_cast<PermissionSelectorRow*>( permissions_view()->child_at(index)); } base::string16 GetPermissionButtonTextAt(int index) { const int kButtonIndex = 2; // Button should be the third child. views::View* view = GetPermissionSelectorAt(index)->child_at(kButtonIndex); if (view->GetClassName() == views::MenuButton::kViewClassName) { return static_cast<views::MenuButton*>(view)->GetText(); } else if (view->GetClassName() == views::Combobox::kViewClassName) { views::Combobox* combobox = static_cast<views::Combobox*>(view); return combobox->GetTextForRow(combobox->GetSelectedRow()); } else { NOTREACHED() << "Unknown class " << view->GetClassName(); return base::string16(); } } // Simulates recreating the dialog with a new PermissionInfoList. void SetPermissionInfo(const PermissionInfoList& list) { for (const WebsiteSettingsPopupView::PermissionInfo& info : list) view_->presenter_->OnSitePermissionChanged(info.type, info.setting); CreateView(); } private: WebsiteSettingsPopupView* view_; // Weak. Owned by its Widget. // For recreating the view. gfx::NativeView parent_; Profile* profile_; content::WebContents* web_contents_; DISALLOW_COPY_AND_ASSIGN(WebsiteSettingsPopupViewTestApi); }; } // namespace test namespace { // Helper class that wraps a TestingProfile and a TestWebContents for a test // harness. Inspired by RenderViewHostTestHarness, but doesn't use inheritance // so the helper can be composed with other helpers in the test harness. class ScopedWebContentsTestHelper { public: ScopedWebContentsTestHelper() { web_contents_ = factory_.CreateWebContents(&profile_); } Profile* profile() { return &profile_; } content::WebContents* web_contents() { return web_contents_; } private: content::TestBrowserThreadBundle thread_bundle_; TestingProfile profile_; content::TestWebContentsFactory factory_; content::WebContents* web_contents_; // Weak. Owned by factory_. DISALLOW_COPY_AND_ASSIGN(ScopedWebContentsTestHelper); }; class WebsiteSettingsPopupViewTest : public testing::Test { public: WebsiteSettingsPopupViewTest() {} // testing::Test: void SetUp() override { views::Widget::InitParams parent_params; parent_params.context = views_helper_.GetContext(); parent_window_ = new views::Widget(); parent_window_->Init(parent_params); content::WebContents* web_contents = web_contents_helper_.web_contents(); TabSpecificContentSettings::CreateForWebContents(web_contents); api_.reset(new test::WebsiteSettingsPopupViewTestApi( parent_window_->GetNativeView(), web_contents_helper_.profile(), web_contents)); } void TearDown() override { parent_window_->CloseNow(); } protected: device::MockDeviceClient device_client_; ScopedWebContentsTestHelper web_contents_helper_; views::ScopedViewsTestHelper views_helper_; views::Widget* parent_window_ = nullptr; // Weak. Owned by the NativeWidget. std::unique_ptr<test::WebsiteSettingsPopupViewTestApi> api_; private: DISALLOW_COPY_AND_ASSIGN(WebsiteSettingsPopupViewTest); }; } // namespace // TODO(ellyjones): re-enable this test for OSX. // This test exercises PermissionSelectorRow in a way that it is not used in // practice. In practice, every setting in PermissionSelectorRow starts off // "set", so there is always one option checked in the resulting MenuModel. This // test creates settings that are left at their defaults, leading to zero // checked options, and checks that the text on the MenuButtons is right. Since // the Comboboxes the MacViews version of this dialog uses don't have separate // text, this test doesn't work. #if defined(OS_MACOSX) #define MAYBE_SetPermissionInfo DISABLED_SetPermissionInfo #else #define MAYBE_SetPermissionInfo SetPermissionInfo #endif // Test UI construction and reconstruction via // WebsiteSettingsPopupView::SetPermissionInfo(). TEST_F(WebsiteSettingsPopupViewTest, MAYBE_SetPermissionInfo) { PermissionInfoList list(1); list.back().type = CONTENT_SETTINGS_TYPE_GEOLOCATION; list.back().source = content_settings::SETTING_SOURCE_USER; list.back().is_incognito = false; list.back().setting = CONTENT_SETTING_DEFAULT; const int kExpectedChildren = ExclusiveAccessManager::IsSimplifiedFullscreenUIEnabled() ? 11 : 13; EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); list.back().setting = CONTENT_SETTING_ALLOW; api_->SetPermissionInfo(list); EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); PermissionSelectorRow* selector = api_->GetPermissionSelectorAt(0); EXPECT_EQ(3, selector->child_count()); // Verify labels match the settings on the PermissionInfoList. const int kLabelIndex = 1; EXPECT_EQ(views::Label::kViewClassName, selector->child_at(kLabelIndex)->GetClassName()); views::Label* label = static_cast<views::Label*>(selector->child_at(kLabelIndex)); EXPECT_EQ(base::ASCIIToUTF16("Location"), label->text()); EXPECT_EQ(base::ASCIIToUTF16("Allow"), api_->GetPermissionButtonTextAt(0)); // Verify calling SetPermisisonInfo() directly updates the UI. list.back().setting = CONTENT_SETTING_BLOCK; api_->SetPermissionInfo(list); EXPECT_EQ(base::ASCIIToUTF16("Block"), api_->GetPermissionButtonTextAt(0)); // Simulate a user selection via the UI. Note this will also cover logic in // WebsiteSettings to update the pref. list.back().setting = CONTENT_SETTING_ALLOW; api_->GetPermissionSelectorAt(0)->PermissionChanged(list.back()); EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); EXPECT_EQ(base::ASCIIToUTF16("Allow"), api_->GetPermissionButtonTextAt(0)); // Setting to the default via the UI should keep the button around. list.back().setting = CONTENT_SETTING_ASK; api_->GetPermissionSelectorAt(0)->PermissionChanged(list.back()); EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); EXPECT_EQ(base::ASCIIToUTF16("Ask"), api_->GetPermissionButtonTextAt(0)); // However, since the setting is now default, recreating the dialog with those // settings should omit the permission from the UI. api_->SetPermissionInfo(list); EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); } // Test UI construction and reconstruction with USB devices. TEST_F(WebsiteSettingsPopupViewTest, SetPermissionInfoWithUsbDevice) { const int kExpectedChildren = ExclusiveAccessManager::IsSimplifiedFullscreenUIEnabled() ? 11 : 13; EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); const GURL origin = GURL(kUrl).GetOrigin(); scoped_refptr<device::UsbDevice> device = new device::MockUsbDevice(0, 0, "Google", "Gizmo", "1234567890"); device_client_.usb_service()->AddDevice(device); UsbChooserContext* store = UsbChooserContextFactory::GetForProfile(web_contents_helper_.profile()); store->GrantDevicePermission(origin, origin, device->guid()); PermissionInfoList list; api_->SetPermissionInfo(list); EXPECT_EQ(kExpectedChildren + 1, api_->permissions_view()->child_count()); ChosenObjectRow* object_view = static_cast<ChosenObjectRow*>( api_->permissions_view()->child_at(kExpectedChildren)); EXPECT_EQ(3, object_view->child_count()); const int kLabelIndex = 1; views::Label* label = static_cast<views::Label*>(object_view->child_at(kLabelIndex)); EXPECT_EQ(base::ASCIIToUTF16("Gizmo"), label->text()); const int kButtonIndex = 2; views::Button* button = static_cast<views::Button*>(object_view->child_at(kButtonIndex)); const ui::MouseEvent event(ui::ET_MOUSE_PRESSED, gfx::Point(), gfx::Point(), ui::EventTimeForNow(), 0, 0); views::ButtonListener* button_listener = static_cast<views::ButtonListener*>(object_view); button_listener->ButtonPressed(button, event); api_->SetPermissionInfo(list); EXPECT_EQ(kExpectedChildren, api_->permissions_view()->child_count()); EXPECT_FALSE(store->HasDevicePermission(origin, origin, device)); }
3,558
435
<filename>warehouse/query-core/src/main/java/datawave/query/jexl/visitors/GeoFeatureVisitor.java package datawave.query.jexl.visitors; import datawave.query.jexl.functions.GeoFunctionsDescriptor; import datawave.query.jexl.functions.GeoWaveFunctionsDescriptor; import datawave.query.jexl.functions.JexlFunctionArgumentDescriptorFactory; import datawave.query.jexl.functions.arguments.JexlArgumentDescriptor; import datawave.webservice.common.logging.ThreadConfigurableLogger; import datawave.webservice.query.map.QueryGeometry; import org.apache.commons.jexl2.parser.ASTFunctionNode; import org.apache.commons.jexl2.parser.JexlNode; import org.apache.log4j.Logger; import org.geotools.geojson.geom.GeometryJSON; import org.locationtech.jts.io.WKTReader; import java.util.LinkedHashSet; import java.util.Set; /** * This visitor will traverse the query tree, and extract both the geo function and associated query geometry (as GeoJSON). */ public class GeoFeatureVisitor extends BaseVisitor { private static final Logger log = ThreadConfigurableLogger.getLogger(GeoFeatureVisitor.class); private Set<QueryGeometry> geoFeatures; private GeometryJSON geoJson = new GeometryJSON(); private WKTReader wktReader = new WKTReader(); private boolean isLuceneQuery; private GeoFeatureVisitor(Set<QueryGeometry> geoFeatures) { this(geoFeatures, false); } private GeoFeatureVisitor(Set<QueryGeometry> geoFeatures, boolean isLuceneQuery) { this.geoFeatures = geoFeatures; this.isLuceneQuery = isLuceneQuery; } public static Set<QueryGeometry> getGeoFeatures(JexlNode node) { return getGeoFeatures(node, false); } public static Set<QueryGeometry> getGeoFeatures(JexlNode node, boolean isLuceneQuery) { Set<QueryGeometry> geoFeatures = new LinkedHashSet<>(); node.jjtAccept(new GeoFeatureVisitor(geoFeatures, isLuceneQuery), null); return geoFeatures; } @Override public Object visit(ASTFunctionNode node, Object data) { JexlArgumentDescriptor desc = JexlFunctionArgumentDescriptorFactory.F.getArgumentDescriptor(node); try { if (desc instanceof GeoFunctionsDescriptor.GeoJexlArgumentDescriptor) { JexlNode geowaveNode = ((GeoFunctionsDescriptor.GeoJexlArgumentDescriptor) desc).toGeoWaveFunction(desc.fields(null, null)); geowaveNode.jjtAccept(this, JexlStringBuildingVisitor.buildQuery(node)); } else if (desc instanceof GeoWaveFunctionsDescriptor.GeoWaveJexlArgumentDescriptor) { String function = null; if (data != null) function = (String) data; else function = JexlStringBuildingVisitor.buildQuery(node); // reformat as a lucene function if (isLuceneQuery) { int paramsIdx = function.indexOf('('); String op = function.substring(0, function.indexOf('(')); String params = function.substring(paramsIdx); if (op.startsWith("geowave:")) { function = op.replace("geowave:", "#").toUpperCase() + params; } else if (op.startsWith("geo:")) { String opParam = op.substring("geo:within_".length()); function = "#GEO(" + opParam + ", " + params.substring(1); } } String geometry = geoJson.toString(wktReader.read(((GeoWaveFunctionsDescriptor.GeoWaveJexlArgumentDescriptor) desc).getWkt())); geoFeatures.add(new QueryGeometry(function, geometry)); } } catch (Exception e) { log.error("Unable to extract geo feature from function", e); } return node; } }
1,756
397
<filename>CVE-2019-1253/AppXSvcEoP/CommonUtils.h #pragma once #pragma once #include <Windows.h> #include <string> typedef void(__stdcall *console_output)(const char*); void DebugSetOutput(console_output pout); void DebugPrintf(const char* lpFormat, ...); std::wstring GetErrorMessage(DWORD dwError); std::wstring GetErrorMessage(); BOOL SetPrivilege(HANDLE hToken, LPCTSTR lpszPrivilege, BOOL bEnablePrivilege); DWORD NtStatusToDosError(NTSTATUS status); bool CreateNativeHardlink(LPCWSTR linkname, LPCWSTR targetname); HANDLE OpenFileNative(LPCWSTR path, HANDLE root, ACCESS_MASK desired_access, ULONG share_access, ULONG open_options); std::wstring BuildFullPath(const std::wstring& path, bool native);
246
886
package com.flowci.core.secret.domain; import com.flowci.domain.SecretField; import lombok.Getter; import lombok.Setter; import org.springframework.data.mongodb.core.mapping.Document; /** * @author yang */ @Getter @Setter @Document(collection = "secret") public class AndroidSign extends Secret { private String keyStoreFileName; private SecretField keyStorePassword; private String keyAlias; private SecretField keyPassword; public AndroidSign() { setCategory(Category.ANDROID_SIGN); } }
176
3,212
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.registry.security.authorization.file; import org.apache.nifi.registry.security.authorization.file.tenants.generated.Groups; import org.apache.nifi.registry.security.authorization.file.tenants.generated.Tenants; import org.apache.nifi.registry.security.authorization.file.tenants.generated.Users; import org.apache.nifi.registry.security.authorization.Group; import org.apache.nifi.registry.security.authorization.User; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; /** * A holder to provide atomic access to user group data structures. */ public class UserGroupHolder { private final Tenants tenants; private final Set<User> allUsers; private final Map<String,User> usersById; private final Map<String,User> usersByIdentity; private final Set<Group> allGroups; private final Map<String,Group> groupsById; private final Map<String, Set<Group>> groupsByUserIdentity; /** * Creates a new holder and populates all convenience data structures. * * @param tenants the current tenants instance */ public UserGroupHolder(final Tenants tenants) { this.tenants = tenants; // load all users final Users users = tenants.getUsers(); final Set<User> allUsers = Collections.unmodifiableSet(createUsers(users)); // load all groups final Groups groups = tenants.getGroups(); final Set<Group> allGroups = Collections.unmodifiableSet(createGroups(groups, users)); // create a convenience map to retrieve a user by id final Map<String, User> userByIdMap = Collections.unmodifiableMap(createUserByIdMap(allUsers)); // create a convenience map to retrieve a user by identity final Map<String, User> userByIdentityMap = Collections.unmodifiableMap(createUserByIdentityMap(allUsers)); // create a convenience map to retrieve a group by id final Map<String, Group> groupByIdMap = Collections.unmodifiableMap(createGroupByIdMap(allGroups)); // create a convenience map to retrieve the groups for a user identity final Map<String, Set<Group>> groupsByUserIdentityMap = Collections.unmodifiableMap(createGroupsByUserIdentityMap(allGroups, allUsers)); // set all the holders this.allUsers = allUsers; this.allGroups = allGroups; this.usersById = userByIdMap; this.usersByIdentity = userByIdentityMap; this.groupsById = groupByIdMap; this.groupsByUserIdentity = groupsByUserIdentityMap; } /** * Creates a set of Users from the JAXB Users. * * @param users the JAXB Users * @return a set of API Users matching the provided JAXB Users */ private Set<User> createUsers(Users users) { Set<User> allUsers = new HashSet<>(); if (users == null || users.getUser() == null) { return allUsers; } for (org.apache.nifi.registry.security.authorization.file.tenants.generated.User user : users.getUser()) { final User.Builder builder = new User.Builder() .identity(user.getIdentity()) .identifier(user.getIdentifier()); allUsers.add(builder.build()); } return allUsers; } /** * Creates a set of Groups from the JAXB Groups. * * @param groups the JAXB Groups * @return a set of API Groups matching the provided JAXB Groups */ private Set<Group> createGroups(Groups groups, Users users) { Set<Group> allGroups = new HashSet<>(); if (groups == null || groups.getGroup() == null) { return allGroups; } for (org.apache.nifi.registry.security.authorization.file.tenants.generated.Group group : groups.getGroup()) { final Group.Builder builder = new Group.Builder() .identifier(group.getIdentifier()) .name(group.getName()); for (org.apache.nifi.registry.security.authorization.file.tenants.generated.Group.User groupUser : group.getUser()) { builder.addUser(groupUser.getIdentifier()); } allGroups.add(builder.build()); } return allGroups; } /** * Creates a Map from user identifier to User. * * @param users the set of all users * @return the Map from user identifier to User */ private Map<String,User> createUserByIdMap(final Set<User> users) { Map<String,User> usersMap = new HashMap<>(); for (User user : users) { usersMap.put(user.getIdentifier(), user); } return usersMap; } /** * Creates a Map from user identity to User. * * @param users the set of all users * @return the Map from user identity to User */ private Map<String,User> createUserByIdentityMap(final Set<User> users) { Map<String,User> usersMap = new HashMap<>(); for (User user : users) { usersMap.put(user.getIdentity(), user); } return usersMap; } /** * Creates a Map from group identifier to Group. * * @param groups the set of all groups * @return the Map from group identifier to Group */ private Map<String,Group> createGroupByIdMap(final Set<Group> groups) { Map<String,Group> groupsMap = new HashMap<>(); for (Group group : groups) { groupsMap.put(group.getIdentifier(), group); } return groupsMap; } /** * Creates a Map from user identity to the set of Groups for that identity. * * @param groups all groups * @param users all users * @return a Map from User identity to the set of Groups for that identity */ private Map<String, Set<Group>> createGroupsByUserIdentityMap(final Set<Group> groups, final Set<User> users) { Map<String, Set<Group>> groupsByUserIdentity = new HashMap<>(); for (User user : users) { Set<Group> userGroups = new HashSet<>(); for (Group group : groups) { for (String groupUser : group.getUsers()) { if (groupUser.equals(user.getIdentifier())) { userGroups.add(group); } } } groupsByUserIdentity.put(user.getIdentity(), userGroups); } return groupsByUserIdentity; } public Tenants getTenants() { return tenants; } public Set<User> getAllUsers() { return allUsers; } public Map<String, User> getUsersById() { return usersById; } public Map<String, User> getUsersByIdentity() { return usersByIdentity; } public Set<Group> getAllGroups() { return allGroups; } public Map<String, Group> getGroupsById() { return groupsById; } public User getUser(String identity) { if (identity == null) { throw new IllegalArgumentException("Identity cannot be null"); } return usersByIdentity.get(identity); } public Set<Group> getGroups(String userIdentity) { if (userIdentity == null) { throw new IllegalArgumentException("User Identity cannot be null"); } return groupsByUserIdentity.get(userIdentity); } }
3,168
372
<filename>clients/google-api-services-drive/v3/1.31.0/com/google/api/services/drive/model/ContentRestriction.java /* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.drive.model; /** * A restriction for accessing the content of the file. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Drive API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class ContentRestriction extends com.google.api.client.json.GenericJson { /** * Whether the content of the file is read-only. If a file is read-only, a new revision of the * file may not be added, comments may not be added or modified, and the title of the file may not * be modified. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.Boolean readOnly; /** * Reason for why the content of the file is restricted. This is only mutable on requests that * also set readOnly=true. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String reason; /** * The user who set the content restriction. Only populated if readOnly is true. * The value may be {@code null}. */ @com.google.api.client.util.Key private User restrictingUser; /** * The time at which the content restriction was set (formatted RFC 3339 timestamp). Only * populated if readOnly is true. * The value may be {@code null}. */ @com.google.api.client.util.Key private com.google.api.client.util.DateTime restrictionTime; /** * The type of the content restriction. Currently the only possible value is * globalContentRestriction. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String type; /** * Whether the content of the file is read-only. If a file is read-only, a new revision of the * file may not be added, comments may not be added or modified, and the title of the file may not * be modified. * @return value or {@code null} for none */ public java.lang.Boolean getReadOnly() { return readOnly; } /** * Whether the content of the file is read-only. If a file is read-only, a new revision of the * file may not be added, comments may not be added or modified, and the title of the file may not * be modified. * @param readOnly readOnly or {@code null} for none */ public ContentRestriction setReadOnly(java.lang.Boolean readOnly) { this.readOnly = readOnly; return this; } /** * Reason for why the content of the file is restricted. This is only mutable on requests that * also set readOnly=true. * @return value or {@code null} for none */ public java.lang.String getReason() { return reason; } /** * Reason for why the content of the file is restricted. This is only mutable on requests that * also set readOnly=true. * @param reason reason or {@code null} for none */ public ContentRestriction setReason(java.lang.String reason) { this.reason = reason; return this; } /** * The user who set the content restriction. Only populated if readOnly is true. * @return value or {@code null} for none */ public User getRestrictingUser() { return restrictingUser; } /** * The user who set the content restriction. Only populated if readOnly is true. * @param restrictingUser restrictingUser or {@code null} for none */ public ContentRestriction setRestrictingUser(User restrictingUser) { this.restrictingUser = restrictingUser; return this; } /** * The time at which the content restriction was set (formatted RFC 3339 timestamp). Only * populated if readOnly is true. * @return value or {@code null} for none */ public com.google.api.client.util.DateTime getRestrictionTime() { return restrictionTime; } /** * The time at which the content restriction was set (formatted RFC 3339 timestamp). Only * populated if readOnly is true. * @param restrictionTime restrictionTime or {@code null} for none */ public ContentRestriction setRestrictionTime(com.google.api.client.util.DateTime restrictionTime) { this.restrictionTime = restrictionTime; return this; } /** * The type of the content restriction. Currently the only possible value is * globalContentRestriction. * @return value or {@code null} for none */ public java.lang.String getType() { return type; } /** * The type of the content restriction. Currently the only possible value is * globalContentRestriction. * @param type type or {@code null} for none */ public ContentRestriction setType(java.lang.String type) { this.type = type; return this; } @Override public ContentRestriction set(String fieldName, Object value) { return (ContentRestriction) super.set(fieldName, value); } @Override public ContentRestriction clone() { return (ContentRestriction) super.clone(); } }
1,788
30,785
package jadx.tests.integration.others; import java.util.Arrays; import org.junit.jupiter.api.Test; import jadx.api.data.ICodeComment; import jadx.api.data.IJavaNodeRef.RefType; import jadx.api.data.impl.JadxCodeComment; import jadx.api.data.impl.JadxCodeData; import jadx.api.data.impl.JadxNodeRef; import jadx.core.dex.nodes.ClassNode; import jadx.tests.api.IntegrationTest; import static jadx.tests.api.utils.assertj.JadxAssertions.assertThat; public class TestCodeCommentsOverride extends IntegrationTest { public static class TestCls { public interface I { void mth(); } public static class A implements I { @Override public void mth() { System.out.println("mth"); } } } @Test public void test() { String baseClsId = TestCls.class.getName(); JadxNodeRef iMthRef = new JadxNodeRef(RefType.METHOD, baseClsId + ".I", "mth()V"); ICodeComment iMthComment = new JadxCodeComment(iMthRef, "interface mth comment"); JadxNodeRef mthRef = new JadxNodeRef(RefType.METHOD, baseClsId + ".A", "mth()V"); ICodeComment mthComment = new JadxCodeComment(mthRef, "mth comment"); JadxCodeData codeData = new JadxCodeData(); codeData.setComments(Arrays.asList(iMthComment, mthComment)); getArgs().setCodeData(codeData); ClassNode cls = getClassNode(TestCls.class); assertThat(cls) .decompile() .checkCodeOffsets() .code() .containsOne("@Override") .containsOne("// " + iMthComment.getComment()) .containsOne("// " + mthComment.getComment()); assertThat(cls) .reloadCode(this) .containsOne("@Override") .containsOne("// " + iMthComment.getComment()) .containsOne("// " + mthComment.getComment()); } }
682
5,238
<filename>src/extra/layouts/lv_layouts.h<gh_stars>1000+ /** * @file lv_layouts.h * */ #ifndef LV_LAYOUTS_H #define LV_LAYOUTS_H #ifdef __cplusplus extern "C" { #endif /********************* * INCLUDES *********************/ #include "flex/lv_flex.h" #include "grid/lv_grid.h" /********************* * DEFINES *********************/ /********************** * TYPEDEFS **********************/ /********************** * GLOBAL PROTOTYPES **********************/ /********************** * MACROS **********************/ #if LV_USE_LOG && LV_LOG_TRACE_LAYOUT # define LV_TRACE_LAYOUT(...) LV_LOG_TRACE( __VA_ARGS__) #else # define LV_TRACE_LAYOUT(...) #endif #ifdef __cplusplus } /*extern "C"*/ #endif #endif /*LV_LAYOUTS_H*/
317
1,374
<gh_stars>1000+ package def.dom; public class MessagePort extends EventTarget { public java.util.function.Function<MessageEvent,Object> onmessage; native public void close(); native public void postMessage(Object message, Object ports); native public void start(); native public void addEventListener(jsweet.util.StringTypes.message type, java.util.function.Function<MessageEvent,Object> listener, Boolean useCapture); native public void addEventListener(String type, EventListener listener, Boolean useCapture); public static MessagePort prototype; public MessagePort(){} native public void postMessage(Object message); native public void postMessage(); native public void addEventListener(jsweet.util.StringTypes.message type, java.util.function.Function<MessageEvent,Object> listener); native public void addEventListener(String type, EventListener listener); native public void addEventListener(String type, EventListenerObject listener, Boolean useCapture); native public void addEventListener(String type, EventListenerObject listener); }
282
365
package org.superboot.dao.jpa; import org.springframework.cache.annotation.CacheConfig; import org.springframework.cache.annotation.Cacheable; import org.springframework.stereotype.Repository; import org.superboot.base.BaseJpaDAO; import org.superboot.entity.jpa.SuperbootMenuPermissions; /** * <b> 菜单授权DAO类 </b> * <p> * 功能描述: * </p> */ @Repository @CacheConfig(cacheNames = "menupermissions") public interface MenuPermissionsDAO extends BaseJpaDAO<SuperbootMenuPermissions> { /** * 根据菜单主键、权限主键获取菜单权限信息 * * @param pkMenu 菜单主键 * @param pkPermissions 权限主键 * @param dr 删除标识 * @return */ @Cacheable(key = "#p0+#p1+#p2") SuperbootMenuPermissions findByPkMenuAndPkPermissionsAndDr(long pkMenu, long pkPermissions, int dr); }
400
409
#pragma once #include "TypeResolution.h" #include "catalog.h" #define METADATA_FILE_EXTENSION L"winmd" #define METADATA_FILE_PATH_FORMAT L"%s%s." METADATA_FILE_EXTENSION #define METADATA_FILE_SEARCH_FORMAT L"%s%s*." METADATA_FILE_EXTENSION namespace UndockedRegFreeWinRT { static const UINT32 g_uiMaxTypeName = 512; static wil::unique_process_heap_string g_cachedProcessExeDir; BOOL CALLBACK GetProcessExeDirInitOnceCallback( _Inout_ PINIT_ONCE, _Inout_opt_ PVOID, _Out_opt_ PVOID*) { wil::unique_process_heap_string localExePath; HRESULT hr = wil::GetModuleFileNameW(nullptr, localExePath); if (FAILED_LOG(hr)) { SetLastError(hr); return FALSE; } // Modify the retrieved string to truncate the actual exe name and leave the containing directory path. This API // expects a buffer size including the terminating null, so add 1 to the string length. hr = PathCchRemoveFileSpec(localExePath.get(), wcslen(localExePath.get()) + 1); if (FAILED_LOG(hr)) { SetLastError(hr); return FALSE; } g_cachedProcessExeDir = std::move(localExePath); return TRUE; } // Returned string is cached globally, and should not be freed by the caller. HRESULT GetProcessExeDir(PCWSTR* path) { *path = nullptr; static INIT_ONCE ProcessExeDirInitOnce = INIT_ONCE_STATIC_INIT; RETURN_IF_WIN32_BOOL_FALSE(InitOnceExecuteOnce(&ProcessExeDirInitOnce, GetProcessExeDirInitOnceCallback, nullptr, nullptr)); // The cache has been successfully populated by the InitOnce, so we can just use it directly. *path = g_cachedProcessExeDir.get(); return S_OK; } HRESULT FindTypeInMetaDataFile( _In_ IMetaDataDispenserEx* pMetaDataDispenser, _In_ PCWSTR pszFullName, _In_ PCWSTR pszCandidateFilePath, _In_ TYPE_RESOLUTION_OPTIONS resolutionOptions, _COM_Outptr_opt_result_maybenull_ IMetaDataImport2** ppMetaDataImport, _Out_opt_ mdTypeDef* pmdTypeDef) { HRESULT hr = S_OK; Microsoft::WRL::ComPtr<IMetaDataImport2> spMetaDataImport; MetaDataImportersLRUCache* pMetaDataImporterCache = MetaDataImportersLRUCache::GetMetaDataImportersLRUCacheInstance(); if (pMetaDataImporterCache != nullptr) { hr = pMetaDataImporterCache->GetMetaDataImporter( pMetaDataDispenser, pszCandidateFilePath, &spMetaDataImport); } else { hr = E_OUTOFMEMORY; } if (SUCCEEDED(hr)) { const size_t cFullName = wcslen(pszFullName); wchar_t pszRetrievedName[g_uiMaxTypeName]; HCORENUM hEnum = nullptr; mdTypeDef rgTypeDefs[32]; ULONG cTypeDefs; DWORD dwTypeDefProps; hr = RO_E_METADATA_NAME_NOT_FOUND; if (TRO_RESOLVE_TYPE & resolutionOptions) { hr = spMetaDataImport->FindTypeDefByName(pszFullName, mdTokenNil, &rgTypeDefs[0]); if (SUCCEEDED(hr)) { // Check to confirm that the type we just found is a // winrt type. If it is, we're good, otherwise we // want to fail with RO_E_INVALID_METADATA_FILE. hr = spMetaDataImport->GetTypeDefProps(rgTypeDefs[0], nullptr, 0, nullptr, &dwTypeDefProps, nullptr); if (SUCCEEDED(hr)) { // If we found the type but it's not a winrt type, // it's an error. // // If the type is public, than the metadata file // is corrupt (all public types in a winrt file // must be tdWindowsRuntime). If the type is // private, then we just want to report that the // type wasn't found. if (!IsTdWindowsRuntime(dwTypeDefProps)) { if (IsTdPublic(dwTypeDefProps)) { hr = RO_E_INVALID_METADATA_FILE; } else { hr = RO_E_METADATA_NAME_NOT_FOUND; } } } else { hr = RO_E_INVALID_METADATA_FILE; } if (SUCCEEDED(hr)) { if (pmdTypeDef != nullptr) { *pmdTypeDef = rgTypeDefs[0]; } if (ppMetaDataImport != nullptr) { *ppMetaDataImport = spMetaDataImport.Detach(); } } } else if (hr == CLDB_E_RECORD_NOTFOUND) { hr = RO_E_METADATA_NAME_NOT_FOUND; } } if ((hr == RO_E_METADATA_NAME_NOT_FOUND) && (TRO_RESOLVE_NAMESPACE & resolutionOptions)) { // Check whether the name is a namespace rather than a type. do { hr = spMetaDataImport->EnumTypeDefs( &hEnum, rgTypeDefs, ARRAYSIZE(rgTypeDefs), &cTypeDefs); if (hr == S_OK) { for (UINT32 iTokenIndex = 0; iTokenIndex < cTypeDefs; ++iTokenIndex) { hr = spMetaDataImport->GetTypeDefProps( rgTypeDefs[iTokenIndex], pszRetrievedName, ARRAYSIZE(pszRetrievedName), nullptr, &dwTypeDefProps, nullptr); if (FAILED(hr)) { break; } hr = RO_E_METADATA_NAME_NOT_FOUND; // Only consider windows runtime types when // trying to determine if the name is a // namespace. if (IsTdWindowsRuntime(dwTypeDefProps) && (wcslen(pszRetrievedName) > cFullName)) { if ((wcsncmp(pszRetrievedName, pszFullName, cFullName) == 0) && (pszRetrievedName[cFullName] == L'.')) { hr = RO_E_METADATA_NAME_IS_NAMESPACE; break; } } } } } while (hr == RO_E_METADATA_NAME_NOT_FOUND); // There were no more tokens to enumerate, but the type was still not found. if (hr == S_FALSE) { hr = RO_E_METADATA_NAME_NOT_FOUND; } if (hEnum != nullptr) { spMetaDataImport->CloseEnum(hEnum); hEnum = nullptr; } } } return hr; } HRESULT FindTypeInDirectory( _In_ IMetaDataDispenserEx* pMetaDataDispenser, _In_ PCWSTR pszFullName, _In_ PCWSTR pszDirectoryPath, _Out_opt_ HSTRING* phstrMetaDataFilePath, _COM_Outptr_opt_result_maybenull_ IMetaDataImport2** ppMetaDataImport, _Out_opt_ mdTypeDef* pmdTypeDef) { HRESULT hr; wchar_t szCandidateFilePath[MAX_PATH + 1] = { 0 }; wchar_t szCandidateFileName[MAX_PATH + 1] = { 0 }; PWSTR pszLastDot; hr = StringCchCopy(szCandidateFileName, ARRAYSIZE(szCandidateFileName), pszFullName); if (SUCCEEDED(hr)) { // To resolve type SomeNamespace.B.C, first check if SomeNamespace.B.C is a type or // a namespace in the metadata files in the directory in this order: // 1. SomeNamespace.B.C.WinMD // 2. SomeNamespace.B.WinMD // 3. SomeNamespace.WinMD do { pszLastDot = nullptr; hr = StringCchPrintfEx( szCandidateFilePath, ARRAYSIZE(szCandidateFilePath), nullptr, nullptr, 0, METADATA_FILE_PATH_FORMAT, pszDirectoryPath, szCandidateFileName); if (SUCCEEDED(hr)) { hr = FindTypeInMetaDataFile( pMetaDataDispenser, pszFullName, szCandidateFilePath, TRO_RESOLVE_TYPE_AND_NAMESPACE, ppMetaDataImport, pmdTypeDef); if (SUCCEEDED(hr)) { if (phstrMetaDataFilePath != nullptr) { hr = WindowsCreateString( szCandidateFilePath, static_cast<UINT32>(wcslen(szCandidateFilePath)), phstrMetaDataFilePath); } break; } } hr = RO_E_METADATA_NAME_NOT_FOUND; pszLastDot = wcsrchr(szCandidateFileName, '.'); if (pszLastDot != nullptr) { *pszLastDot = '\0'; } } while (pszLastDot != nullptr); // If name was not found when searching in the "upward direction", then // the name might be a namespace name in a down-level file. if (hr == RO_E_METADATA_NAME_NOT_FOUND) { wchar_t szFilePathSearchTemplate[MAX_PATH + 1] = { 0 }; hr = StringCchPrintfEx( szFilePathSearchTemplate, ARRAYSIZE(szFilePathSearchTemplate), nullptr, nullptr, 0, METADATA_FILE_SEARCH_FORMAT, pszDirectoryPath, pszFullName); if (SUCCEEDED(hr)) { WIN32_FIND_DATA fd; HANDLE hFindFile; // Search in all files in the directory whose name begin with the input string. hFindFile = FindFirstFile(szFilePathSearchTemplate, &fd); if (hFindFile != INVALID_HANDLE_VALUE) { PWSTR pszFilePathPart; size_t cchRemaining; do { if (fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { continue; } pszFilePathPart = szCandidateFilePath; cchRemaining = ARRAYSIZE(szCandidateFilePath); hr = StringCchCopyEx( pszFilePathPart, cchRemaining, pszDirectoryPath, &pszFilePathPart, &cchRemaining, 0); if (SUCCEEDED(hr)) { hr = StringCchCopyEx( pszFilePathPart, cchRemaining, fd.cFileName, &pszFilePathPart, &cchRemaining, 0); } if (SUCCEEDED(hr)) { hr = FindTypeInMetaDataFile( pMetaDataDispenser, pszFullName, szCandidateFilePath, TRO_RESOLVE_NAMESPACE, ppMetaDataImport, pmdTypeDef); if (hr == S_OK) { hr = E_UNEXPECTED; break; } if (hr == RO_E_METADATA_NAME_IS_NAMESPACE) { break; } } } while (FindNextFile(hFindFile, &fd)); FindClose(hFindFile); } else { hr = RO_E_METADATA_NAME_NOT_FOUND; } } } } if (hr == STRSAFE_E_INSUFFICIENT_BUFFER) { hr = RO_E_METADATA_NAME_NOT_FOUND; } return hr; } HRESULT FindTypeInDirectoryWithNormalization( _In_ IMetaDataDispenserEx* pMetaDataDispenser, _In_ PCWSTR pszFullName, _In_ PCWSTR pszDirectoryPath, _Out_opt_ HSTRING* phstrMetaDataFilePath, _COM_Outptr_opt_result_maybenull_ IMetaDataImport2** ppMetaDataImport, _Out_opt_ mdTypeDef* pmdTypeDef) { wchar_t pszPackagePath[MAX_PATH + 1]; PWSTR pszPackagePathWritePtr = pszPackagePath; size_t cchPackagePathRemaining = ARRAYSIZE(pszPackagePath); HRESULT hr = StringCchCopyEx( pszPackagePath, ARRAYSIZE(pszPackagePath), pszDirectoryPath, &pszPackagePathWritePtr, &cchPackagePathRemaining, 0); if (SUCCEEDED(hr)) { // If the path is not terminated by a backslash, then append one. if (pszPackagePath[ARRAYSIZE(pszPackagePath) - cchPackagePathRemaining - 1] != L'\\') { hr = StringCchCopyEx( pszPackagePathWritePtr, cchPackagePathRemaining, L"\\", &pszPackagePathWritePtr, &cchPackagePathRemaining, 0); } } if (SUCCEEDED(hr)) { hr = FindTypeInDirectory( pMetaDataDispenser, pszFullName, pszPackagePath, phstrMetaDataFilePath, ppMetaDataImport, pmdTypeDef); } return hr; } HRESULT ResolveThirdPartyType( _In_ IMetaDataDispenserEx* pMetaDataDispenser, _In_ PCWSTR pszFullName, _Out_opt_ HSTRING* phstrMetaDataFilePath, _COM_Outptr_opt_result_maybenull_ IMetaDataImport2** ppMetaDataImport, _Out_opt_ mdTypeDef* pmdTypeDef) { HRESULT hr = S_OK; UINT32 dwPackagesCount = 0; UINT32 dwBufferLength = 0; const UINT32 filter = PACKAGE_FILTER_HEAD | PACKAGE_FILTER_DIRECT | PACKAGE_FILTER_IS_IN_RELATED_SET; hr = HRESULT_FROM_WIN32(GetCurrentPackageInfo(filter, &dwBufferLength, nullptr, &dwPackagesCount)); // Only find the type if the it is a unpacakged app. Packaged apps can have their exe on their package graph, // which will allow type resolution against adjacent WinMDs. if (hr == HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE)) { PCWSTR exeDir = nullptr; // Never freed; owned by process global. RETURN_IF_FAILED(GetProcessExeDir(&exeDir)); hr = FindTypeInDirectoryWithNormalization( pMetaDataDispenser, pszFullName, exeDir, phstrMetaDataFilePath, ppMetaDataImport, pmdTypeDef); if (hr == RO_E_METADATA_NAME_NOT_FOUND) { // For compatibility purposes, if we fail to find the type in the unpackaged location, we should return // HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE) instead of a "not found" error. This preserves the // behavior that existed before unpackaged type resolution was implemented. hr = HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE); } return hr; } else { return RO_E_METADATA_NAME_NOT_FOUND; } } // // MetaDataImportersLRUCache implementation // INIT_ONCE MetaDataImportersLRUCache::s_initOnce = INIT_ONCE_STATIC_INIT; MetaDataImportersLRUCache* MetaDataImportersLRUCache::s_pMetaDataImportersLRUCacheInstance = nullptr; MetaDataImportersLRUCache* MetaDataImportersLRUCache::GetMetaDataImportersLRUCacheInstance() { BOOL fInitializationSucceeded = InitOnceExecuteOnce( &s_initOnce, ConstructLRUCacheIfNecessary, nullptr, nullptr); UNREFERENCED_PARAMETER(fInitializationSucceeded); return s_pMetaDataImportersLRUCacheInstance; } // Called via InitOnceExecuteOnce. BOOL CALLBACK MetaDataImportersLRUCache::ConstructLRUCacheIfNecessary( PINIT_ONCE /*initOnce*/, PVOID /*parameter*/, PVOID* /*context*/) { HRESULT hr = S_OK; if (s_pMetaDataImportersLRUCacheInstance == nullptr) { s_pMetaDataImportersLRUCacheInstance = new MetaDataImportersLRUCache(); if (s_pMetaDataImportersLRUCacheInstance == nullptr) { hr = E_OUTOFMEMORY; } } return SUCCEEDED(hr); } HRESULT MetaDataImportersLRUCache::GetMetaDataImporter( _In_ IMetaDataDispenserEx* pMetaDataDispenser, _In_ PCWSTR pszCandidateFilePath, _Outptr_opt_ IMetaDataImport2** ppMetaDataImporter) { if (ppMetaDataImporter == nullptr) { return ERROR_BAD_ARGUMENTS; } HRESULT hr = S_OK; *ppMetaDataImporter = nullptr; EnterCriticalSection(&_csCacheLock); if (IsFilePathCached(pszCandidateFilePath)) { // Get metadata importer from cache. *ppMetaDataImporter = _metadataImportersMap[pszCandidateFilePath]; IMetaDataImport2* value = *ppMetaDataImporter; if (value != nullptr) { value->AddRef(); } } else { // Importer was not found in cache. hr = GetNewMetaDataImporter( pMetaDataDispenser, pszCandidateFilePath, ppMetaDataImporter); } LeaveCriticalSection(&_csCacheLock); return hr; } HRESULT MetaDataImportersLRUCache::GetNewMetaDataImporter( _In_ IMetaDataDispenserEx* pMetaDataDispenser, _In_ PCWSTR pszCandidateFilePath, _Outptr_opt_ IMetaDataImport2** ppMetaDataImporter) { if (ppMetaDataImporter == nullptr) { return ERROR_BAD_ARGUMENTS; } HRESULT hr; hr = pMetaDataDispenser->OpenScope( pszCandidateFilePath, ofReadOnly, IID_IMetaDataImport2, reinterpret_cast<IUnknown**>(ppMetaDataImporter)); if (SUCCEEDED(hr)) { _metadataImportersMap.emplace( pszCandidateFilePath, *ppMetaDataImporter); IMetaDataImport2* value = *ppMetaDataImporter; if (value != nullptr) { value->AddRef(); } } if (SUCCEEDED(hr)) { hr = AddNewFilePathToList(pszCandidateFilePath); } return hr; } HRESULT MetaDataImportersLRUCache::AddNewFilePathToList(PCWSTR pszFilePath) { HRESULT hr = RemoveLeastRecentlyUsedItemIfListIsFull(); if (SUCCEEDED(hr)) { // Make room for new element. for (int i = g_dwMetaDataImportersLRUCacheSize - 2; i >= 0; i--) { _arFilePaths[i + 1] = _arFilePaths[i]; } _arFilePaths[0] = AllocateAndCopyString(pszFilePath); if (_arFilePaths[0] == nullptr) { hr = E_OUTOFMEMORY; } } return hr; } bool MetaDataImportersLRUCache::IsFilePathCached(PCWSTR pszFilePath) { int filePathIndex = GetFilePathIndex(pszFilePath); if (filePathIndex != -1) { MoveElementToFrontOfList(filePathIndex); return true; } else { return false; } } int MetaDataImportersLRUCache::GetFilePathIndex(PCWSTR pszFilePath) { int filePathIndex = -1; for (int i = 0; (i < g_dwMetaDataImportersLRUCacheSize) && (_arFilePaths[i] != nullptr); i++) { if (wcscmp(pszFilePath, _arFilePaths[i]) == 0) { filePathIndex = i; break; } } return filePathIndex; } void MetaDataImportersLRUCache::MoveElementToFrontOfList(int elementIndex) { PWSTR pszFoundFilePath = _arFilePaths[elementIndex]; for (int i = elementIndex - 1; i >= 0; i--) { _arFilePaths[i + 1] = _arFilePaths[i]; } _arFilePaths[0] = pszFoundFilePath; } HRESULT MetaDataImportersLRUCache::RemoveLeastRecentlyUsedItemIfListIsFull() { HRESULT hr = S_OK; PWSTR pszLastFilePathInList = _arFilePaths[g_dwMetaDataImportersLRUCacheSize - 1]; if (pszLastFilePathInList != nullptr) { IMetaDataImport2* value = _metadataImportersMap[pszLastFilePathInList]; if (value != nullptr) { value->Release(); value = nullptr; } if (!_metadataImportersMap.erase(pszLastFilePathInList)) { hr = E_UNEXPECTED; } delete[] pszLastFilePathInList; _arFilePaths[g_dwMetaDataImportersLRUCacheSize - 1] = nullptr; } return hr; } }
13,662
852
<filename>CondFormats/RPCObjects/src/RPCFebConnector.cc<gh_stars>100-1000 #include "CondFormats/RPCObjects/interface/RPCFebConnector.h" #include <ostream> #include <sstream> RPCFebConnector::RPCFebConnector(RPCDetId const& rpc_det_id, unsigned int first_strip, int slope, std::uint16_t channels) : first_strip_(1), slope_(slope < 0 ? -1 : 1), channels_(channels), rpc_det_id_(rpc_det_id.rawId()) { setFirstStrip(first_strip); } std::string RPCFebConnector::getString() const { std::ostringstream oss; oss << rpc_det_id_ << '_' << (int)first_strip_ << (slope_ < 0 ? '-' : '+') << '_' << std::hex << std::showbase << channels_; return oss.str(); } std::ostream& operator<<(std::ostream& ostream, RPCFebConnector const& connector) { return (ostream << connector.getString()); }
302
640
/*! \file SystemCompatibility.cpp \brief The source file for access to system specific functionality. */ // Lucius includes #include <prnn/detail/util/system_compatibility.h> // System Includes #ifdef HAVE_CONFIG_H #include <configure.h> #endif #ifdef _WIN32 #include <windows.h> #elif __APPLE__ #include <sys/types.h> #include <sys/sysctl.h> #elif __GNUC__ #if HAVE_GLEW #include <GL/glx.h> #endif #include <unistd.h> #include <sys/sysinfo.h> #include <cxxabi.h> #else #error "Unknown system/compiler (WIN32, APPLE, and GNUC are supported)." #endif // Standard Library Includes #include <algorithm> #include <cstdlib> namespace prnn { namespace util { unsigned int getHardwareThreadCount() { #ifdef _WIN32 SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; #elif __APPLE__ int nm[2]; size_t len = 4; uint32_t count; nm[0] = CTL_HW; nm[1] = HW_AVAILCPU; sysctl(nm, 2, &count, &len, NULL, 0); if(count < 1) { nm[1] = HW_NCPU; sysctl(nm, 2, &count, &len, NULL, 0); if(count < 1) { count = 1; } } return std::max(1U, count); #elif __GNUC__ return sysconf(_SC_NPROCESSORS_ONLN); #endif } std::string getExecutablePath(const std::string& executableName) { return executableName; } long long unsigned int getFreePhysicalMemory() { #ifdef _WIN32 MEMORYSTATUSEX status; status.dwLength = sizeof(status); GlobalMemoryStatusEx(&status); return status.ullTotalPhys; #elif __APPLE__ #if 0 int mib[2]; uint64_t physical_memory; size_t length; // Get the Physical memory size mib[0] = CTL_HW; mib[1] = HW_USERMEM; // HW_MEMSIZE -> physical memory length = sizeof(uint64_t); sysctl(mib, 2, &physical_memory, &length, NULL, 0); return physical_memory; #else return (100ULL * (1ULL << 20)); #endif #elif __GNUC__ return get_avphys_pages() * getpagesize(); #endif } long long unsigned int getMaxClockSpeed() { // 3ghz, TODO return (1ULL << 30) * 3; } long long unsigned int getFMAsPerClockPerCore() { // TODO: check for SSE return 8; } long long unsigned int getMachineFlops() { // TODO: check for GPUs return getHardwareThreadCount() * getMaxClockSpeed() * getFMAsPerClockPerCore(); } bool isAnOpenGLContextAvailable() { #ifdef _WIN32 HGLRC handle = wglGetCurrentContext(); return (handle != NULL); #elif __APPLE__ // TODO fill this in return false; #elif __GNUC__ #if HAVE_GLEW GLXContext openglContext = glXGetCurrentContext(); return (openglContext != 0); #else return false; #endif #endif } bool isMangledCXXString(const std::string& string) { return string.find("_Z") == 0; } std::string demangleCXXString(const std::string& string) { #ifdef _WIN32 // TODO fill this in return string; #elif __APPLE__ // TODO fill this in return string; #elif __GNUC__ int status = 0; std::string name = abi::__cxa_demangle(string.c_str(), 0, 0, &status); if(status < 0) { name = string; } return name; #endif } std::string getEnvironmentVariable(const std::string& string) { if(!isEnvironmentVariableDefined(string)) { throw std::runtime_error( "Tried to access undefined environment variable '" + string + "'"); } return std::getenv(string.c_str()); } bool isEnvironmentVariableDefined(const std::string& name) { return std::getenv(name.c_str()) != nullptr; } } }
1,711
435
<gh_stars>100-1000 { "copyright_text": "Standard YouTube License", "description": "Today, services built on Python 3.6.3 are widely used at Facebook. But as recently as May of 2014 it was actually impossible at all to use Python 3 at Facebook. Come learn how we cut the Gordian Knot of dependencies and social aversion to the point where new services are now being written in Python 3 while older Python 2 projects are actively migrated to Python 3. All accomplished by a small group of individual contributors in their spare time. Learn to fight the good fight and upgrade your organization to Python 3 like we did at Facebook.", "duration": 1848, "language": "eng", "recorded": "2018-05-12", "related_urls": [ { "label": "Conference schedule", "url": "https://us.pycon.org/2018/schedule/talks/" }, { "label": "Conference slides (Github)", "url": "https://github.com/PyCon/2018-slides" }, { "label": "Conference slides (SpeakerDeck)", "url": "https://speakerdeck.com/pycon2018" }, { "label": "talk schedule", "url": "https://us.pycon.org/2018/schedule/presentation/117/" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/H4SS9yVWJYA/maxresdefault.jpg", "title": "Fighting the Good Fight: Python 3 in your organization", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=H4SS9yVWJYA" } ] }
524
14,499
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ typedef int T; int f(int* p) { int x = *p; p->T::~T(); return x; } template <typename T> int destroy(T* ptr) { ptr->T::~T(); return 0; } void test() { int* t = 0; destroy<int*>(&t); }
149
877
<gh_stars>100-1000 import org.checkerframework.common.value.qual.*; public class MinLenConstants { void test() { int @MinLen(3) [] arr = {1, 2, 3}; } }
65
348
<filename>docs/data/leg-t2/017/01703470.json {"nom":"Villars-les-Bois","circ":"3ème circonscription","dpt":"Charente-Maritime","inscrits":225,"abs":105,"votants":120,"blancs":8,"nuls":3,"exp":109,"res":[{"nuance":"REM","nom":"<NAME>","voix":63},{"nuance":"LR","nom":"M. <NAME>","voix":46}]}
119
1,270
<reponame>akhilbobby/pegasus # Copyright 2020 The PEGASUS Authors.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for parsers. Shape notations: U: unknown dimensions, L: length, B: batch_size, I: max_input_length, T: max_target_length. """ # # pylint: disable=invalid-name import tensorflow as tf def filter_by_length(tensor_list, min_len_list=None, max_len_list=None): """Filter tensors by their minimum or maximum length.""" if not min_len_list and not max_len_list: return tensor_list if min_len_list: if len(min_len_list) != len(tensor_list): raise ValueError("Min length list need to match size of tensor_list.") else: min_len_list = [None for _ in tensor_list] if max_len_list: if len(max_len_list) != len(tensor_list): raise ValueError("Max length list need to match size of tensor_list.") else: max_len_list = [None for _ in tensor_list] keep = tf.constant(True, dtype=tf.bool) for min_len, max_len, tensor in zip(min_len_list, max_len_list, tensor_list): if min_len and max_len and min_len >= max_len: raise ValueError("Invalid min max lengths.") if any([min_len, max_len]): tensor_len = tf.reduce_sum(tf.cast(tf.greater(tensor, 0), tf.int32)) if min_len: keep = tf.logical_and(keep, tf.greater(tensor_len, min_len)) if max_len: keep = tf.logical_and(keep, tf.less_equal(tensor_len, max_len)) filtered_tensor_list = [] for tensor in tensor_list: empty_tensor = tf.zeros( [0] * len(tensor.shape.as_list()), dtype=tensor.dtype) filtered_tensor_list.append( tf.cond(keep, lambda: tensor, lambda: empty_tensor)) # pylint: disable=cell-var-from-loop return filtered_tensor_list def add_length_bucket_id(inputs_BxI, targets_BxT, bucket_size, bucket_start_id, max_num_buckets): """Add bucket id of the target to start of the inputs.""" if bucket_size: non_pad_BxL = tf.cast(tf.greater(targets_BxT, 0), targets_BxT.dtype) length_Bx1 = tf.reduce_sum(non_pad_BxL, axis=-1, keep_dims=True) bucket_id_Bx1 = length_Bx1 // bucket_size + bucket_start_id # tail distributions are assigned to the last bucket. bucket_id_Bx1 = tf.minimum(bucket_id_Bx1, max_num_buckets) inputs_BxI = tf.concat([bucket_id_Bx1, inputs_BxI[:, :-1]], axis=-1) return inputs_BxI def add_task_id(inputs_1xI, task_id): task_id_1x1 = tf.cast(tf.reshape(task_id, [1, 1]), inputs_1xI.dtype) return tf.concat([task_id_1x1, inputs_1xI[:, :-1]], axis=1)
1,199
450
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*------------------------------------------------------------------------- * * cdbtimer.h * Functions to manipulate timers used in a backend. * * * $Id$ * *------------------------------------------------------------------------- */ #ifndef CDBTIMER_H_ #define CDBTIMER_H_ #include <sys/time.h> typedef struct itimers { struct itimerval rtimer; /* ITIMER_REAL */ struct itimerval vtimer; /* ITIMER_VIRTUAL */ struct itimerval ptimer; /* ITIMER_PROF */ } itimers; void resetTimers(struct itimers *timers); void restoreTimers(struct itimers *timers); #endif /* CDBTIMER_H_ */
395
302
<reponame>cashlisa/mos<gh_stars>100-1000 #include "math.h" static uint32_t rseed = 1; static uint32_t rand_r(uint32_t *seed) { unsigned int next = *seed; int result; next *= 1103515245; next += 12345; result = (unsigned int)(next / 65536) % 2048; next *= 1103515245; next += 12345; result <<= 10; result ^= (unsigned int)(next / 65536) % 1024; next *= 1103515245; next += 12345; result <<= 10; result ^= (unsigned int)(next / 65536) % 1024; *seed = next; return result; } uint32_t rand() { return rand_r(&rseed); } uint32_t srand(uint32_t seed) { rseed = seed; return rand_r(&rseed); }
260
3,631
<gh_stars>1000+ /* * Copyright (c) 2021. Red Hat, Inc. and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.mvel.compiler.compiler; import java.util.Collection; import java.util.List; import org.drools.testcoverage.common.util.KieBaseTestConfiguration; import org.drools.testcoverage.common.util.KieUtil; import org.drools.testcoverage.common.util.TestParametersUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.kie.api.builder.KieBuilder; import org.kie.api.builder.Message; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @RunWith(Parameterized.class) public class TypeDeclarationUnsupportedModelTest { private final KieBaseTestConfiguration kieBaseTestConfiguration; public TypeDeclarationUnsupportedModelTest(final KieBaseTestConfiguration kieBaseTestConfiguration) { this.kieBaseTestConfiguration = kieBaseTestConfiguration; } @Parameterized.Parameters(name = "KieBase type={0}") public static Collection<Object[]> getParameters() { return TestParametersUtil.getKieBaseCloudConfigurations(false); } @Test() public void testTraitExtendPojo() { //DROOLS-697 final String s1 = "package test;\n" + "declare Poojo " + "end " + "declare trait Mask extends Poojo " + "end " + ""; KieBuilder kieBuilder = KieUtil.getKieBuilderFromDrls(kieBaseTestConfiguration, false, s1); List<Message> errors = kieBuilder.getResults().getMessages(Message.Level.ERROR); assertEquals(1, errors.size()); } @Test public void testRedeclareWithInterfaceExtensionAndOverride() { final String s1 = "package test;\n" + "declare trait " + TypeDeclarationTest.Ext.class.getCanonicalName() + " extends " + TypeDeclarationTest.Base.class.getCanonicalName() + " " + " fld : String " + "end " + "declare trait " + TypeDeclarationTest.Base.class.getCanonicalName() + " " + "end " + ""; KieBuilder kieBuilder = KieUtil.getKieBuilderFromDrls(kieBaseTestConfiguration, false, s1); List<Message> errors = kieBuilder.getResults().getMessages(Message.Level.ERROR); assertTrue(errors.toString(), errors.isEmpty()); } @Test public void testDeclaresInForeignPackages() { String str1 = "" + "package org.drools \n" + "declare foreign.ClassC fld : foreign.ClassD end " + "declare foreign.ClassD end " + ""; KieBuilder kieBuilder = KieUtil.getKieBuilderFromDrls(kieBaseTestConfiguration, false, str1); List<Message> errors = kieBuilder.getResults().getMessages(Message.Level.ERROR); assertTrue(errors.toString(), errors.isEmpty()); } @Test public void testTypeReDeclarationPojo() { String str1 = "" + "package org.drools \n" + "import " + TypeDeclarationTest.class.getName() + ".ClassC; \n" + "" + "declare " + TypeDeclarationTest.class.getName() + ".ClassC \n" + " name : String \n" + " age : Integer \n" + "end \n"; KieBuilder kieBuilder = KieUtil.getKieBuilderFromDrls(kieBaseTestConfiguration, false, str1); List<Message> errors = kieBuilder.getResults().getMessages(Message.Level.ERROR); assertTrue(errors.toString(), errors.isEmpty()); } @Test public void testTypeReDeclarationPojoMoreFields() { String str1 = "" + "package org.drools \n" + "import " + TypeDeclarationTest.class.getName() + ".ClassC; \n" + "" + "declare " + TypeDeclarationTest.class.getName() + ".ClassC \n" + " name : String \n" + " age : Integer \n" + " address : Objet \n" + "end \n"; KieBuilder kieBuilder = KieUtil.getKieBuilderFromDrls(kieBaseTestConfiguration, false, str1); List<Message> errors = kieBuilder.getResults().getMessages(Message.Level.ERROR); assertFalse("Should have an error", errors.isEmpty()); } @Test public void testTypeReDeclarationPojoLessFields() { String str1 = "" + "package org.drools \n" + "import " + TypeDeclarationTest.class.getName() + ".ClassC; \n" + "" + "declare " + TypeDeclarationTest.class.getName() + ".ClassC \n" + " name : String \n" + "end \n"; KieBuilder kieBuilder = KieUtil.getKieBuilderFromDrls(kieBaseTestConfiguration, false, str1); List<Message> errors = kieBuilder.getResults().getMessages(Message.Level.ERROR); assertFalse("Should have an error", errors.isEmpty()); } }
2,321
496
<gh_stars>100-1000 # Generated by Django 3.0.4 on 2020-07-12 15:57 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('users', '0015_auto_20200712_1404'), ] operations = [ migrations.RenameField( model_name='user', old_name='membership_platform_id', new_name='patreon_id', ), ]
185
679
<filename>main/reportdesign/source/filter/xml/xmlFormattedField.hxx<gh_stars>100-1000 /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef RPT_XMLFORMATTEDFIELD_HXX #define RPT_XMLFORMATTEDFIELD_HXX #include "xmlReportElementBase.hxx" #include <com/sun/star/report/XFormattedField.hpp> namespace rptxml { class ORptFilter; class OXMLFormattedField : public OXMLReportElementBase { OXMLFormattedField(const OXMLFormattedField&); void operator =(const OXMLFormattedField&); public: OXMLFormattedField( ORptFilter& rImport ,sal_uInt16 nPrfx ,const ::rtl::OUString& rLName ,const ::com::sun::star::uno::Reference< ::com::sun::star::xml::sax::XAttributeList > & xAttrList ,const ::com::sun::star::uno::Reference< ::com::sun::star::report::XFormattedField >& _xComponent ,OXMLTable* _pContainer ,bool _bPageCount); virtual ~OXMLFormattedField(); }; // ----------------------------------------------------------------------------- } // namespace rptxml // ----------------------------------------------------------------------------- #endif // RPT_XMLFORMATTEDFIELD_HXX
646
3,269
// Time: O(n) // Space: O(w) /** * Definition for a binary tree node. * struct TreeNode { * int val; * TreeNode *left; * TreeNode *right; * TreeNode() : val(0), left(nullptr), right(nullptr) {} * TreeNode(int x) : val(x), left(nullptr), right(nullptr) {} * TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {} * }; */ // bfs solution class Solution { public: bool isValidSequence(TreeNode* root, vector<int>& arr) { vector<TreeNode *> q = {root}; for (int depth = 0; depth < arr.size(); ++depth) { vector<TreeNode *> new_q; while (!q.empty()) { const auto node = q.back(); q.pop_back(); if (!node || node->val != arr[depth]) { continue; } if (depth + 1 == arr.size() && node->left == node->right) { return true; } new_q.emplace_back(node->left); new_q.emplace_back(node->right); } q = move(new_q); } return false; } }; // Time: O(n) // Space: O(h) // dfs solution with stack class Solution2 { public: bool isValidSequence(TreeNode* root, vector<int>& arr) { vector<pair<TreeNode *, int>> s = {{root, 0}}; while (!s.empty()) { const auto [node, depth] = s.back(); s.pop_back(); if (!node || depth == arr.size() || node->val != arr[depth]) { continue; } if (depth + 1 == arr.size() && node->left == node->right) { return true; } s.emplace_back(node->right, depth + 1); s.emplace_back(node->left, depth + 1); } return false; } }; // Time: O(n) // Space: O(h) // dfs solution with recursion class Solution3 { public: bool isValidSequence(TreeNode* root, vector<int>& arr) { return dfs(root, arr, 0); } private: bool dfs(TreeNode *node, const vector<int>& arr, int depth) { if (!node || depth == arr.size() || node->val != arr[depth]) { return false; } if (depth + 1 == arr.size() && node->left == node->right) { return true; } return dfs(node->left, arr, depth + 1) || dfs(node->right, arr, depth + 1); } };
1,178
543
/* * Copyright 2017 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.mirkosertic.bytecoder.core; public class SimpleClass implements SimpleInterface { protected String unknownString; protected double doubleValue = 10d; protected int intValue = 10; protected float floatValue = 14f; protected String[] stringArray; protected byte[] byteArray; protected long longValue = 44L; protected int anotherIntValue; public static byte[] createArray() { return new byte[] {(byte) 10}; } public int sum(int a, int b) { return a + b; } public int div(int a, int b) { return a / b; } public int mul(int a, int b) { return a * b; } public int sub(int a, int b) { return a - b; } public static void main(String args) { int a = 10; int b = 20; int c = a + b; byte x1= -1; byte x2 = 0; byte x3 = 1; byte x4 = 2; byte x5 = 3; byte x6 = 4; byte x7 = 5; boolean[] booleans = new boolean[10]; booleans[0] = false; char[] chars = {}; float[] floats = {}; double[] doubles = {}; byte[] bytes = {}; short[] shorts = {}; int[] ints = {}; long[] longs = {}; int z = createArray().length; Object[] objects = new Object[10]; objects[0] = null; if (objects[1] instanceof Object) { return; } double dw = 11d; if (c > 20) { return; } SimpleClass theSimpleMe = new SimpleClass(); int theSum = theSimpleMe.sum(10, 20); int x = theSimpleMe.intValue; } }
929
336
<filename>system/apps/105_avrprogrammer/source/ihex.h #ifndef INTEL_HEX_H #define INTEL_HEX_H /** * \file ihex.h * \brief Low-level utility functions to create, read, write, and print Intel HEX8 binary records. * \author <NAME> <<EMAIL>> * \date February 2011 * \version 1.0.5 */ uint32_t _strtol(char*str, char**p, int nBase) { uint32_t nVal = 0; char ch; while ( (ch = *str++) != 0 ) { nVal <<= 4; if ( ch >= '0' && ch <= '9' ) nVal |= ch - '0'; if ( ch >= 'a' && ch <= 'f' ) nVal |= ch - 'a' + 10; if ( ch >= 'A' && ch <= 'F' ) nVal |= ch - 'A' + 10; } return nVal; } /* General definition of the Intel HEX8 specification */ enum _IHexDefinitions { /* 768 should be plenty of space to read in a Intel HEX8 record */ IHEX_RECORD_BUFF_SIZE = 64, /* Offsets and lengths of various fields in an Intel HEX8 record */ IHEX_COUNT_OFFSET = 1, IHEX_COUNT_LEN = 2, IHEX_ADDRESS_OFFSET = 3, IHEX_ADDRESS_LEN = 4, IHEX_TYPE_OFFSET = 7, IHEX_TYPE_LEN = 2, IHEX_DATA_OFFSET = 9, IHEX_CHECKSUM_LEN = 2, IHEX_MAX_DATA_LEN = 512, /* Ascii hex encoded length of a single byte */ IHEX_ASCII_HEX_BYTE_LEN = 2, /* Start code offset and value */ IHEX_START_CODE_OFFSET = 0, IHEX_START_CODE = ':', }; /** * All possible error codes the Intel HEX8 record utility functions may return. */ enum IHexErrors { IHEX_OK = 0, /**< Error code for success or no error. */ IHEX_ERROR_FILE = -1, /**< Error code for error while reading from or writing to a file. You may check errno for the exact error if this error code is encountered. */ IHEX_ERROR_EOF = -2, /**< Error code for encountering end-of-file when reading from a file. */ IHEX_ERROR_INVALID_RECORD = -3, /**< Error code for error if an invalid record was read. */ IHEX_ERROR_INVALID_ARGUMENTS = -4, /**< Error code for error from invalid arguments passed to function. */ IHEX_ERROR_NEWLINE = -5, /**< Error code for encountering a newline with no record when reading from a file. */ }; /** * Intel HEX8 Record Types 00-05 */ enum IHexRecordTypes { IHEX_TYPE_00 = 0, /**< Data Record */ IHEX_TYPE_01, /**< End of FIL Record */ IHEX_TYPE_02, /**< Extended Segment Address Record */ IHEX_TYPE_03, /**< Start Segment Address Record */ IHEX_TYPE_04, /**< Extended Linear Address Record */ IHEX_TYPE_05, /**< Start Linear Address Record */ }; /** * Structure to hold the fields of an Intel HEX8 record. */ typedef struct { uint16_t address; /**< The 16-bit address field. */ uint8_t data[IHEX_MAX_DATA_LEN/2]; /**< The 8-bit array data field, which has a maximum size of 256 bytes. */ int dataLen; /**< The number of bytes of data stored in this record. */ int type; /**< The Intel HEX8 record type of this record. */ uint8_t checksum; /**< The checksum of this record. */ } IHexRecord; uint8_t Checksum_IHexRecord(const IHexRecord *ihexRecord); /** * Sets all of the record fields of an Intel HEX8 record structure. * \param type The Intel HEX8 record type (integer value of 0 through 5). * \param address The 16-bit address of the data. * \param data A point to the 8-bit array of data. * \param dataLen The size of the 8-bit data array. * \param ihexRecord A pointer to the target Intel HEX8 record structure where these fields will be set. * \return IHEX_OK on success, otherwise one of the IHEX_ERROR_ error codes. * \retval IHEX_OK on success. * \retval IHEX_ERROR_INVALID_ARGUMENTS if the record pointer is NULL, or if the length of the 8-bit data array is out of range (less than zero or greater than the maximum data length allowed by record specifications, see IHexRecord.data). */ int New_IHexRecord(int type, uint16_t address, const uint8_t *data, int dataLen, IHexRecord *ihexRecord) { /* Data length size check, assertion of ihexRecord pointer */ if (dataLen < 0 || dataLen > IHEX_MAX_DATA_LEN / 2 || ihexRecord == NULL) return IHEX_ERROR_INVALID_ARGUMENTS; ihexRecord->type = type; ihexRecord->address = address; memcpy(ihexRecord->data, data, dataLen); ihexRecord->dataLen = dataLen; ihexRecord->checksum = Checksum_IHexRecord(ihexRecord); return IHEX_OK; } /** * Reads an Intel HEX8 record from an opened file. * \param ihexRecord A pointer to the Intel HEX8 record structure that will store the read record. * \param in A FIL pointer to an opened FIL that can be read. * \return IHEX_OK on success, otherwise one of the IHEX_ERROR_ error codes. * \retval IHEX_OK on success. * \retval IHEX_ERROR_INVALID_ARGUMENTS if the record pointer or FIL pointer is NULL. * \retval IHEX_ERROR_EOF if end-of-file has been reached. * \retval IHEX_ERROR_FILE if a FIL reading error has occured. * \retval IHEX_INVALID_RECORD if the record read is invalid (record did not match specifications or record checksum was invalid). */ int Read_IHexRecord(IHexRecord *ihexRecord, CBufferedReader &in) { char recordBuff[IHEX_RECORD_BUFF_SIZE]; /* A temporary buffer to hold ASCII hex encoded data, set to the maximum length we would ever need */ char hexBuff[IHEX_ADDRESS_LEN + 1]; int dataCount, i; /* Check our record pointer and FIL pointer */ if (ihexRecord == NULL ) return IHEX_ERROR_INVALID_ARGUMENTS; in >> recordBuff; /* Check if we hit a newline */ if (strlen(recordBuff) == 0) return IHEX_ERROR_NEWLINE; /* Size check for start code, count, addess, and type fields */ if (strlen(recordBuff) < (unsigned int) (1 + IHEX_COUNT_LEN + IHEX_ADDRESS_LEN + IHEX_TYPE_LEN)) return IHEX_ERROR_INVALID_RECORD; /* Check the for colon start code */ if (recordBuff[IHEX_START_CODE_OFFSET] != IHEX_START_CODE) return IHEX_ERROR_INVALID_RECORD; /* Copy the ASCII hex encoding of the count field into hexBuff, convert it to a usable integer */ strncpy(hexBuff, recordBuff + IHEX_COUNT_OFFSET, IHEX_COUNT_LEN); hexBuff[IHEX_COUNT_LEN] = 0; dataCount = _strtol(hexBuff, (char **) NULL, 16); /* Copy the ASCII hex encoding of the address field into hexBuff, convert it to a usable integer */ strncpy(hexBuff, recordBuff + IHEX_ADDRESS_OFFSET, IHEX_ADDRESS_LEN); hexBuff[IHEX_ADDRESS_LEN] = 0; ihexRecord->address = (uint16_t) _strtol(hexBuff, (char **) NULL, 16); /* Copy the ASCII hex encoding of the address field into hexBuff, convert it to a usable integer */ strncpy(hexBuff, recordBuff + IHEX_TYPE_OFFSET, IHEX_TYPE_LEN); hexBuff[IHEX_TYPE_LEN] = 0; ihexRecord->type = _strtol(hexBuff, (char **) NULL, 16); /* Size check for start code, count, address, type, data and checksum fields */ if (strlen(recordBuff) < (unsigned int) (1 + IHEX_COUNT_LEN + IHEX_ADDRESS_LEN + IHEX_TYPE_LEN + dataCount * 2 + IHEX_CHECKSUM_LEN)) return IHEX_ERROR_INVALID_RECORD; /* Loop through each ASCII hex byte of the data field, pull it out into hexBuff, * convert it and store the result in the data buffer of the Intel HEX8 record */ for (i = 0; i < dataCount; i++) { /* Times two i because every byte is represented by two ASCII hex characters */ strncpy(hexBuff, recordBuff + IHEX_DATA_OFFSET + 2 * i, IHEX_ASCII_HEX_BYTE_LEN); hexBuff[IHEX_ASCII_HEX_BYTE_LEN] = 0; ihexRecord->data[i] = (uint8_t) _strtol(hexBuff, (char **) NULL, 16); } ihexRecord->dataLen = dataCount; /* Copy the ASCII hex encoding of the checksum field into hexBuff, convert it to a usable integer */ strncpy(hexBuff, recordBuff + IHEX_DATA_OFFSET + dataCount * 2, IHEX_CHECKSUM_LEN); hexBuff[IHEX_CHECKSUM_LEN] = 0; ihexRecord->checksum = (uint8_t) _strtol(hexBuff, (char **) NULL, 16); if (ihexRecord->checksum != Checksum_IHexRecord(ihexRecord)) return IHEX_ERROR_INVALID_RECORD; return IHEX_OK; } /** * Calculates the checksum of an Intel HEX8 IHexRecord structure. * See the Intel HEX8 specifications for more details on the checksum calculation. * \param ihexRecord A pointer to the Intel HEX8 record structure. * \return The 8-bit checksum. */ uint8_t Checksum_IHexRecord(const IHexRecord *ihexRecord) { uint8_t checksum; int i; /* Add the data count, type, address, and data bytes together */ checksum = ihexRecord->dataLen; checksum += ihexRecord->type; checksum += (uint8_t) ihexRecord->address; checksum += (uint8_t) ((ihexRecord->address & 0xFF00) >> 8); for (i = 0; i < ihexRecord->dataLen; i++) checksum += ihexRecord->data[i]; /* Two's complement on checksum */ checksum = ~checksum + 1; return checksum; } #endif
3,122
60,067
<reponame>xiaohanhuang/pytorch<filename>torch/csrc/jit/frontend/lexer.h #pragma once #include <ATen/core/Macros.h> #include <c10/util/C++17.h> #include <c10/util/Exception.h> #include <torch/csrc/WindowsTorchApiMacro.h> #include <torch/csrc/jit/frontend/parser_constants.h> #include <torch/csrc/jit/frontend/source_range.h> #include <torch/csrc/jit/frontend/strtod.h> #include <algorithm> #include <clocale> #include <cstdlib> #include <memory> #include <sstream> #include <string> #include <vector> namespace torch { namespace jit { // single character tokens are just the character itself '+' // multi-character tokens need an entry here // if the third entry is not the empty string, it is used // in the lexer to match this token. // These kinds are also used in Tree.h as the kind of the AST node. // Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the // lexer. #define TC_FORALL_TOKEN_KINDS(_) \ _(TK_EOF, "eof", "") \ _(TK_WHITESPACE, "whitespace", "") \ _(TK_WHITESPACE_EOF, "whitespace_eof", "") \ _(TK_NUMBER, "number", "") \ _(TK_NEWLINE, "newline", "") \ _(TK_INDENT, "indent", "") \ _(TK_DEDENT, "dedent", "") \ _(TK_DEF, "def", "def") \ _(TK_EQUIVALENT, "equivalent", "<=>") \ _(TK_IDENT, "ident", "") \ _(TK_STRING, "string", "") \ _(TK_STRINGLITERAL, "string_literal", "") \ _(TK_CONST, "const", "") \ _(TK_LIST, "list", "") \ _(TK_DICT, "dict", "") \ _(TK_OPTION, "option", "") \ _(TK_APPLY, "apply", "") \ _(TK_COMPREHENSION, "comprehension", "") \ _(TK_RANGE_CONSTRAINT, "range_constraint", "") \ _(TK_PARAM, "param", "") \ _(TK_INFERRED, "inferred", "") \ _(TK_ACCESS, "access", "") \ _(TK_ASSIGN, "assign", "") \ _(TK_AUG_ASSIGN, "aug_assign", "") \ _(TK_ATTRIBUTE, "attribute", "") \ _(TK_IF, "if", "if") \ _(TK_ELSE, "else", "else") \ _(TK_ELIF, "elif", "elif") \ _(TK_WHILE, "while", "while") \ _(TK_EXPR_STMT, "expression statement", "") \ _(TK_RETURN, "return", "return") \ _(TK_IS, "is", "is") \ _(TK_ISNOT, "is not", "is not") \ _(TK_NE, "ne", "!=") \ _(TK_EQ, "eq", "==") \ _(TK_LE, "le", "<=") \ _(TK_GE, "ge", ">=") \ _(TK_FLOOR_DIV, "floordiv", "//") \ _(TK_IF_EXPR, "if", "") \ _(TK_TRUE, "True", "True") \ _(TK_FALSE, "False", "False") \ _(TK_NONE, "None", "None") \ _(TK_AND, "and", "and") \ _(TK_OR, "or", "or") \ _(TK_NOT, "not", "not") \ _(TK_LSHIFT, "<<", "<<") \ _(TK_RSHIFT, ">>", ">>") \ _(TK_CAST, "cast", "") \ _(TK_PLUS_EQ, "+=", "+=") \ _(TK_MINUS_EQ, "-=", "-=") \ _(TK_TIMES_EQ, "*=", "*=") \ _(TK_DIV_EQ, "/=", "/=") \ _(TK_MOD_EQ, "%=", "%=") \ _(TK_BIT_OR_EQ, "|=", "|=") \ _(TK_BIT_AND_EQ, "&=", "&=") \ _(TK_BIT_XOR_EQ, "^=", "^=") \ _(TK_LSHIFT_EQ, "<<=", "<<=") \ _(TK_RSHIFT_EQ, ">>=", ">>=") \ _(TK_POW_EQ, "**=", "**=") \ _(TK_GLOBAL, "global", "global") \ _(TK_BUILT_IN, "built-in", "") \ _(TK_SUBSCRIPT, "subscript", "") \ _(TK_VAR, "variable", "") \ _(TK_NOTHING, "nothing", "") \ _(TK_DICT_LITERAL, "dict-literal", "") \ _(TK_LIST_LITERAL, "list-literal", "") \ _(TK_TUPLE_LITERAL, "tuple-literal", "") \ _(TK_FOR, "for", "for") \ _(TK_IN, "in", "in") \ _(TK_NOTIN, "not in", "not in") \ _(TK_STARRED, "starred", "") \ _(TK_UNARY_MINUS, "unary minus", "") \ _(TK_POW, "pow operator", "**") \ _(TK_ARROW, "arrow", "->") \ _(TK_DECL, "decl", "") \ _(TK_SLICE_EXPR, "slice expr", "") \ _(TK_TYPE_COMMENT, "type comment", "# type:") \ _(TK_RAISE, "raise", "raise") \ _(TK_ASSERT, "assert", "assert") \ _(TK_DOTS, "dots", "...") \ _(TK_LIST_COMP, "list comprehension", "") \ _(TK_DICT_COMP, "dict comprehension", "") \ _(TK_BREAK, "break", "break") \ _(TK_CONTINUE, "continue", "continue") \ _(TK_DELETE, "del", "del") \ _(TK_PASS, "pass", "pass") \ _(TK_CLASS_DEF, "class", "class") \ _(TK_IMPORT, "import", "import") \ _(TK_WITH, "with", "with") \ _(TK_WITH_ITEM, "withitem", "") \ _(TK_AS, "as", "as") \ _(TK_PROP, "property", "") \ _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \ _(TK_NONE_TYPE, "NoneType", "NoneType") enum TokenKind { // we use characters to represent themselves so skip all valid characters // before // assigning enum values to multi-char tokens. TK_DUMMY_START = 256, #define DEFINE_TOKEN(tok, _, _2) tok, TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN) #undef DEFINE_TOKEN }; TORCH_API std::string kindToString(int kind); TORCH_API int stringToKind(const std::string& str); // nested hash tables that indicate char-by-char what is a valid token. struct TokenTrie; using TokenTrieRef = std::unique_ptr<TokenTrie>; struct TokenTrie { TokenTrie() : kind(0) {} void insert(const char* str, int tok) { if (*str == '\0') { AT_ASSERT(kind == 0); kind = tok; return; } for (size_t i = 0, e = child_chars.size(); i < e; ++i) { if (child_chars[i] == *str) { child_tries[i]->insert(str + 1, tok); return; } } child_chars.emplace_back(*str); child_tries.emplace_back(std::make_unique<TokenTrie>()); child_tries.back()->insert(str + 1, tok); } int kind; // 0 == invalid token std::vector<char> child_chars; std::vector<TokenTrieRef> child_tries; }; // stuff that is shared against all TC lexers/parsers and is initialized only // once. struct TORCH_API SharedParserData { SharedParserData() : head(new TokenTrie()) { std::stringstream ss; for (const char* c = valid_single_char_tokens; *c; c++) { std::string str(1, *c); head->insert(str.c_str(), *c); } #define ADD_CASE(tok, _, tokstring) \ if (*(tokstring) != '\0') { \ head->insert((tokstring), (tok)); \ } TC_FORALL_TOKEN_KINDS(ADD_CASE) #undef ADD_CASE } // find the longest match of str.substring(pos) against a token, return true // if successful filling in kind, start,and len bool match( c10::string_view str, size_t pos, bool continuation, // are we inside a scope where newlines don't count // (e.g. inside parens) bool whitespace_token, // should we treat whitespace as a token int* kind, size_t* start, size_t* len) { *start = pos; // skip whitespace while (pos < str.size() && isblank(str[pos])) pos++; // special handling if (pos < str.size()) { if (str[pos] == '#' && !isTypeComment(str, pos)) { // skip comments while (pos < str.size() && str[pos] != '\n') pos++; // tail call, handle whitespace and more comments return match( str, pos, continuation, whitespace_token, kind, start, len); } if (str[pos] == '\\' && pos + 1 < str.size() && str[pos + 1] == '\n' && !whitespace_token) { return match(str, pos + 2, continuation, false, kind, start, len); } if (str[pos] == '\n') { return match( str, pos + 1, continuation, !continuation, kind, start, len); } } // we handle white space before EOF because in the case we have something // like the following where we need to generate the dedent token if foo: // ... // else: // pass if (whitespace_token) { *kind = pos == str.size() ? TK_WHITESPACE_EOF : TK_WHITESPACE; *len = pos - *start; return true; } if (pos == str.size()) { *kind = TK_EOF; *start = pos; *len = 0; return true; } // invariant: the next token is not whitespace or newline *start = pos; // check for a valid number if (isNumber(str, pos, len)) { *kind = TK_NUMBER; return true; } // check for string if (isString(str, pos, len)) { *kind = TK_STRINGLITERAL; return true; } // check for either an ident or a token // ident tracks whether what we have scanned so far could be an identifier // matched indicates if we have found any match. bool matched = false; bool ident = true; TokenTrie* cur = head.get(); for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr); i++) { ident = ident && validIdent(i, str[pos + i]); if (ident) { matched = true; *len = i + 1; *kind = TK_IDENT; } // check for token second, so that e.g. 'max' matches the token TK_MAX // rather the // identifier 'max' if (cur) { const auto begin_it = cur->child_chars.begin(); const auto end_it = cur->child_chars.end(); const auto ch_it = std::find(begin_it, end_it, str[pos + i]); cur = (ch_it == end_it) ? nullptr : cur->child_tries[ch_it - begin_it].get(); if (cur && cur->kind != 0) { matched = true; *len = i + 1; *kind = cur->kind; } } } return matched; } bool isUnary(int kind, int* prec); bool isBinary(int kind, int* prec); bool isRightAssociative(int kind) { switch (kind) { case '?': case TK_POW: return true; default: return false; } } private: bool validIdent(size_t i, char n) { return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); } // 1. skip whitespace // 2. handle comment or newline // bool isNumber(c10::string_view str, size_t start, size_t* len) { char first = str[start]; // strtod allows numbers to start with + or - or nan or inf // http://en.cppreference.com/w/cpp/string/byte/strtof // but we want only the number part, otherwise 1+3 will turn into two // adjacent numbers in the lexer if (first == '-' || first == '+' || isalpha(first)) return false; const char* startptr = str.data() + start; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) char* endptr; torch::jit::strtod_c(startptr, &endptr); *len = endptr - startptr; // check if the number is complex valued // access is safe because string is assumed to be null terminated if (endptr != nullptr && *endptr == 'j') { *len += 1; } return *len > 0; } bool isCharCount(char c, c10::string_view str, size_t start, int len) { // count checks from [start, start + len) return start + len <= str.size() && std::count(str.begin() + start, str.begin() + start + len, c) == len; } // python concatenates all adjacent strings "a" "b" == "ab" // strings can be enclosed with 1 or 3 single or double quotes // if enclosed with 3 quotes newlines are valid // as elsewhere, backslash and new line should be ignored bool isString(c10::string_view str, size_t start, size_t* len) { char quote = str[start]; if (quote != '\"' && quote != '\'') return false; int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1; // end is now set past the opening quotation marks size_t end = start + quote_len; while (end < str.size() && !isCharCount(quote, str, end, quote_len)) { if (str[end] == '\n' && quote_len != 3) { return false; } // handle escaped characters. advances past escaped quotation marks, // escaped newlines and escaped backslashes // multi-char escapes like \x1A are handled fine here because the // remainder of the escape are valid string characters anyway if (str[end] == '\\') { end++; } end++; } // set length equal to the complete string including quotations *len = end - start + quote_len; // if end finished without going past the last character of the string than // there is a match return end < str.size(); } bool isblank(int n) { return isspace(n) && n != '\n'; } // Make an exception ignoring comments for type annotation comments bool isTypeComment(c10::string_view str, size_t pos) { const std::string type_string = "# type:"; if (str.size() < pos + type_string.length()) { return false; } auto match_string = str.substr(pos, type_string.size()); return match_string == type_string; } TokenTrieRef head; }; TORCH_API SharedParserData& sharedParserData(); struct Token { int kind; SourceRange range; Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {} std::string text() { return range.text(); } std::string kindString() const { return kindToString(kind); } }; struct Lexer { explicit Lexer(std::shared_ptr<SourceView> source) : source(std::move(source)), pos(0), nesting(0), indent_stack(), next_tokens(), shared(sharedParserData()) { auto first_indent = lexRaw(true); indent_stack.push_back(first_indent.range.size()); lex(); } // Return the current token, and then move to the next one Token next() { if (next_tokens.size() == 0) reportError("Lexer invariant violated: empty token queue"); Token r = std::move(next_tokens.front()); next_tokens.erase(next_tokens.begin()); if (next_tokens.size() == 0) { lex(); } return r; } // Skip the current token if it matches the given kind bool nextIf(int kind) { if (cur().kind != kind) return false; next(); return true; } [[noreturn]] void reportError(const std::string& what) { reportError(what, cur()); } [[noreturn]] void reportError(const std::string& what, const Token& t) { std::stringstream ss; ss << what << ":\n"; t.range.highlight(ss); throw std::runtime_error(ss.str()); } [[noreturn]] void expected(const std::string& what, const Token& t) { std::stringstream ss; ss << "expected " << what << " but found '" << t.kindString() << "' here:\n"; t.range.highlight(ss); throw std::runtime_error(ss.str()); } [[noreturn]] void expected(const std::string& what) { expected(what, cur()); } // Check that the current token has a given kind, return the current token, // and advance to the next one. Token expect(int kind) { if (cur().kind != kind) { expected(kindToString(kind)); } return next(); } Token& lookahead() { if (next_tokens.size() < 2) { lex(); } return next_tokens[1]; } Token& cur() { return next_tokens.front(); } private: void lex() { auto r = lexRaw(); switch (r.kind) { case '(': case '[': case '{': nesting++; break; case ')': case ']': case '}': nesting--; break; case TK_WHITESPACE: case TK_WHITESPACE_EOF: { const auto depth = static_cast<int64_t>( r.kind == TK_WHITESPACE_EOF ? indent_stack.front() : r.range.size()); // note: TK_WHITESPACE_EOF is whitespace right before the EOF token // just like we allow the code to be indented to a particular initial // indent level, we allow the final indent to be anything and set // it back to the initial indent level. This allows the code to be // put into string literals inside code without worrying about final // whitespace if (depth > indent_stack.back()) { indent_stack.push_back(depth); r.kind = TK_INDENT; } else if (depth == indent_stack.back()) { r.kind = TK_NEWLINE; } else { next_tokens.emplace_back(TK_NEWLINE, r.range); while (indent_stack.back() != depth) { indent_stack.pop_back(); next_tokens.emplace_back(TK_DEDENT, r.range); if (indent_stack.size() == 0) { reportError( "invalid indent level " + c10::guts::to_string(depth), r); } } return; // We've already queued the tokens } } break; default: break; } next_tokens.push_back(std::move(r)); } Token lexRaw(bool whitespace_token = false) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int kind; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) size_t start; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) size_t length; AT_ASSERT(source); if (!shared.match( source->text(), pos, nesting > 0, whitespace_token, &kind, &start, &length)) { expected( "a valid token", Token( (source->text())[start], SourceRange(source, start, start + 1))); } auto t = Token(kind, SourceRange(source, start, start + length)); pos = start + length; return t; } std::shared_ptr<SourceView> source; size_t pos; size_t nesting; // depth of ( [ { nesting... std::vector<int> indent_stack; // stack of indentation level of blocks // Invariant: this should always contain at least a single element std::vector<Token> next_tokens; SharedParserData& shared; }; } // namespace jit } // namespace torch
9,087
369
<gh_stars>100-1000 /* * Copyright © 2021 <NAME>, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package io.cdap.cdap.internal.app.runtime.artifact.plugin.nested; import com.google.gson.Gson; import io.cdap.cdap.api.annotation.Description; import io.cdap.cdap.api.annotation.Macro; import io.cdap.cdap.api.annotation.Name; import io.cdap.cdap.api.annotation.Plugin; import io.cdap.cdap.api.plugin.PluginConfig; import java.util.Objects; import java.util.concurrent.Callable; /** * Plugin which contains nested plugin config */ @Plugin(type = "dummy") @Name("nested") @Description("Nested config") public class NestedConfigPlugin implements Callable<String> { private Config config; @Override public String call() throws Exception { return new Gson().toJson(config); } public static class Config extends PluginConfig { @Name("X") public int x; @Name("Nested") @Macro public NestedConfig nested; public Config(int x, NestedConfig nested) { this.x = x; this.nested = nested; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Config config = (Config) o; return x == config.x && Objects.equals(nested, config.nested); } @Override public int hashCode() { return Objects.hash(x, nested); } } public static class NestedConfig extends PluginConfig { @Name("Nested1") @Macro public String nested1; @Name("Nested2") @Macro public String nested2; public NestedConfig(String nested1, String nested2) { this.nested1 = nested1; this.nested2 = nested2; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } NestedConfig that = (NestedConfig) o; return Objects.equals(nested1, that.nested1) && Objects.equals(nested2, that.nested2); } @Override public int hashCode() { return Objects.hash(nested1, nested2); } } }
1,014
2,633
/* * Copyright (C) <NAME> * Copyright (C) NGINX, Inc. */ #ifndef _NXT_JOB_FILE_H_INCLUDED_ #define _NXT_JOB_FILE_H_INCLUDED_ /* * nxt_job_file_read() allows to open a file, to get its type, size, and * modification time, to read or map file content to memory, and to close * the file. It can be done as one operation for small file or as several * operations for large file. On each operation completion ready_handler * or error_handler completion handlers are called. Since they are job * operations, they can be run by a thread pool. * * If a file is not opened then it is opened and its type, size, and * modification time are got. Then file content starting from given offset * is read or mapped in memory if there is a buffer supplied. The offset * field is correspondingly updated. * * If there is no buffer but the read_ahead flag is set then the first * byte is read to initiate read ahead operation. * * If the close flag is set then file descriptor is closed when the file * is completely read. * * The complete flag is set by nxt_job_file_read() when the file is * completely read. * * The test_before_open flag allows to save syscalls in some case, for * example, not to open and then not to close a directory. It calls * nxt_file_info() to get file type, size, and modification time before * opening the file. A custom read_required() callback combined with this * flag can also omit opening and reading on some conditions. However, * if the callback forces opening then additional nxt_file_info() is * called after opening. The default read_required() callback always * forces opening and reading. */ typedef struct nxt_job_file_s nxt_job_file_t; struct nxt_job_file_s { nxt_job_t job; nxt_file_t file; nxt_off_t offset; nxt_buf_t *buffer; nxt_work_handler_t ready_handler; nxt_work_handler_t error_handler; nxt_int_t (*read_required)(nxt_job_file_t *jbf); uint16_t directory_end; uint16_t close_before_open:1; uint16_t test_before_open:1; uint16_t read_ahead:1; uint16_t close:1; uint16_t complete:1; }; NXT_EXPORT nxt_job_file_t *nxt_job_file_create(nxt_mp_t *mp); NXT_EXPORT void nxt_job_file_init(nxt_job_file_t *jbf); NXT_EXPORT void nxt_job_file_read(nxt_task_t *task, nxt_job_t *job); #endif /* _NXT_JOB_FILE_H_INCLUDED_ */
914
2,464
package com.cjj.refresh; /** * Created by cjj on 2015/8/4. * 刷新回调接口 */ public interface PullToRefreshListener { /** * 刷新中。。。 * @param refreshLayout */ void onRefresh(RefreshLayout refreshLayout); }
107
731
<reponame>cocobear/fuxi #!/usr/bin/env python # -*- coding: utf-8 -*- import socket from urllib.parse import urlparse from pocsuite3.api import register_poc from pocsuite3.api import Output, POCBase from pocsuite3.api import POC_CATEGORY, VUL_TYPE class TestPOC(POCBase): vulID = '00002' version = '1' author = 'jeffzhang' vulDate = '2017-08-15' createDate = '2017-08-15' updateDate = '2017-08-15' references = [ 'http://blog.knownsec.com/2015/11/\ analysis-of-redis-unauthorized-of-expolit/'] name = 'Redis 未授权访问' appPowerLink = 'https://www.redis.io' appName = 'Redis' appVersion = 'All' vulType = VUL_TYPE.UNAUTHORIZED_ACCESS category = POC_CATEGORY.EXPLOITS.REMOTE desc = ''' redis 默认没有开启相关认证,黑客直接访问即可获取数据库中所有信息。 ''' samples = ['172.16.31.10'] def _verify(self): result = {} payload = b'\x2a\x31\x0d\x0a\x24\x34\x0d\x0a\x69\x6e\x66\x6f\x0d\x0a' pr = urlparse(self.url) if pr.port: # and pr.port not in ports: ports = [pr.port] else: ports = [6379, 16379, 26379] for port in ports: try: s = socket.socket() s.connect((pr.hostname, port)) s.send(payload) data = s.recv(4096) if data and b'redis_version' in data: result['VerifyInfo'] = {} result['VerifyInfo']['URL'] = '{}:{}'.format( pr.hostname, port) result['extra'] = {} result['extra']['evidence'] = data.decode('utf-8') break except: #raise pass finally: s.close() return self.parse_attack(result) def _attack(self): return self._verify() def parse_attack(self, result): output = Output(self) if result: output.success(result) else: output.fail("not vulnerability") return output register_poc(TestPOC)
1,201
1,439
/******************************************************************************* * Copyright 2018 <NAME> http://galenframework.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.galenframework.tests.parser; import static com.galenframework.components.TestUtils.deleteSystemProperty; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import java.io.IOException; import java.util.Arrays; import java.util.List; import com.galenframework.parser.*; import com.galenframework.specs.Location; import com.galenframework.specs.RangeValue; import com.galenframework.specs.Side; import com.galenframework.config.GalenConfig; import com.galenframework.specs.Range; import com.galenframework.parser.StringCharReader; import org.apache.commons.lang3.StringEscapeUtils; import org.apache.commons.lang3.tuple.Pair; import org.hamcrest.MatcherAssert; import org.junit.BeforeClass; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; public class ExpectationsTest { @BeforeClass public void init() throws IOException { deleteSystemProperty("galen.range.approximation"); deleteSystemProperty("galen.reporting.listeners"); GalenConfig.getConfig().reset(); } @Test(dataProvider = "rangeValueTestData") public void rangeValueTest(String textForParsing, RangeValue expected) { RangeValue rangeValue = new ExpectRangeValue().read(new StringCharReader(textForParsing)); MatcherAssert.assertThat(rangeValue, is(expected)); } @DataProvider public Object[][] rangeValueTestData() { return new Object[][] { {"0", new RangeValue(0, 0)}, {"123", new RangeValue(123, 0)}, {"0.0", new RangeValue(0, 1)}, {"1.0", new RangeValue(10, 1)}, {"-1.0", new RangeValue(-10, 1)}, {"-15.04567", new RangeValue(-1504567, 5)}, {"15.04567", new RangeValue(1504567, 5)} }; } @Test(dataProvider = "rangeTestData") public void expectRangeTest(String textForParsing, Range expected) { StringCharReader stringCharReader = new StringCharReader(textForParsing); Range range = new ExpectRange().read(stringCharReader); MatcherAssert.assertThat(range, is(expected)); } @DataProvider public Object[][] rangeTestData() { return new Object[][]{ {"10 to 15 px", Range.between(10, 15)}, {"10.0 to 15.4 px", Range.between(new RangeValue(100, 1), new RangeValue(154, 1))}, {"10 to 15px", Range.between(10, 15)}, {"10to15px", Range.between(10, 15)}, {"-15to-10px", Range.between(-15, -10)}, {"-15.04to-10px", Range.between(new RangeValue(-1504, 2), new RangeValue(-10))}, {"10to15 px", Range.between(10, 15)}, {"9 px", Range.exact(9)}, {"9px", Range.exact(9)}, {"9.01px", Range.exact(new RangeValue(901, 2))}, {" 9px", Range.exact(new RangeValue(9))}, {"\t9px", Range.exact(9)}, {"\t9\t\tpx", Range.exact(9)}, {"-49px", Range.exact(-49)}, {"~100px", Range.between(98, 102)}, {"~1000px", Range.between(998, 1002)}, {"~1px", Range.between(-1, 3)}, {"~0px", Range.between(-2, 2)}, {" ~0px", Range.between(-2, 2)}, {">10px", Range.greaterThan(10)}, {"> 10px", Range.greaterThan(10)}, {"<10px", Range.lessThan(10)}, {"< 10px", Range.lessThan(10)}, {"<= 10px", Range.lessThanOrEquals(10)}, {">= 10px", Range.greaterThanOrEquals(10)}, {"15% of screen/width", Range.exact(15).withPercentOf("screen/width")}, {"15.05% of screen/width", Range.exact(new RangeValue(1505, 2)).withPercentOf("screen/width")}, {"15 to 40% of screen/height", Range.between(15, 40).withPercentOf("screen/height")}, {"15 to 40% of item-1/some-other-stuff/a/b/c2", Range.between(15, 40).withPercentOf("item-1/some-other-stuff/a/b/c2")}, {"~40% of item-1/some-other-stuff/a/b/c2", Range.between(38, 42).withPercentOf("item-1/some-other-stuff/a/b/c2")}, {"> 67 % of object/width", Range.greaterThan(67).withPercentOf("object/width")}, {" < 30% of object/width", Range.lessThan(30).withPercentOf("object/width")}, {" > 70% of parent/width", Range.greaterThan(70).withPercentOf("parent/width")} }; } @Test(dataProvider = "provideBadRangeSamples") public void shouldGiveError_forIncorrectRanges(TestData<String> testData) { StringCharReader stringCharReader = new StringCharReader(testData.textForParsing); SyntaxException exception = null; try { new ExpectRange().read(stringCharReader); } catch (SyntaxException e) { exception = e; } assertThat("Exception should be", exception, is(notNullValue())); assertThat("Exception message should be", exception.getMessage(), is(testData.expected)); } @DataProvider public Object[][] provideBadRangeSamples() { return new Object[][] { row("0", "Expecting \"px\", \"to\" or \"%\", got \"\""), row("0p", "Expecting \"px\", \"to\" or \"%\", got \"p\""), row("0 p", "Expecting \"px\", \"to\" or \"%\", got \"p\""), row("0PX", "Expecting \"px\", \"to\" or \"%\", got \"PX\""), row("10 to 20", "Expecting \"px\", got \"\""), row("10 to 20p", "Expecting \"px\", got \"p\""), row("10 to 20%", "Missing value path for relative range"), row("10 to 20% of ", "Missing value path for relative range"), row("10% to 20% of ", "Missing value path for relative range"), }; } @Test(dataProvider = "wordTestData") public void expectWord(TestData<String> testData) { StringCharReader stringCharReader = new StringCharReader(testData.textForParsing); String word = new ExpectWord().read(stringCharReader); assertThat(word, is(testData.expected)); } @DataProvider public Object[][] wordTestData() { return new Object[][]{ row("object", "object"), row(" object", "object"), row("\tobject ", "object"), row("\t\tobject\tanother", "object"), row("o ject", "o"), row("o123-123124-_124/124|12qw!@#$%^^&*().<>?:\"[]{} ject", "o123-123124-_124/124|12qw!@#$%^^&*().<>?:\"[]{}"), row(" je ct", "je") }; } @Test(dataProvider="wordWithBreakingSymbolTestData") public void expectWordWithBreakingSymbol(String text, char breakingSymbol, String expectedWord) { StringCharReader stringCharReader = new StringCharReader(text); String word = new ExpectWord().stopOnTheseSymbols(breakingSymbol).read(stringCharReader); assertThat(word, is(expectedWord)); } @DataProvider public Object[][] wordWithBreakingSymbolTestData() { return new Object[][]{ new Object[]{"Hi, John!", ',', "Hi"}, new Object[]{" Hi, John!", ',', "Hi"}, new Object[]{" HiJohn", 'o', "HiJ"}, new Object[]{"HiJohn", '!', "HiJohn"} }; } @Test(dataProvider = "sideTestData") public void expectSides(TestData<List<Side>> testData) { StringCharReader stringCharReader = new StringCharReader(testData.textForParsing); List<Side> sides = new ExpectSides().read(stringCharReader); Side[] expected = testData.expected.toArray(new Side[testData.expected.size()]); assertThat(sides.size(), is(expected.length)); assertThat(sides, contains(expected)); } @DataProvider public Object[][] sideTestData() { return new Object[][]{ row("left right", sides(Side.LEFT, Side.RIGHT)), row(" \tleft\t right ", sides(Side.LEFT, Side.RIGHT)), row(" left ", sides(Side.LEFT)), row("top left ", sides(Side.TOP, Side.LEFT)), row("top left bottom ", sides(Side.TOP, Side.LEFT, Side.BOTTOM)) }; } @Test(dataProvider = "locationsTestData") public void expectLocations(TestData<List<Location>> testData) { StringCharReader stringCharReader = new StringCharReader(testData.textForParsing); List<Location> sides = new ExpectLocations().read(stringCharReader); Location[] expected = testData.expected.toArray(new Location[testData.expected.size()]); assertThat(sides.size(), is(expected.length)); assertThat(sides, contains(expected)); } @DataProvider public Object[][] locationsTestData() { return new Object[][]{ row("10 px left right, 10 to 20 px top bottom", locations(new Location(Range.exact(10), sides(Side.LEFT, Side.RIGHT)), new Location(Range.between(10, 20), sides(Side.TOP, Side.BOTTOM)))), row("10 px left, 10 to 20 px top bottom, 30px right", locations(new Location(Range.exact(10), sides(Side.LEFT)), new Location(Range.between(10, 20), sides(Side.TOP, Side.BOTTOM)), new Location(Range.exact(30), sides(Side.RIGHT)))), row(" 10 px left right , 10 to 20 px top bottom ", locations(new Location(Range.exact(10), sides(Side.LEFT, Side.RIGHT)), new Location(Range.between(10, 20), sides(Side.TOP, Side.BOTTOM)))), row("\t10 px left right\t,\t10 to 20 px\ttop\tbottom \t \t \t", locations(new Location(Range.exact(10), sides(Side.LEFT, Side.RIGHT)), new Location(Range.between(10, 20), sides(Side.TOP, Side.BOTTOM)))), }; } @Test(dataProvider = "provideBadLocations") public void shouldGiveError_forIncorrectLocations(String text, String expectedErrorMessage) { StringCharReader stringCharReader = new StringCharReader(text); SyntaxException exception = null; try { new ExpectLocations().read(stringCharReader); } catch (SyntaxException e) { exception = e; } assertThat("Exception should be", exception, is(notNullValue())); assertThat("Exception message should be", exception.getMessage(), is(expectedErrorMessage)); } @DataProvider public Object[][] provideBadLocations() { return new Object[][]{ {"left", "Cannot parse range value: \"\""}, {"10px qwe", "Unknown side: \"qwe\""}, {"10 to 30px qwe", "Unknown side: \"qwe\""}, {"10 to 30% of screen/width qwe", "Unknown side: \"qwe\""}, {"10px left qwe", "Unknown side: \"qwe\""}, {"10px left, 20px qwe", "Unknown side: \"qwe\""}, {"10px left, 20px left qwe", "Unknown side: \"qwe\""}, {"10px left, right, top", "Cannot parse range value: \"\""}, }; } @Test public void shouldParse_commaSeparatedKeyValue() { String text = ",param1 1, param2 v a l u e 2, booleanParam, param3 2.3, param1 2"; List<Pair<String, String>> params = new ExpectCommaSeparatedKeyValue().read(new StringCharReader(text)); assertThat(params.size(), is(5)); assertThat(params.get(0).getKey(), is("param1")); assertThat(params.get(0).getValue(), is("1")); assertThat(params.get(1).getKey(), is("param2")); assertThat(params.get(1).getValue(), is("v a l u e 2")); assertThat(params.get(2).getKey(), is("booleanParam")); assertThat(params.get(2).getValue(), is("")); assertThat(params.get(3).getKey(), is("param3")); assertThat(params.get(3).getValue(), is("2.3")); assertThat(params.get(4).getKey(), is("param1")); assertThat(params.get(4).getValue(), is("2")); } @Test public void shouldParse_commaSeparatedKeyValue_2() { String text = "param1 1, param2 2"; List<Pair<String, String>> params = new ExpectCommaSeparatedKeyValue().read(new StringCharReader(text)); assertThat(params.size(), is(2)); assertThat(params.get(0).getKey(), is("param1")); assertThat(params.get(0).getValue(), is("1")); assertThat(params.get(1).getKey(), is("param2")); assertThat(params.get(1).getValue(), is("2")); } private List<Location> locations(Location...locations) { return Arrays.asList(locations); } private List<Side> sides(Side...sides) { return Arrays.asList(sides); } private <T> Object[] row(String textForParsing, T expectedRange) { return new Object[]{new TestData<>(textForParsing, expectedRange)}; } private class TestData<T> { private String textForParsing; private T expected; public TestData(String textForParsing, T expected) { this.textForParsing = textForParsing; this.expected = expected; } @Override public String toString() { return StringEscapeUtils.escapeJava(textForParsing); } } }
5,918
5,964
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is mozilla.org code. * * The Initial Developer of the Original Code is * Netscape Communications Corporation. * Portions created by the Initial Developer are Copyright (C) 1998 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ #include <windows.h> #include "profilew.h" ProfileWin::ProfileWin() : Profile() { hKey = NULL; char szClass[] = "SpyPluginClass"; DWORD disp = 0L; LONG res = RegCreateKeyEx(HKEY_LOCAL_MACHINE, NPSPY_REG_SUBKEY, 0L, szClass, 0L, KEY_READ | KEY_WRITE, NULL, &hKey, &disp); if(res != ERROR_SUCCESS) hKey = NULL; } ProfileWin::~ProfileWin() { if(hKey) RegCloseKey(hKey); } BOOL ProfileWin::getBool(char * key, BOOL * value) { if(!value) return FALSE; DWORD size = sizeof(DWORD); DWORD val = 1L; LONG res = RegQueryValueEx(hKey, key, 0L, NULL, (BYTE *)&val, &size); if(res != ERROR_SUCCESS) return FALSE; *value = (val == 0L) ? FALSE : TRUE; return TRUE; } BOOL ProfileWin::setBool(char * key, BOOL value) { DWORD size = sizeof(DWORD); DWORD val = value ? 1L : 0L; LONG res = RegSetValueEx(hKey, key, 0L, REG_DWORD, (const BYTE *)&val, size); return (res == ERROR_SUCCESS); } BOOL ProfileWin::getString(char * key, char * string, int size) { LONG res = RegQueryValueEx(hKey, key, 0L, NULL, (BYTE *)string, (DWORD *)&size); return (res == ERROR_SUCCESS); } BOOL ProfileWin::setString(char * key, char * string) { DWORD size = strlen(string); LONG res = RegSetValueEx(hKey, key, 0L, REG_SZ, (const BYTE *)string, size); return (res == ERROR_SUCCESS); } BOOL ProfileWin::getSizeAndPosition(int *width, int *height, int *x, int *y) { DWORD size = sizeof(DWORD); LONG res = ERROR_SUCCESS; res = RegQueryValueEx(hKey, NPSPY_REG_KEY_WIDTH, 0L, NULL, (BYTE *)width, &size); if(res != ERROR_SUCCESS) return FALSE; res = RegQueryValueEx(hKey, NPSPY_REG_KEY_HEIGHT, 0L, NULL, (BYTE *)height, &size); if(res != ERROR_SUCCESS) return FALSE; res = RegQueryValueEx(hKey, NPSPY_REG_KEY_X, 0L, NULL, (BYTE *)x, &size); if(res != ERROR_SUCCESS) return FALSE; res = RegQueryValueEx(hKey, NPSPY_REG_KEY_Y, 0L, NULL, (BYTE *)y, &size); if(res != ERROR_SUCCESS) return FALSE; return TRUE; } BOOL ProfileWin::setSizeAndPosition(int width, int height, int x, int y) { DWORD size = sizeof(DWORD); LONG res = ERROR_SUCCESS; res = RegSetValueEx(hKey, NPSPY_REG_KEY_WIDTH, 0L, REG_DWORD, (const BYTE *)&width, size); if(res != ERROR_SUCCESS) return FALSE; res = RegSetValueEx(hKey, NPSPY_REG_KEY_HEIGHT, 0L, REG_DWORD, (const BYTE *)&height, size); if(res != ERROR_SUCCESS) return FALSE; res = RegSetValueEx(hKey, NPSPY_REG_KEY_X, 0L, REG_DWORD, (const BYTE *)&x, size); if(res != ERROR_SUCCESS) return FALSE; res = RegSetValueEx(hKey, NPSPY_REG_KEY_Y, 0L, REG_DWORD, (const BYTE *)&y, size); if(res != ERROR_SUCCESS) return FALSE; return TRUE; }
1,843
470
<filename>Example/Pods/LFMediaEditingController/LFMediaEditingController/LFMediaEditingController/class/vendors/JRFilterBar/Cell/JRFilterBarCell.h // // JRStrainImageShowViewCell.h // JRCollectionView // // Created by Mr.D on 2018/8/6. // Copyright © 2018年 Mr.D. All rights reserved. // #import <UIKit/UIKit.h> extern CGFloat const JR_LABEL_HEIGHT; @class JRFilterModel; @interface JRFilterBarCell : UICollectionViewCell /** 默认字体和框框颜色 */ @property (nonatomic, strong) UIColor *defaultColor; /** 已选字体和框框颜色 */ @property (nonatomic, strong) UIColor *selectColor; @property (nonatomic, assign) BOOL isSelectedModel; - (void)setCellData:(JRFilterModel *)cellData; + (NSString *)identifier; @end
290
440
<filename>visa/VarSplit.h /*========================== begin_copyright_notice ============================ Copyright (C) 2019-2021 Intel Corporation SPDX-License-Identifier: MIT ============================= end_copyright_notice ===========================*/ #ifndef _VARSPLIT_H_ #define _VARSPLIT_H_ #include "FlowGraph.h" #include "BuildIR.h" #include "RPE.h" namespace vISA { class LiveRange; class GraphColor; class RPE; class GlobalRA; // store mapping of a split variable to original variable. if any split // variable is spilled, we can reuse spill location of original variable. // also store all instructions emitted in preheader, loop exit for each // split variable. this helps eliminate those instruction in case the // split variable itself spills. class SplitResults { public: G4_Declare* origDcl = nullptr; std::unordered_map<G4_BB*, std::unordered_set<G4_INST*>> insts; }; class LoopVarSplit { public: LoopVarSplit(G4_Kernel& k, GraphColor* c, RPE* r); void run(); std::vector<G4_SrcRegRegion*> getReads(G4_Declare* dcl, Loop& loop); std::vector<G4_DstRegRegion*> getWrites(G4_Declare* dcl, Loop& loop); unsigned int getMaxRegPressureInLoop(Loop& loop); void dump(std::ostream& of = std::cerr); static void removeSplitInsts(GlobalRA* gra, G4_Declare* spillDcl, G4_BB* bb); static bool removeFromPreheader(GlobalRA* gra, G4_Declare* spillDcl, G4_BB* bb, INST_LIST_ITER filledInstIter); static bool removeFromLoopExit(GlobalRA* gra, G4_Declare* spillDcl, G4_BB* bb, INST_LIST_ITER filledInstIter); static const std::unordered_set<G4_INST*> getSplitInsts(GlobalRA* gra, G4_BB* bb); private: bool split(G4_Declare* dcl, Loop& loop); void copy(G4_BB* bb, G4_Declare* dst, G4_Declare* src, SplitResults* splitData, bool pushBack = true); void replaceSrc(G4_SrcRegRegion* src, G4_Declare* dcl); void replaceDst(G4_DstRegRegion* dst, G4_Declare* dcl); G4_Declare* getNewDcl(G4_Declare* dcl1, G4_Declare* dcl2); std::vector<Loop*> getLoopsToSplitAround(G4_Declare* dcl); G4_Kernel& kernel; GraphColor* coloring = nullptr; RPE* rpe = nullptr; VarReferences references; // store set of dcls marked as spill in current RA iteration std::unordered_set<G4_Declare*> spilledDclSet; // store spill cost for each dcl std::map<G4_Declare*, float> dclSpillCost; std::unordered_map<G4_Declare*, G4_Declare*> oldNewDcl; std::unordered_map<Loop*, std::unordered_set<G4_Declare*>> splitsPerLoop; std::unordered_map<Loop*, unsigned int> maxRegPressureCache; // a spilled dcl may be split multiple times, once per loop // store this information to uplevel to GlobalRA class so // anytime we spill a split variable, we reuse spill location. // Orig dcl, vector<Tmp Dcl, Loop> std::unordered_map<G4_Declare*, std::vector<std::pair<G4_Declare*, Loop*>>> splitResults; }; class VarProperties { public: enum class AccessGranularity { OneGrf = 1, TwoGrf = 2, Unknown = 3 }; AccessGranularity ag = AccessGranularity::Unknown; unsigned int numDefs = 0; std::pair<G4_DstRegRegion*, G4_BB*> def; std::vector<std::pair<G4_SrcRegRegion*, G4_BB*>> srcs; bool candidateDef = false; bool legitCandidate = true; // API to check whether variable is local or global bool isDefUsesInSameBB() { auto defBB = def.second; for (auto src : srcs) { if (src.second != defBB) return false; } return true; } bool isPartDclUsed(unsigned int lb, unsigned int rb) { // Return true if lb/rb is part of any src regions for (auto& src : srcs) { if (src.first->getLeftBound() >= lb && src.first->getRightBound() <= rb) return true; } return false; } }; class VarSplitPass { public: VarSplitPass(G4_Kernel&); void run(); void replaceIntrinsics(); G4_Declare* getParentDcl(G4_Declare*); std::vector<G4_Declare*>* getChildren(G4_Declare*); std::vector<G4_Declare*> getSiblings(G4_Declare*); bool isSplitDcl(G4_Declare*); bool isPartialDcl(G4_Declare*); unsigned int getSiblingNum(G4_Declare*); unsigned int getIdealAllocation(G4_Declare*, LiveRange**); bool isChildDclUnused(G4_Declare*); void writeHints(G4_Declare*, LiveRange**); void undo(G4_Declare*); bool reallocParent(G4_Declare*, LiveRange**); bool isParentChildRelation(G4_Declare*, G4_Declare*); bool isSplitVarLocal(G4_Declare*); bool splitOccured() { return IRchanged; } private: G4_Kernel& kernel; void findSplitCandidates(); void split(); std::unordered_map<G4_Declare*, VarProperties> splitVars; // split dcl, parent dcl std::unordered_map<G4_Declare*, G4_Declare*> splitParentDcl; // parent dcl, vector<children> std::unordered_map<G4_Declare*, std::vector<G4_Declare*>> splitChildren; // Store child dcls that are never referenced in CFG std::unordered_set<G4_Declare*> unusedDcls; // Store pre-split regions for undo // <new src/dst region, <old src inst, old src rgn, old src#>> std::unordered_map<G4_Operand*, std::tuple<G4_INST*, G4_Operand*, unsigned int>> preSplit; bool IRchanged = false; private: // Split verification related declarations void buildPreVerify(); void verify(); void verifyOverlap(); class InstData { public: G4_DstRegRegion* dst = nullptr; unsigned int dstLb = 0; unsigned int dstRb = 0; G4_Operand* src[G4_MAX_SRCS]; unsigned int srcLb[G4_MAX_SRCS]; unsigned int srcRb[G4_MAX_SRCS]; InstData() { for (unsigned int i = 0; i != G4_MAX_SRCS; i++) { src[i] = nullptr; srcLb[i] = 0; srcRb[i] = 0; } } }; std::unordered_map<G4_INST*, InstData> splitVerify; }; }; #endif
2,526
653
#include <errno.h> #include <string.h> #include "chacha.h" #include "random.h" #include "util.h" #include <sys/random.h> static void get_random_seed(void *buf, size_t size) { while (size) { ssize_t r; do { r = getrandom(buf, size, 0); } while (r == -1 && errno == EINTR); if (r <= 0) { fatal_error("getrandom failed"); } buf = (char *)buf + r; size -= r; } } void random_state_init(struct random_state *state) { u8 rnd[CHACHA_KEY_SIZE + CHACHA_IV_SIZE]; get_random_seed(rnd, sizeof(rnd)); chacha_keysetup(&state->ctx, rnd); chacha_ivsetup(&state->ctx, rnd + CHACHA_KEY_SIZE); state->index = RANDOM_CACHE_SIZE; state->reseed = 0; } void random_state_init_from_random_state(struct random_state *state, struct random_state *source) { u8 rnd[CHACHA_KEY_SIZE + CHACHA_IV_SIZE]; get_random_bytes(source, rnd, sizeof(rnd)); chacha_keysetup(&state->ctx, rnd); chacha_ivsetup(&state->ctx, rnd + CHACHA_KEY_SIZE); state->index = RANDOM_CACHE_SIZE; state->reseed = 0; } static void refill(struct random_state *state) { if (state->reseed >= RANDOM_RESEED_SIZE) { random_state_init(state); } chacha_keystream_bytes(&state->ctx, state->cache, RANDOM_CACHE_SIZE); state->index = 0; state->reseed += RANDOM_CACHE_SIZE; } void get_random_bytes(struct random_state *state, void *buf, size_t size) { // avoid needless copying to and from the cache as an optimization if (size > RANDOM_CACHE_SIZE / 2) { chacha_keystream_bytes(&state->ctx, buf, size); return; } while (size) { if (state->index == RANDOM_CACHE_SIZE) { refill(state); } size_t remaining = RANDOM_CACHE_SIZE - state->index; size_t copy_size = min(size, remaining); memcpy(buf, state->cache + state->index, copy_size); state->index += copy_size; buf = (char *)buf + copy_size; size -= copy_size; } } u16 get_random_u16(struct random_state *state) { u16 value; unsigned remaining = RANDOM_CACHE_SIZE - state->index; if (remaining < sizeof(value)) { refill(state); } memcpy(&value, state->cache + state->index, sizeof(value)); state->index += sizeof(value); return value; } // See Fast Random Integer Generation in an Interval by <NAME> u16 get_random_u16_uniform(struct random_state *state, u16 bound) { u32 random = get_random_u16(state); u32 multiresult = random * bound; u16 leftover = multiresult; if (leftover < bound) { u16 threshold = -bound % bound; while (leftover < threshold) { random = get_random_u16(state); multiresult = random * bound; leftover = (u16)multiresult; } } return multiresult >> 16; } u64 get_random_u64(struct random_state *state) { u64 value; unsigned remaining = RANDOM_CACHE_SIZE - state->index; if (remaining < sizeof(value)) { refill(state); } memcpy(&value, state->cache + state->index, sizeof(value)); state->index += sizeof(value); return value; } // See Fast Random Integer Generation in an Interval by <NAME> u64 get_random_u64_uniform(struct random_state *state, u64 bound) { u128 random = get_random_u64(state); u128 multiresult = random * bound; u64 leftover = multiresult; if (leftover < bound) { u64 threshold = -bound % bound; while (leftover < threshold) { random = get_random_u64(state); multiresult = random * bound; leftover = multiresult; } } return multiresult >> 64; }
1,596
666
<filename>lib/ctrcommon/source/ctrcommon/gput.cpp<gh_stars>100-1000 #include "ctrcommon/gpu.hpp" #include "ctrcommon/platform.hpp" #include <malloc.h> #include <stdio.h> #include <string.h> #include <sstream> #include <stack> #include <3ds.h> #include <math.h> #include "ctrcommon_shader_vsh_shbin.h" #include "ctrcommon_font_bin.h" u32 defaultShader = 0; u32 stringVbo = 0; u32 dummyTexture = 0; u32 fontTexture = 0; float projection[16] = {0}; float modelview[16] = {0}; std::stack<float*> projectionStack; std::stack<float*> modelviewStack; void gputInit() { gpuCreateShader(&defaultShader); gpuLoadShader(defaultShader, ctrcommon_shader_vsh_shbin, ctrcommon_shader_vsh_shbin_size); gputUseDefaultShader(); gpuCreateVbo(&stringVbo); gpuVboAttributes(stringVbo, ATTRIBUTE(0, 3, ATTR_FLOAT) | ATTRIBUTE(1, 2, ATTR_FLOAT) | ATTRIBUTE(2, 4, ATTR_FLOAT), 3); gpuCreateTexture(&dummyTexture); gpuTextureInfo(dummyTexture, 64, 64, PIXEL_RGBA8, TEXTURE_MIN_FILTER(FILTER_NEAREST) | TEXTURE_MAG_FILTER(FILTER_NEAREST)); memset(gpuGetTextureData(dummyTexture), 0xFF, 64 * 64 * 4); void* gpuFont = gpuAlloc(ctrcommon_font_bin_size); memcpy(gpuFont, ctrcommon_font_bin, ctrcommon_font_bin_size); gpuCreateTexture(&fontTexture); gpuTextureData(fontTexture, gpuFont, 128, 128, PIXEL_RGBA8, TEXTURE_MIN_FILTER(FILTER_NEAREST) | TEXTURE_MAG_FILTER(FILTER_NEAREST)); gpuFree(gpuFont); float identity[16]; gputIdentityMatrix(identity); gputProjection(identity); gputModelView(identity); } void gputCleanup() { if(defaultShader != 0) { gpuFreeShader(defaultShader); defaultShader = 0; } if(stringVbo != 0) { gpuFreeVbo(stringVbo); stringVbo = 0; } if(fontTexture != 0) { gpuFreeTexture(fontTexture); fontTexture = 0; } } void gputUseDefaultShader() { gpuUseShader(defaultShader); } void gputMultMatrix4(float* out, const float* m1, const float* m2) { if(out == NULL || m1 == NULL || m2 == NULL) { return; } for(u32 x1 = 0; x1 < 4; x1++) { for(u32 y2 = 0; y2 < 4; y2++) { out[y2 * 4 + x1] = 0; for(u32 y1 = 0; y1 < 4; y1++) { out[y2 * 4 + x1] += m1[y1 * 4 + x1] * m2[y2 * 4 + y1]; } } } } void gputIdentityMatrix(float *out) { if(out == NULL) { return; } memset(out, 0x00, 16 * sizeof(float)); out[0] = 1.0f; out[5] = 1.0f; out[10] = 1.0f; out[15] = 1.0f; } void gputOrthoMatrix(float* out, float left, float right, float bottom, float top, float near, float far) { float orthoMatrix[16]; orthoMatrix[0] = 2.0f / (right - left); orthoMatrix[1] = 0.0f; orthoMatrix[2] = 0.0f; orthoMatrix[3] = -((right + left) / (right - left)); orthoMatrix[4] = 0.0f; orthoMatrix[5] = 2.0f / (top - bottom); orthoMatrix[6] = 0.0f; orthoMatrix[7] = -((top + bottom) / (top - bottom)); orthoMatrix[8] = 0.0f; orthoMatrix[9] = 0.0f; orthoMatrix[10] = 2.0f / (far - near); orthoMatrix[11] = -((far + near) / (far - near)); orthoMatrix[12] = 0.0f; orthoMatrix[13] = 0.0f; orthoMatrix[14] = 0.0f; orthoMatrix[15] = 1.0f; float correction[16]; gputRotationMatrixZ(correction, (float) M_PI / 2.0f); gputMultMatrix4(out, orthoMatrix, correction); } void gputPerspectiveMatrix(float* out, float fovy, float aspect, float near, float far) { float top = near * (float) tan(fovy / 2); float right = top * aspect; float projectionMatrix[16]; projectionMatrix[0] = near / right; projectionMatrix[1] = 0.0f; projectionMatrix[2] = 0.0f; projectionMatrix[3] = 0.0f; projectionMatrix[4] = 0.0f; projectionMatrix[5] = near / top; projectionMatrix[6] = 0.0f; projectionMatrix[7] = 0.0f; projectionMatrix[8] = 0.0f; projectionMatrix[9] = 0.0f; projectionMatrix[10] = -(far + near) / (far - near); projectionMatrix[11] = -2.0f * (far * near) / (far - near); projectionMatrix[12] = 0.0f; projectionMatrix[13] = 0.0f; projectionMatrix[14] = -1.0f; projectionMatrix[15] = 0.0f; float correction[16]; gputIdentityMatrix(correction); correction[10] = 0.5f; correction[11] = -0.5f; gputMultMatrix4(out, correction, projectionMatrix); } void gputTranslationMatrix(float* out, float x, float y, float z) { if(out == NULL) { return; } gputIdentityMatrix(out); out[3] = x; out[7] = y; out[11] = z; } void gputRotationMatrixX(float* out, float rotation) { if(out == NULL) { return; } memset(out, 0x00, 16 * sizeof(float)); out[0] = 1.0f; out[5] = (float) cos(rotation); out[6] = (float) sin(rotation); out[9] = (float) -sin(rotation); out[10] = (float) cos(rotation); out[15] = 1.0f; } void gputRotationMatrixY(float* out, float rotation) { if(out == NULL) { return; } memset(out, 0x00, 16 * sizeof(float)); out[0] = (float) cos(rotation); out[2] = (float) sin(rotation); out[5] = 1.0f; out[8] = (float) -sin(rotation); out[10] = (float) cos(rotation); out[15] = 1.0f; } void gputRotationMatrixZ(float* out, float rotation) { if(out == NULL) { return; } memset(out, 0x00, 16 * sizeof(float)); out[0] = (float) cos(rotation); out[1] = (float) sin(rotation); out[4] = (float) -sin(rotation); out[5] = (float) cos(rotation); out[10] = 1.0f; out[15] = 1.0f; } void gputScaleMatrix(float *matrix, float x, float y, float z) { matrix[0] *= x; matrix[4] *= x; matrix[8] *= x; matrix[12] *= x; matrix[1] *= y; matrix[5] *= y; matrix[9] *= y; matrix[13] *= y; matrix[2] *= z; matrix[6] *= z; matrix[10] *= z; matrix[14] *= z; } void gputPushProjection() { float* old = (float*) malloc(16 * sizeof(float)); memcpy(old, projection, 16 * sizeof(float)); projectionStack.push(old); } void gputPopProjection() { if(projectionStack.empty()) { return; } float* old = projectionStack.top(); projectionStack.pop(); gputProjection(old); free(old); } float* gputGetProjection() { return projection; } void gputProjection(float* matrix) { if(matrix == NULL) { return; } memcpy(projection, matrix, 16 * sizeof(float)); gpuSetUniform(defaultShader, VERTEX_SHADER, "projection", projection, 4); } void gputOrtho(float left, float right, float bottom, float top, float near, float far) { float orthoMatrix[16]; gputOrthoMatrix(orthoMatrix, left, right, bottom, top, near, far); gputProjection(orthoMatrix); } void gputPerspective(float fovy, float aspect, float near, float far) { float perspectiveMatrix[16]; gputPerspectiveMatrix(perspectiveMatrix, fovy, aspect, near, far); gputProjection(perspectiveMatrix); } void gputPushModelView() { float* old = (float*) malloc(16 * sizeof(float)); memcpy(old, modelview, 16 * sizeof(float)); modelviewStack.push(old); } void gputPopModelView() { if(modelviewStack.empty()) { return; } float* old = modelviewStack.top(); modelviewStack.pop(); gputModelView(old); free(old); } float* gputGetModelView() { return modelview; } void gputModelView(float* matrix) { if(matrix == NULL) { return; } memcpy(modelview, matrix, 16 * sizeof(float)); gpuSetUniform(defaultShader, VERTEX_SHADER, "modelview", modelview, 4); } void gputTranslate(float x, float y, float z) { float translationMatrix[16]; gputTranslationMatrix(translationMatrix, x, y, z); float resultMatrix[16]; gputMultMatrix4(resultMatrix, modelview, translationMatrix); gputModelView(resultMatrix); } void gputRotateX(float rotation) { float rotationMatrix[16]; gputRotationMatrixX(rotationMatrix, rotation); float resultMatrix[16]; gputMultMatrix4(resultMatrix, modelview, rotationMatrix); gputModelView(resultMatrix); } void gputRotateY(float rotation) { float rotationMatrix[16]; gputRotationMatrixY(rotationMatrix, rotation); float resultMatrix[16]; gputMultMatrix4(resultMatrix, modelview, rotationMatrix); gputModelView(resultMatrix); } void gputRotateZ(float rotation) { float rotationMatrix[16]; gputRotationMatrixZ(rotationMatrix, rotation); float resultMatrix[16]; gputMultMatrix4(resultMatrix, modelview, rotationMatrix); gputModelView(resultMatrix); } void gputRotate(float x, float y, float z) { float tempMatrix[16]; float tempMatrix2[16]; float tempMatrix3[16]; gputRotationMatrixX(tempMatrix, x); gputRotationMatrixY(tempMatrix2, y); gputMultMatrix4(tempMatrix3, tempMatrix, tempMatrix2); gputRotationMatrixZ(tempMatrix2, z); gputMultMatrix4(tempMatrix, tempMatrix3, tempMatrix2); gputMultMatrix4(tempMatrix2, modelview, tempMatrix); gputModelView(tempMatrix2); } void gputScale(float x, float y, float z) { gputScaleMatrix(modelview, x, y, z); gputModelView(modelview); } float gputGetStringWidth(const std::string str, float charWidth) { u32 len = str.length(); if(len == 0) { return 0; } u32 longestLine = 0; u32 currLength = 0; for(u32 i = 0; i < len; i++) { if(str[i] == '\n') { if(currLength > longestLine) { longestLine = currLength; } currLength = 0; continue; } currLength++; } if(currLength > longestLine) { longestLine = currLength; } return (int) (longestLine * charWidth); } float gputGetStringHeight(const std::string str, float charHeight) { u32 len = str.length(); if(len == 0) { return 0; } u32 lines = 1; for(u32 i = 0; i < len; i++) { if(str[i] == '\n') { lines++; } } return (int) (lines * charHeight); } void gputDrawString(const std::string str, float x, float y, float charWidth, float charHeight, u8 red, u8 green, u8 blue, u8 alpha) { const u32 len = str.length(); if(len == 0) { return; } static const float charSize = 8.0f / 128.0f; const float r = (float) red / 255.0f; const float g = (float) green / 255.0f; const float b = (float) blue / 255.0f; const float a = (float) alpha / 255.0f; gpuVboDataInfo(stringVbo, len * 6, PRIM_TRIANGLES); float* tempVboData = (float*) gpuGetVboData(stringVbo); float cx = x; float cy = y + gputGetStringHeight(str, charHeight) - charHeight; for(u32 i = 0; i < len; i++) { char c = str[i]; if(c == '\n') { memset(tempVboData + (i * 6 * 9), 0, 6 * 9 * sizeof(float)); cx = x; cy -= charHeight; continue; } const float texX1 = (c % 16) * charSize; const float texY1 = 1.0f - ((c / 16 + 1) * charSize); const float texX2 = texX1 + charSize; const float texY2 = texY1 + charSize; const float vboData[] = { cx, cy, -0.1f, texX1, texY1, r, g, b, a, cx + charWidth, cy, -0.1f, texX2, texY1, r, g, b, a, cx + charWidth, cy + charHeight, -0.1f, texX2, texY2, r, g, b, a, cx + charWidth, cy + charHeight, -0.1f, texX2, texY2, r, g, b, a, cx, cy + charHeight, -0.1f, texX1, texY2, r, g, b, a, cx, cy, -0.1f, texX1, texY1, r, g, b, a }; memcpy(tempVboData + (i * 6 * 9), vboData, sizeof(vboData)); cx += charWidth; } gpuBindTexture(TEXUNIT0, fontTexture); gpuDrawVbo(stringVbo); // Flush the GPU command buffer so we can safely reuse the VBO. gpuFlush(); } void gputTakeScreenshot() { u32 headerSize = 0x36; u32 imageSize = 400 * 480 * 3; u8* header = (u8*) malloc(headerSize); memset(header, 0, headerSize); *(u16*) &header[0x0] = 0x4D42; *(u32*) &header[0x2] = headerSize + imageSize; *(u32*) &header[0xA] = headerSize; *(u32*) &header[0xE] = 0x28; *(u32*) &header[0x12] = 400; *(u32*) &header[0x16] = 480; *(u32*) &header[0x1A] = 0x00180001; *(u32*) &header[0x22] = imageSize; u8* image = (u8*) malloc(imageSize); memset(image, 0, imageSize); if(gfxGetScreenFormat(GFX_TOP) == GSP_BGR8_OES) { u8* top = gfxGetFramebuffer(GFX_TOP, GFX_LEFT, NULL, NULL); for(u32 x = 0; x < 400; x++) { for(u32 y = 0; y < 240; y++) { u8* src = &top[((240 - y - 1) + x * 240) * 3]; u8* dst = &image[((479 - y) * 400 + x) * 3]; *(u16*) dst = *(u16*) src; dst[2] = src[2]; } } } if(gfxGetScreenFormat(GFX_BOTTOM) == GSP_BGR8_OES) { u8* bottom = gfxGetFramebuffer(GFX_BOTTOM, GFX_LEFT, NULL, NULL); for(u32 x = 0; x < 320; x++) { for(u32 y = 0; y < 240; y++) { u8* src = &bottom[((240 - y - 1) + x * 240) * 3]; u8* dst = &image[((479 - (y + 240)) * 400 + (x + 40)) * 3]; *(u16*) dst = *(u16*) src; dst[2] = src[2]; } } } std::stringstream fileStream; fileStream << "/screenshot_" << platformGetTime() << ".bmp"; std::string file = fileStream.str(); FILE* fd = fopen(file.c_str(), "wb"); if(fd) { fwrite(header, 1, headerSize, fd); fwrite(image, 1, imageSize, fd); fclose(fd); } free(header); free(image); }
6,296
1,603
package com.linkedin.metadata.kafka.hydrator; import com.datahub.authentication.Authentication; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.linkedin.common.urn.Urn; import com.linkedin.entity.EntityResponse; import com.linkedin.entity.client.EntityClient; import com.linkedin.r2.RemoteInvocationException; import java.net.URISyntaxException; import java.util.Collections; import java.util.Optional; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import static com.linkedin.metadata.Constants.*; @Slf4j @RequiredArgsConstructor public class EntityHydrator { private final Authentication _systemAuthentication; private final EntityClient _entityClient; private final ChartHydrator _chartHydrator = new ChartHydrator(); private final CorpUserHydrator _corpUserHydrator = new CorpUserHydrator(); private final DashboardHydrator _dashboardHydrator = new DashboardHydrator(); private final DataFlowHydrator _dataFlowHydrator = new DataFlowHydrator(); private final DataJobHydrator _dataJobHydrator = new DataJobHydrator(); private final DatasetHydrator _datasetHydrator = new DatasetHydrator(); public Optional<ObjectNode> getHydratedEntity(String entityTypeName, String urn) { final ObjectNode document = JsonNodeFactory.instance.objectNode(); // Hydrate fields from urn Urn urnObj; try { urnObj = Urn.createFromString(urn); } catch (URISyntaxException e) { log.info("Invalid URN: {}", urn); return Optional.empty(); } // Hydrate fields from snapshot EntityResponse entityResponse; try { entityResponse = _entityClient.batchGetV2(entityTypeName, Collections.singleton(urnObj), null, this._systemAuthentication).get(urnObj); } catch (RemoteInvocationException | URISyntaxException e) { log.error("Error while calling GMS to hydrate entity for urn {}", urn); return Optional.empty(); } if (entityResponse == null) { log.error("Could not find entity for urn {}", urn); return Optional.empty(); } switch (entityResponse.getEntityName()) { case CHART_ENTITY_NAME: _chartHydrator.hydrateFromEntityResponse(document, entityResponse); break; case CORP_USER_ENTITY_NAME: _corpUserHydrator.hydrateFromEntityResponse(document, entityResponse); break; case DASHBOARD_ENTITY_NAME: _dashboardHydrator.hydrateFromEntityResponse(document, entityResponse); break; case DATA_FLOW_ENTITY_NAME: _dataFlowHydrator.hydrateFromEntityResponse(document, entityResponse); break; case DATA_JOB_ENTITY_NAME: _dataJobHydrator.hydrateFromEntityResponse(document, entityResponse); break; case DATASET_ENTITY_NAME: _datasetHydrator.hydrateFromEntityResponse(document, entityResponse); break; default: log.error("Unable to find valid hydrator for entity type: {} urn: {}", entityResponse.getEntityName(), urn); return Optional.empty(); } return Optional.of(document); } }
1,099
370
#define DLONG #include <../Cholesky/cholmod_rowfac.c>
23
733
package com.douban.rexxar.view; import android.annotation.TargetApi; import android.app.Activity; import android.content.Context; import android.os.Build; import android.os.Handler; import android.os.Looper; import android.text.TextUtils; import android.util.AttributeSet; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.webkit.MimeTypeMap; import android.webkit.WebResourceRequest; import android.webkit.WebResourceResponse; import android.webkit.WebView; import android.widget.FrameLayout; import android.widget.ProgressBar; import android.widget.Toast; import com.douban.rexxar.Constants; import com.douban.rexxar.R; import com.douban.rexxar.resourceproxy.network.RexxarContainerAPI; import com.douban.rexxar.utils.AppContext; import com.douban.rexxar.utils.BusProvider; import com.douban.rexxar.utils.MimeUtils; import com.douban.rexxar.utils.RxLoadError; import com.douban.rexxar.utils.io.stream.ClosedInputStream; import java.lang.ref.WeakReference; import java.util.Map; /** * pull-to-refresh * error view * * Created by luanqian on 16/4/7. */ public class RexxarWebView extends FrameLayout implements RexxarWebViewCore.UriLoadCallback, RexxarWebViewCore.WebViewHeightCallback, RexxarWebViewCore.ReloadDelegate{ public static final String TAG = "RexxarWebView"; /** * Classes that wish to be notified when the swipe gesture correctly * triggers a refresh should implement this interface. */ public interface OnRefreshListener { void onRefresh(); } private SwipeRefreshLayout mSwipeRefreshLayout; private RexxarWebViewCore mCore; private RexxarErrorView mErrorView; private ProgressBar mProgressBar; private String mUri; private boolean mUsePage; private WeakReference<RexxarWebViewCore.UriLoadCallback> mUriLoadCallback = new WeakReference<RexxarWebViewCore.UriLoadCallback>(null); // 加载时间 private long mStartLoadTime; private boolean mEnablePageAutoPageVisible = true; public RexxarWebView(Context context) { super(context); init(); } public RexxarWebView(Context context, AttributeSet attrs) { super(context, attrs); init(); } public RexxarWebView(Context context, AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); init(); } private void init() { LayoutInflater.from(getContext()).inflate(R.layout.view_rexxar_webview, this, true); mSwipeRefreshLayout = (SwipeRefreshLayout) findViewById(R.id.swipe_refresh_layout); try { mCore = new RexxarWebViewCore(getContext()); mCore.addWebViewHeightCallback(this); mCore.setReloadDelegate(this); mSwipeRefreshLayout.addView(mCore, new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT)); } catch (Exception e) { e.printStackTrace(); // WebView missing, toast & finish activity Toast.makeText(AppContext.getInstance(), R.string.webview_missing, Toast.LENGTH_SHORT).show(); if (null != getContext() && getContext() instanceof Activity) { ((Activity) getContext()).finish(); return; } } mErrorView = (RexxarErrorView) findViewById(R.id.rexxar_error_view); mProgressBar = (ProgressBar) findViewById(R.id.progress_bar); BusProvider.getInstance().register(this); } RexxarWebViewCore.WebViewHeightCallback mCallback; public void addWebViewHeightCallback(RexxarWebViewCore.WebViewHeightCallback callback) { if (null != callback) { mCallback = callback; } } @Override public void onHeightChange(int height) { // 优先用callback if (null != mCallback) { mCallback.onHeightChange(height); } // mSwipeRefreshLayout ViewGroup.LayoutParams layoutParams = mSwipeRefreshLayout.getLayoutParams(); if (null == layoutParams) { layoutParams = new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, height); } else { layoutParams.height = height; } mSwipeRefreshLayout.setLayoutParams(layoutParams); } /** * 设置下拉刷新监听 * @param listener */ public void setOnRefreshListener(final OnRefreshListener listener) { if (null != listener) { mSwipeRefreshLayout.setOnRefreshListener(new android.support.v4.widget.SwipeRefreshLayout.OnRefreshListener() { @Override public void onRefresh() { listener.onRefresh(); } }); } } /** * 下拉刷新颜色 * * @param color */ public void setRefreshMainColor(int color) { if (color > 0) { mSwipeRefreshLayout.setMainColor(color); } } /** * 启用/禁用SwipeRefreshLayout * @param enable */ public void enableSwipeRefreshLayoutNestesScroll(boolean enable) { if (null != mSwipeRefreshLayout) { mSwipeRefreshLayout.setNestedScrollingEnabled(enable); } } /** * 启用/禁用 下拉刷新手势 * * @param enable */ public void enableRefresh(boolean enable) { mSwipeRefreshLayout.setEnabled(enable); } /** * 设置刷新 * @param refreshing */ public void setRefreshing(boolean refreshing) { mSwipeRefreshLayout.setRefreshing(refreshing); } public WebView getWebView() { return mCore; } /***************************设置RexxarWebViewCore的一些方法代理****************************/ public void setWebViewClient(RexxarWebViewClient client) { if (null != mCore) { mCore.setWebViewClient(client); } } public void enableExpandContentHeight(boolean enable) { if (null != mCore) { mCore.enableExpandContentHeight(enable); } } public void setWebviewCallback(RexxarWebViewCore.WebCallbacks callback) { if (null != mCore) { mCore.setWebviewCallback(callback); } } public int getWebContentHeight() { if (null != mCore) { return mCore.getWebViewContentHeight(); } return 0; } public void setWebViewScrollListener(RexxarWebViewCore.WebViewScrollListener scrollListener) { if (null != mCore) { mCore.setWebViewScrollListener(scrollListener); } } /** * 启用/禁用 嵌套滑动 */ public void enableNestedScroll(boolean enable) { mCore.enableNestedScroll(enable); } public void setWebChromeClient(RexxarWebChromeClient client) { if (null != mCore) { mCore.setWebChromeClient(client); } } public void loadUri(String uri) { if (null != mCore) { this.mUri = uri; this.mUsePage = true; mCore.loadUri(uri,this); mStartLoadTime = System.currentTimeMillis() / 1000; } } public void loadUri(String uri, final RexxarWebViewCore.UriLoadCallback callback) { if (null != mCore) { this.mUri = uri; this.mUsePage = true; if (null != callback) { this.mUriLoadCallback = new WeakReference<RexxarWebViewCore.UriLoadCallback>(callback); } mCore.loadUri(uri, this); mStartLoadTime = System.currentTimeMillis() / 1000; } } public void loadPartialUri(String uri) { if (null != mCore) { mCore.loadPartialUri(uri); this.mUri = uri; this.mUsePage = false; mStartLoadTime = System.currentTimeMillis() / 1000; } } public void loadPartialUri(String uri, final RexxarWebViewCore.UriLoadCallback callback) { if (null != mCore) { this.mUri = uri; this.mUsePage = false; if (null != callback) { this.mUriLoadCallback = new WeakReference<RexxarWebViewCore.UriLoadCallback>(callback); } mCore.loadPartialUri(uri, this); mStartLoadTime = System.currentTimeMillis() / 1000; } } @Override public boolean onStartLoad() { post(new Runnable() { @Override public void run() { if (null == mUriLoadCallback.get() || !mUriLoadCallback.get().onStartLoad()) { mProgressBar.setVisibility(View.VISIBLE); } } }); return true; } @Override public boolean onStartDownloadHtml() { post(new Runnable() { @Override public void run() { if (null == mUriLoadCallback.get() || !mUriLoadCallback.get().onStartDownloadHtml()) { mProgressBar.setVisibility(View.VISIBLE); } } }); return true; } @Override public boolean onSuccess() { post(new Runnable() { @Override public void run() { if (null == mUriLoadCallback.get() || !mUriLoadCallback.get().onSuccess()) { mProgressBar.setVisibility(View.GONE); } } }); return true; } @Override public boolean onFail(final RxLoadError error) { post(new Runnable() { @Override public void run() { if (null == mUriLoadCallback.get() || !mUriLoadCallback.get().onFail(error)) { mProgressBar.setVisibility(View.GONE); mErrorView.show(error.message); } } }); return true; } public void destroy() { if (null != mCore) { // 调用生命周期函数 onPageDestroy(); setWebViewClient(new NullWebViewClient()); // 页面加载时间超过4s之后才可以直接销毁 if (System.currentTimeMillis() / 1000 - mStartLoadTime > 4) { destroyWebViewCore(); } else { new Handler(Looper.getMainLooper()).postDelayed(new Runnable() { @Override public void run() { destroyWebViewCore(); } }, 3000); } } } private void destroyWebViewCore() { try { if (null != mCore) { mSwipeRefreshLayout.removeView(mCore); mCore.loadUrl("about:blank"); mCore.stopLoading(); // 退出时调用此方法,移除绑定的服务,否则某些特定系统会报错 mCore.getSettings().setJavaScriptEnabled(false); mCore.clearHistory(); mCore.clearView(); mCore.removeAllViews(); mCore.destroy(); } } catch (Throwable ex) { } mCore = null; } public void loadUrl(String url) { if (null != mCore) { mCore.loadUrl(url); mStartLoadTime = System.currentTimeMillis() / 1000; } } public void loadData(String data, String mimeType, String encoding) { if (null != mCore) { mCore.loadData(data, mimeType, encoding); mStartLoadTime = System.currentTimeMillis() / 1000; } } public void loadUrl(String url, Map<String, String> additionalHttpHeaders) { if (null != mCore) { mCore.loadUrl(url, additionalHttpHeaders); mStartLoadTime = System.currentTimeMillis() / 1000; } } public void loadDataWithBaseURL(String baseUrl, String data, String mimeType, String encoding, String historyUrl) { if (null != mCore) { mCore.loadDataWithBaseURL(baseUrl, data, mimeType, encoding, historyUrl); mStartLoadTime = System.currentTimeMillis() / 1000; } } public void onPause() { if (null != mCore) { mCore.onPause(); } } public void onResume() { if (null != mCore) { mCore.onResume(); } } @Override protected void onWindowVisibilityChanged(int visibility) { super.onWindowVisibilityChanged(visibility); if (null != mCore) { if (mEnablePageAutoPageVisible) { if (visibility == View.VISIBLE) { onPageVisible(); } else { onPageInvisible(); } } } } public void disableAutoPageVisible() { mEnablePageAutoPageVisible = false; } /** * 自定义url拦截处理 * * @param widget */ public void addRexxarWidget(RexxarWidget widget) { if (null == widget) { return; } if (null != mCore) { mCore.addRexxarWidget(widget); } } /** * 自定义container api * * @param containerAPI */ public void addContainerApi(RexxarContainerAPI containerAPI) { if (null != containerAPI && null != mCore) { mCore.addContainerApi(containerAPI); } } public void onPageVisible() { callFunction("Rexxar.Lifecycle.onPageVisible"); } public void onPageInvisible() { callFunction("Rexxar.Lifecycle.onPageInvisible"); } public void onPageDestroy() { callFunction("Rexxar.Lifecycle.onPageDestroy"); } @Override protected void onDetachedFromWindow() { BusProvider.getInstance().unregister(this); super.onDetachedFromWindow(); } public void onEventMainThread(BusProvider.BusEvent event) { if (event.eventId == Constants.EVENT_REXXAR_RETRY) { mErrorView.setVisibility(View.GONE); reload(); } else if (event.eventId == Constants.EVENT_REXXAR_NETWORK_ERROR) { boolean handled = false; RxLoadError error = RxLoadError.UNKNOWN; if (null != event.data) { error = event.data.getParcelable(Constants.KEY_ERROR); } if (null != mUriLoadCallback && null != mUriLoadCallback.get()) { handled = mUriLoadCallback.get().onFail(error); } if (!handled) { mProgressBar.setVisibility(View.GONE); mErrorView.show(error.message); } } } /** * 重新加载页面 */ public void reload() { if (null != mCore) { if (mUsePage) { mCore.loadUri(mUri, this); } else { mCore.loadPartialUri(mUri, this); } } } @Override public void onReload() { reload(); } /** * Native调用js方法, 传递参数 * * @param functionName 方法名 */ public void callFunction(String functionName) { callFunction(functionName, null); } /** * Native调用js方法, 传递参数 * * @param functionName 方法名 * @param jsonString 参数,需要是json格式 */ public void callFunction(String functionName, String jsonString) { if (TextUtils.isEmpty(functionName)) { return; } if (null == mCore) { return; } if (TextUtils.isEmpty(jsonString)) { mCore.loadUrl(String.format(Constants.FUNC_FORMAT, functionName)); } else { jsonString = jsonString.replaceAll("(\\\\)([^utrn])", "\\\\\\\\$1$2"); jsonString = jsonString.replaceAll("(\\\\)([utrn])", "\\\\$1$2"); jsonString = jsonString.replaceAll("(?<=[^\\\\])(\")", "\\\\\""); jsonString = jsonString.replaceAll("(?<=[^\\\\])(\')", "\\\\\'"); String command = String.format(Constants.FUNC_FORMAT_WITH_PARAMETERS, functionName, jsonString); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) { try { mCore.evaluateJavascript(command, null); } catch (Exception e) { mCore.loadUrl(String.format(Constants.FUNC_FORMAT_WITH_PARAMETERS, functionName, jsonString)); } } else { mCore.loadUrl(String.format(Constants.FUNC_FORMAT_WITH_PARAMETERS, functionName, jsonString)); } } } /** * 存在的原因 * 因为我们通过shouldInterceptRequest来实现拦截,经测试发现快速打开rexxar页面再退出,连续5次左右会出现rexxar页无法打开的情况; * 而原生的webview不存在这个问题,经过定位发现如果不覆写shouldInterceptRequest这个方法,就不会出现这个问题。 * * 清除WebViewClient是在WebView的destroy方法实现的,所以rexxar的webview必须尽快调用destory方法。 * * 但因为退出时要调用js方法,稍微延迟destory,所以通过主动设置一个没有实现shouldInterceptRequest的RexxarWebViewClient来避免能上面的问题。 */ private static class NullWebViewClient extends RexxarWebViewClient{ @Override public boolean shouldOverrideUrlLoading(WebView view, String url) { return true; } @Override public boolean shouldOverrideUrlLoading(WebView view, WebResourceRequest request) { return true; } @TargetApi(21) @Override public WebResourceResponse shouldInterceptRequest(WebView view, WebResourceRequest request) { String fileExtension = MimeTypeMap.getFileExtensionFromUrl(request.getUrl().toString()); String mimeType = MimeUtils.guessMimeTypeFromExtension(fileExtension); return new WebResourceResponse(mimeType, "UTF-8", new ClosedInputStream()); } @Override public WebResourceResponse shouldInterceptRequest(WebView view, String url) { String fileExtension = MimeTypeMap.getFileExtensionFromUrl(url); String mimeType = MimeUtils.guessMimeTypeFromExtension(fileExtension); return new WebResourceResponse(mimeType, "UTF-8", new ClosedInputStream()); } } }
8,830
3,562
<reponame>kaiker19/incubator-doris // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.apache.doris.flink.cfg; import java.io.Serializable; import java.util.Properties; /** * Options for the Doris stream connector. */ public class DorisStreamOptions implements Serializable { private static final long serialVersionUID = 1L; private Properties prop; private DorisOptions options; private DorisReadOptions readOptions; public DorisStreamOptions(Properties prop) { this.prop = prop; init(); } /** * convert DorisStreamOptions to DorisOptions and DorisReadOptions */ private void init() { DorisOptions.Builder optionsBuilder = DorisOptions.builder() .setFenodes(prop.getProperty(ConfigurationOptions.DORIS_FENODES)) .setUsername(prop.getProperty(ConfigurationOptions.DORIS_USER)) .setPassword(prop.getProperty(ConfigurationOptions.DORIS_PASSWORD)) .setTableIdentifier(prop.getProperty(ConfigurationOptions.TABLE_IDENTIFIER)); DorisReadOptions.Builder readOptionsBuilder = DorisReadOptions.builder() .setDeserializeArrowAsync(Boolean.valueOf(prop.getProperty(ConfigurationOptions.DORIS_DESERIALIZE_ARROW_ASYNC,ConfigurationOptions.DORIS_DESERIALIZE_ARROW_ASYNC_DEFAULT.toString()))) .setDeserializeQueueSize(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_DESERIALIZE_QUEUE_SIZE,ConfigurationOptions.DORIS_DESERIALIZE_QUEUE_SIZE_DEFAULT.toString()))) .setExecMemLimit(Long.valueOf(prop.getProperty(ConfigurationOptions.DORIS_EXEC_MEM_LIMIT,ConfigurationOptions.DORIS_EXEC_MEM_LIMIT_DEFAULT.toString()))) .setFilterQuery(prop.getProperty(ConfigurationOptions.DORIS_FILTER_QUERY)) .setReadFields(prop.getProperty(ConfigurationOptions.DORIS_READ_FIELD)) .setRequestQueryTimeoutS(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_REQUEST_QUERY_TIMEOUT_S,ConfigurationOptions.DORIS_REQUEST_QUERY_TIMEOUT_S_DEFAULT.toString()))) .setRequestBatchSize(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_BATCH_SIZE,ConfigurationOptions.DORIS_BATCH_SIZE_DEFAULT.toString()))) .setRequestConnectTimeoutMs(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_REQUEST_CONNECT_TIMEOUT_MS,ConfigurationOptions.DORIS_REQUEST_CONNECT_TIMEOUT_MS_DEFAULT.toString()))) .setRequestReadTimeoutMs(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_REQUEST_READ_TIMEOUT_MS,ConfigurationOptions.DORIS_REQUEST_READ_TIMEOUT_MS_DEFAULT.toString()))) .setRequestRetries(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_REQUEST_RETRIES,ConfigurationOptions.DORIS_REQUEST_RETRIES_DEFAULT.toString()))) .setRequestTabletSize(Integer.valueOf(prop.getProperty(ConfigurationOptions.DORIS_TABLET_SIZE,ConfigurationOptions.DORIS_TABLET_SIZE_DEFAULT.toString()))); this.options = optionsBuilder.build(); this.readOptions = readOptionsBuilder.build(); } public DorisOptions getOptions() { return options; } public DorisReadOptions getReadOptions() { return readOptions; } }
1,421
480
/* * Copyright [2013-2021], Alibaba Group Holding Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.polardbx.gms.topology; import com.alibaba.polardbx.common.utils.TStringUtil; import lombok.Data; import lombok.EqualsAndHashCode; import java.sql.ResultSet; import java.sql.SQLException; /** * @author chenghui.lch */ @EqualsAndHashCode(callSuper = true) @Data public class GroupDetailInfoExRecord extends GroupDetailInfoRecord implements Comparable<GroupDetailInfoExRecord> { public String phyDbName; public GroupDetailInfoExRecord() { } public GroupDetailInfoExRecord(String groupName, String storageInst) { this.groupName = groupName; this.storageInstId = storageInst; } @Override public GroupDetailInfoExRecord fill(ResultSet rs) throws SQLException { this.storageInstId = rs.getString("storage_inst_id"); this.dbName = rs.getString("db_name"); this.groupName = rs.getString("group_name"); this.phyDbName = rs.getString("phy_db_name"); return this; } @Override public int compareTo(GroupDetailInfoExRecord o) { int res = TStringUtil.compareTo(this.dbName, o.dbName); if (res != 0) { return res; } res = TStringUtil.compareTo(this.groupName, o.groupName); return res; } }
667
1,939
<reponame>Kinway050/bk-ci /* * Tencent is pleased to support the open source community by making BlueKing available. * Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. * Licensed under the MIT License (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * http://opensource.org/licenses/MIT * Unless required by applicable law or agreed to in writing, software distributed under * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.tencent.bk.codecc.defect.dao.mongotemplate; import com.tencent.bk.codecc.defect.model.CodeFileUrlEntity; import org.apache.commons.collections.CollectionUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.mongodb.core.BulkOperations; import org.springframework.data.mongodb.core.MongoTemplate; import org.springframework.data.mongodb.core.query.Criteria; import org.springframework.data.mongodb.core.query.Query; import org.springframework.data.mongodb.core.query.Update; import org.springframework.stereotype.Repository; import java.util.List; /** * 代码仓库url的持久化 * * @date 2019/10/25 * @version V1.0 */ @Repository public class CodeFileUrlDao { @Autowired private MongoTemplate mongoTemplate; public void upsert(long taskId, List<CodeFileUrlEntity> codeFileUrlEntityList) { BulkOperations ops = mongoTemplate.bulkOps(BulkOperations.BulkMode.UNORDERED, CodeFileUrlEntity.class); if (CollectionUtils.isNotEmpty(codeFileUrlEntityList)) { for (CodeFileUrlEntity entity : codeFileUrlEntityList) { Query query = new Query(); query.addCriteria(Criteria.where("task_id").is(taskId) .and("file_path").is(entity.getFile())); Update update = new Update(); update.set("task_id", taskId) .set("file_path", entity.getFile()) .set("url", entity.getUrl()) .set("version", entity.getVersion()) .set("scm_type", entity.getScmType()); ops.upsert(query, update); } ops.execute(); } } }
964
902
package com.megster.cordova; import android.Manifest; import android.content.pm.PackageManager; import android.app.Activity; import android.bluetooth.BluetoothAdapter; import android.bluetooth.BluetoothDevice; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.os.Handler; import android.os.Message; import android.provider.Settings; import android.util.Log; import org.apache.cordova.CordovaArgs; import org.apache.cordova.CordovaPlugin; import org.apache.cordova.CallbackContext; import org.apache.cordova.PluginResult; import org.apache.cordova.LOG; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.util.Set; /** * PhoneGap Plugin for Serial Communication over Bluetooth */ public class BluetoothSerial extends CordovaPlugin { // actions private static final String LIST = "list"; private static final String CONNECT = "connect"; private static final String CONNECT_INSECURE = "connectInsecure"; private static final String DISCONNECT = "disconnect"; private static final String WRITE = "write"; private static final String AVAILABLE = "available"; private static final String READ = "read"; private static final String READ_UNTIL = "readUntil"; private static final String SUBSCRIBE = "subscribe"; private static final String UNSUBSCRIBE = "unsubscribe"; private static final String SUBSCRIBE_RAW = "subscribeRaw"; private static final String UNSUBSCRIBE_RAW = "unsubscribeRaw"; private static final String IS_ENABLED = "isEnabled"; private static final String IS_CONNECTED = "isConnected"; private static final String CLEAR = "clear"; private static final String SETTINGS = "showBluetoothSettings"; private static final String ENABLE = "enable"; private static final String DISCOVER_UNPAIRED = "discoverUnpaired"; private static final String SET_DEVICE_DISCOVERED_LISTENER = "setDeviceDiscoveredListener"; private static final String CLEAR_DEVICE_DISCOVERED_LISTENER = "clearDeviceDiscoveredListener"; private static final String SET_NAME = "setName"; private static final String SET_DISCOVERABLE = "setDiscoverable"; // callbacks private CallbackContext connectCallback; private CallbackContext dataAvailableCallback; private CallbackContext rawDataAvailableCallback; private CallbackContext enableBluetoothCallback; private CallbackContext deviceDiscoveredCallback; private BluetoothAdapter bluetoothAdapter; private BluetoothSerialService bluetoothSerialService; // Debugging private static final String TAG = "BluetoothSerial"; private static final boolean D = true; // Message types sent from the BluetoothSerialService Handler public static final int MESSAGE_STATE_CHANGE = 1; public static final int MESSAGE_READ = 2; public static final int MESSAGE_WRITE = 3; public static final int MESSAGE_DEVICE_NAME = 4; public static final int MESSAGE_TOAST = 5; public static final int MESSAGE_READ_RAW = 6; // Key names received from the BluetoothChatService Handler public static final String DEVICE_NAME = "device_name"; public static final String TOAST = "toast"; StringBuffer buffer = new StringBuffer(); private String delimiter; private static final int REQUEST_ENABLE_BLUETOOTH = 1; // Android 23 requires user to explicitly grant permission for location to discover unpaired private static final String ACCESS_COARSE_LOCATION = Manifest.permission.ACCESS_COARSE_LOCATION; private static final int CHECK_PERMISSIONS_REQ_CODE = 2; private CallbackContext permissionCallback; @Override public boolean execute(String action, CordovaArgs args, CallbackContext callbackContext) throws JSONException { LOG.d(TAG, "action = " + action); if (bluetoothAdapter == null) { bluetoothAdapter = BluetoothAdapter.getDefaultAdapter(); } if (bluetoothSerialService == null) { bluetoothSerialService = new BluetoothSerialService(mHandler); } boolean validAction = true; if (action.equals(LIST)) { listBondedDevices(callbackContext); } else if (action.equals(CONNECT)) { boolean secure = true; connect(args, secure, callbackContext); } else if (action.equals(CONNECT_INSECURE)) { // see Android docs about Insecure RFCOMM http://goo.gl/1mFjZY boolean secure = false; connect(args, secure, callbackContext); } else if (action.equals(DISCONNECT)) { connectCallback = null; bluetoothSerialService.stop(); callbackContext.success(); } else if (action.equals(WRITE)) { byte[] data = args.getArrayBuffer(0); bluetoothSerialService.write(data); callbackContext.success(); } else if (action.equals(AVAILABLE)) { callbackContext.success(available()); } else if (action.equals(READ)) { callbackContext.success(read()); } else if (action.equals(READ_UNTIL)) { String interesting = args.getString(0); callbackContext.success(readUntil(interesting)); } else if (action.equals(SUBSCRIBE)) { delimiter = args.getString(0); dataAvailableCallback = callbackContext; PluginResult result = new PluginResult(PluginResult.Status.NO_RESULT); result.setKeepCallback(true); callbackContext.sendPluginResult(result); } else if (action.equals(UNSUBSCRIBE)) { delimiter = null; // send no result, so Cordova won't hold onto the data available callback anymore PluginResult result = new PluginResult(PluginResult.Status.NO_RESULT); dataAvailableCallback.sendPluginResult(result); dataAvailableCallback = null; callbackContext.success(); } else if (action.equals(SUBSCRIBE_RAW)) { rawDataAvailableCallback = callbackContext; PluginResult result = new PluginResult(PluginResult.Status.NO_RESULT); result.setKeepCallback(true); callbackContext.sendPluginResult(result); } else if (action.equals(UNSUBSCRIBE_RAW)) { rawDataAvailableCallback = null; callbackContext.success(); } else if (action.equals(IS_ENABLED)) { if (bluetoothAdapter.isEnabled()) { callbackContext.success(); } else { callbackContext.error("Bluetooth is disabled."); } } else if (action.equals(IS_CONNECTED)) { if (bluetoothSerialService.getState() == BluetoothSerialService.STATE_CONNECTED) { callbackContext.success(); } else { callbackContext.error("Not connected."); } } else if (action.equals(CLEAR)) { buffer.setLength(0); callbackContext.success(); } else if (action.equals(SETTINGS)) { Intent intent = new Intent(Settings.ACTION_BLUETOOTH_SETTINGS); cordova.getActivity().startActivity(intent); callbackContext.success(); } else if (action.equals(ENABLE)) { enableBluetoothCallback = callbackContext; Intent intent = new Intent(BluetoothAdapter.ACTION_REQUEST_ENABLE); cordova.startActivityForResult(this, intent, REQUEST_ENABLE_BLUETOOTH); } else if (action.equals(DISCOVER_UNPAIRED)) { if (cordova.hasPermission(ACCESS_COARSE_LOCATION)) { discoverUnpairedDevices(callbackContext); } else { permissionCallback = callbackContext; cordova.requestPermission(this, CHECK_PERMISSIONS_REQ_CODE, ACCESS_COARSE_LOCATION); } } else if (action.equals(SET_DEVICE_DISCOVERED_LISTENER)) { this.deviceDiscoveredCallback = callbackContext; } else if (action.equals(CLEAR_DEVICE_DISCOVERED_LISTENER)) { this.deviceDiscoveredCallback = null; } else if (action.equals(SET_NAME)) { String newName = args.getString(0); bluetoothAdapter.setName(newName); callbackContext.success(); } else if (action.equals(SET_DISCOVERABLE)) { int discoverableDuration = args.getInt(0); Intent discoverIntent = new Intent(BluetoothAdapter.ACTION_REQUEST_DISCOVERABLE); discoverIntent.putExtra(BluetoothAdapter.EXTRA_DISCOVERABLE_DURATION, discoverableDuration); cordova.getActivity().startActivity(discoverIntent); } else { validAction = false; } return validAction; } @Override public void onActivityResult(int requestCode, int resultCode, Intent data) { if (requestCode == REQUEST_ENABLE_BLUETOOTH) { if (resultCode == Activity.RESULT_OK) { Log.d(TAG, "User enabled Bluetooth"); if (enableBluetoothCallback != null) { enableBluetoothCallback.success(); } } else { Log.d(TAG, "User did *NOT* enable Bluetooth"); if (enableBluetoothCallback != null) { enableBluetoothCallback.error("User did not enable Bluetooth"); } } enableBluetoothCallback = null; } } @Override public void onDestroy() { super.onDestroy(); if (bluetoothSerialService != null) { bluetoothSerialService.stop(); } } private void listBondedDevices(CallbackContext callbackContext) throws JSONException { JSONArray deviceList = new JSONArray(); Set<BluetoothDevice> bondedDevices = bluetoothAdapter.getBondedDevices(); for (BluetoothDevice device : bondedDevices) { deviceList.put(deviceToJSON(device)); } callbackContext.success(deviceList); } private void discoverUnpairedDevices(final CallbackContext callbackContext) throws JSONException { final CallbackContext ddc = deviceDiscoveredCallback; final BroadcastReceiver discoverReceiver = new BroadcastReceiver() { private JSONArray unpairedDevices = new JSONArray(); public void onReceive(Context context, Intent intent) { String action = intent.getAction(); if (BluetoothDevice.ACTION_FOUND.equals(action)) { BluetoothDevice device = intent.getParcelableExtra(BluetoothDevice.EXTRA_DEVICE); try { JSONObject o = deviceToJSON(device); unpairedDevices.put(o); if (ddc != null) { PluginResult res = new PluginResult(PluginResult.Status.OK, o); res.setKeepCallback(true); ddc.sendPluginResult(res); } } catch (JSONException e) { // This shouldn't happen, log and ignore Log.e(TAG, "Problem converting device to JSON", e); } } else if (BluetoothAdapter.ACTION_DISCOVERY_FINISHED.equals(action)) { callbackContext.success(unpairedDevices); cordova.getActivity().unregisterReceiver(this); } } }; Activity activity = cordova.getActivity(); activity.registerReceiver(discoverReceiver, new IntentFilter(BluetoothDevice.ACTION_FOUND)); activity.registerReceiver(discoverReceiver, new IntentFilter(BluetoothAdapter.ACTION_DISCOVERY_FINISHED)); bluetoothAdapter.startDiscovery(); } private JSONObject deviceToJSON(BluetoothDevice device) throws JSONException { JSONObject json = new JSONObject(); json.put("name", device.getName()); json.put("address", device.getAddress()); json.put("id", device.getAddress()); if (device.getBluetoothClass() != null) { json.put("class", device.getBluetoothClass().getDeviceClass()); } return json; } private void connect(CordovaArgs args, boolean secure, CallbackContext callbackContext) throws JSONException { String macAddress = args.getString(0); BluetoothDevice device = bluetoothAdapter.getRemoteDevice(macAddress); if (device != null) { connectCallback = callbackContext; bluetoothSerialService.connect(device, secure); buffer.setLength(0); PluginResult result = new PluginResult(PluginResult.Status.NO_RESULT); result.setKeepCallback(true); callbackContext.sendPluginResult(result); } else { callbackContext.error("Could not connect to " + macAddress); } } // The Handler that gets information back from the BluetoothSerialService // Original code used handler for the because it was talking to the UI. // Consider replacing with normal callbacks private final Handler mHandler = new Handler() { public void handleMessage(Message msg) { switch (msg.what) { case MESSAGE_READ: buffer.append((String)msg.obj); if (dataAvailableCallback != null) { sendDataToSubscriber(); } break; case MESSAGE_READ_RAW: if (rawDataAvailableCallback != null) { byte[] bytes = (byte[]) msg.obj; sendRawDataToSubscriber(bytes); } break; case MESSAGE_STATE_CHANGE: if(D) Log.i(TAG, "MESSAGE_STATE_CHANGE: " + msg.arg1); switch (msg.arg1) { case BluetoothSerialService.STATE_CONNECTED: Log.i(TAG, "BluetoothSerialService.STATE_CONNECTED"); notifyConnectionSuccess(); break; case BluetoothSerialService.STATE_CONNECTING: Log.i(TAG, "BluetoothSerialService.STATE_CONNECTING"); break; case BluetoothSerialService.STATE_LISTEN: Log.i(TAG, "BluetoothSerialService.STATE_LISTEN"); break; case BluetoothSerialService.STATE_NONE: Log.i(TAG, "BluetoothSerialService.STATE_NONE"); break; } break; case MESSAGE_WRITE: // byte[] writeBuf = (byte[]) msg.obj; // String writeMessage = new String(writeBuf); // Log.i(TAG, "Wrote: " + writeMessage); break; case MESSAGE_DEVICE_NAME: Log.i(TAG, msg.getData().getString(DEVICE_NAME)); break; case MESSAGE_TOAST: String message = msg.getData().getString(TOAST); notifyConnectionLost(message); break; } } }; private void notifyConnectionLost(String error) { if (connectCallback != null) { connectCallback.error(error); connectCallback = null; } } private void notifyConnectionSuccess() { if (connectCallback != null) { PluginResult result = new PluginResult(PluginResult.Status.OK); result.setKeepCallback(true); connectCallback.sendPluginResult(result); } } private void sendRawDataToSubscriber(byte[] data) { if (data != null && data.length > 0) { PluginResult result = new PluginResult(PluginResult.Status.OK, data); result.setKeepCallback(true); rawDataAvailableCallback.sendPluginResult(result); } } private void sendDataToSubscriber() { String data = readUntil(delimiter); if (data != null && data.length() > 0) { PluginResult result = new PluginResult(PluginResult.Status.OK, data); result.setKeepCallback(true); dataAvailableCallback.sendPluginResult(result); sendDataToSubscriber(); } } private int available() { return buffer.length(); } private String read() { int length = buffer.length(); String data = buffer.substring(0, length); buffer.delete(0, length); return data; } private String readUntil(String c) { String data = ""; int index = buffer.indexOf(c, 0); if (index > -1) { data = buffer.substring(0, index + c.length()); buffer.delete(0, index + c.length()); } return data; } @Override public void onRequestPermissionResult(int requestCode, String[] permissions, int[] grantResults) throws JSONException { for(int result:grantResults) { if(result == PackageManager.PERMISSION_DENIED) { LOG.d(TAG, "User *rejected* location permission"); this.permissionCallback.sendPluginResult(new PluginResult( PluginResult.Status.ERROR, "Location permission is required to discover unpaired devices.") ); return; } } switch(requestCode) { case CHECK_PERMISSIONS_REQ_CODE: LOG.d(TAG, "User granted location permission"); discoverUnpairedDevices(permissionCallback); break; } } }
7,771
410
<gh_stars>100-1000 package com.nepxion.aquarius.idgenerator.local.impl; /** * <p>Title: Nepxion Aquarius</p> * <p>Description: Nepxion Aquarius</p> * <p>Copyright: Copyright (c) 2017-2050</p> * <p>Company: Nepxion</p> * @author <NAME> * @version 1.0 */ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Value; import com.nepxion.aquarius.common.constant.AquariusConstant; import com.nepxion.aquarius.common.util.DateUtil; import com.nepxion.aquarius.common.util.StringUtil; import com.nepxion.aquarius.idgenerator.local.LocalIdGenerator; public class LocalIdGeneratorImpl implements LocalIdGenerator { private static final Logger LOG = LoggerFactory.getLogger(LocalIdGeneratorImpl.class); private static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss:SSS"; private static final long DEFAULT_START_TIMESTAMP = 1483200000000L; // 2017-01-01 00:00:00:000 private volatile Map<String, SnowflakeIdGenerator> idGeneratorMap = new ConcurrentHashMap<String, SnowflakeIdGenerator>(); @Value("${" + AquariusConstant.FREQUENT_LOG_PRINT + ":false}") private Boolean frequentLogPrint; @Override public String nextUniqueId(long dataCenterId, long machineId) throws Exception { return nextUniqueId(DEFAULT_START_TIMESTAMP, dataCenterId, machineId); } @Override public String nextUniqueId(String startTimestamp, long dataCenterId, long machineId) throws Exception { return nextUniqueId(DateUtil.parseDate(startTimestamp, DATE_FORMAT).getTime(), dataCenterId, machineId); } @Override public String nextUniqueId(long startTimestamp, long dataCenterId, long machineId) throws Exception { String nextUniqueId = getIdGenerator(startTimestamp, dataCenterId, machineId).nextId(); if (frequentLogPrint) { LOG.info("Next unique id is {} for startTimestamp={}, dataCenterId={}, machineId={}", nextUniqueId, startTimestamp, dataCenterId, machineId); } return nextUniqueId; } @Override public String[] nextUniqueIds(long dataCenterId, long machineId, int count) throws Exception { return nextUniqueIds(DEFAULT_START_TIMESTAMP, dataCenterId, machineId, count); } @Override public String[] nextUniqueIds(String startTimestamp, long dataCenterId, long machineId, int count) throws Exception { return nextUniqueIds(DateUtil.parseDate(startTimestamp, DATE_FORMAT).getTime(), dataCenterId, machineId, count); } @Override public String[] nextUniqueIds(long startTimestamp, long dataCenterId, long machineId, int count) throws Exception { String[] nextUniqueIds = getIdGenerator(startTimestamp, dataCenterId, machineId).nextIds(count); if (frequentLogPrint) { LOG.info("Next unique ids is {} for startTimestamp={}, dataCenterId={}, machineId={}, count={}", StringUtil.convert(nextUniqueIds), startTimestamp, dataCenterId, machineId, count); } return nextUniqueIds; } private SnowflakeIdGenerator getIdGenerator(long startTimestamp, long dataCenterId, long machineId) { String key = dataCenterId + "-" + machineId; SnowflakeIdGenerator idGenerator = idGeneratorMap.get(key); if (idGenerator == null) { SnowflakeIdGenerator newIdGnerator = new SnowflakeIdGenerator(startTimestamp, dataCenterId, machineId); idGenerator = idGeneratorMap.putIfAbsent(key, newIdGnerator); if (idGenerator == null) { idGenerator = newIdGnerator; } } return idGenerator; } }
1,350
454
<filename>vertx-gaia/vertx-up/src/main/java/io/vertx/up/commune/envelop/Rib.java package io.vertx.up.commune.envelop; import io.vertx.core.buffer.Buffer; import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; import io.vertx.up.eon.Constants; import io.vertx.up.exception.WebException; public class Rib { public static <T> JsonObject input(final T data) { return RibTool.input(data); } public static WebException normalize(final WebException error) { return RibTool.normalize(error); } public static <T> T deserialize(final Object value, final Class<?> clazz) { return RibTool.deserialize(value, clazz); } public static JsonObject outJson(final JsonObject data, final WebException error) { return RibTool.outJson(data, error); } public static Buffer outBuffer(final JsonObject data, final WebException error) { return RibTool.outBuffer(data, error); } public static JsonObject getBody(final JsonObject data) { return RibData.getBody(data); } public static <T> T get(final JsonObject data) { return RibData.get(data); } public static <T> T get(final JsonObject data, final Class<?> clazz) { return RibData.get(data, clazz); } public static <T> T get(final JsonObject data, final Class<?> clazz, final Integer index) { return RibData.get(data, clazz, index); } public static <T> void set(final JsonObject data, final String field, final T value, final Integer argIndex) { RibData.set(data, field, value, argIndex); } public static boolean isIndex(final Integer argIndex) { return Constants.INDEXES.containsKey(argIndex); } public static void projection(final JsonObject reference, final JsonArray projection, final boolean clear) { RibIr.irProjection(reference, projection, clear); } public static void criteria(final JsonObject reference, final JsonObject criteria, final boolean clear) { RibIr.irCriteria(reference, criteria, clear); } }
755
721
<reponame>payrollhero/google-api-ruby-client { "api_id": "accesscontextmanager:v1", "name_pretty": "Access Context Manager API", "distribution_name": "google-apis-accesscontextmanager_v1", "language": "ruby", "library_type": "REST" }
99
483
<filename>nitrite/src/main/java/org/dizitart/no2/common/module/PluginManager.java /* * Copyright (c) 2017-2020. Nitrite author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.dizitart.no2.common.module; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.dizitart.no2.NitriteConfig; import org.dizitart.no2.exceptions.NitriteIOException; import org.dizitart.no2.exceptions.PluginException; import org.dizitart.no2.index.*; import org.dizitart.no2.common.mapper.MappableMapper; import org.dizitart.no2.common.mapper.NitriteMapper; import org.dizitart.no2.store.NitriteStore; import org.dizitart.no2.store.memory.InMemoryStoreModule; import java.util.HashMap; import java.util.Map; /** * The nitrite database plugin manager. It loads the nitrite plugins * before opening the database. * * @see NitriteModule * @see NitritePlugin * @author <NAME>. * @since 4.0 */ @Slf4j @Getter public class PluginManager implements AutoCloseable { private final Map<String, NitriteIndexer> indexerMap; private final NitriteConfig nitriteConfig; private NitriteMapper nitriteMapper; private NitriteStore<?> nitriteStore; /** * Instantiates a new {@link PluginManager}. * * @param nitriteConfig the nitrite config */ public PluginManager(NitriteConfig nitriteConfig) { this.indexerMap = new HashMap<>(); this.nitriteConfig = nitriteConfig; } /** * Loads a {@link NitriteModule} instance. * * @param module the module */ public void loadModule(NitriteModule module) { if (module != null && module.plugins() != null) { for (NitritePlugin plugin : module.plugins()) { loadPlugin(plugin); } } } /** * Find and loads all nitrite plugins configured. */ public void findAndLoadPlugins() { try { loadInternalPlugins(); } catch (Exception e) { log.error("Error while loading internal plugins", e); throw new PluginException("error while loading internal plugins", e); } } /** * Initializes all plugins instances. */ public void initializePlugins() { if (nitriteStore != null) { initializePlugin(nitriteStore); } else { log.error("No storage engine found. Please ensure that a storage module has been loaded properly"); throw new NitriteIOException("no storage engine found"); } if (nitriteMapper != null) { initializePlugin(nitriteMapper); } if (!indexerMap.isEmpty()) { for (NitriteIndexer nitriteIndexer : indexerMap.values()) { initializePlugin(nitriteIndexer); } } } @Override public void close() { for (NitriteIndexer nitriteIndexer : indexerMap.values()) { nitriteIndexer.close(); } if (nitriteMapper != null) { nitriteMapper.close(); } if (nitriteStore != null) { nitriteStore.close(); } } private void loadPlugin(NitritePlugin plugin) { populatePlugins(plugin); } private void initializePlugin(NitritePlugin plugin) { plugin.initialize(nitriteConfig); } private void populatePlugins(NitritePlugin plugin) { if (plugin != null) { if (plugin instanceof NitriteIndexer) { loadIndexer((NitriteIndexer) plugin); } else if (plugin instanceof NitriteMapper) { loadNitriteMapper((NitriteMapper) plugin); } else if (plugin instanceof NitriteStore) { loadNitriteStore((NitriteStore<?>) plugin); } else { plugin.close(); throw new PluginException("invalid plugin loaded " + plugin); } } } private void loadNitriteStore(NitriteStore<?> nitriteStore) { if (this.nitriteStore != null) { nitriteStore.close(); throw new PluginException("multiple NitriteStore found"); } this.nitriteStore = nitriteStore; } private void loadNitriteMapper(NitriteMapper nitriteMapper) { if (this.nitriteMapper != null) { nitriteMapper.close(); throw new PluginException("multiple NitriteMapper found"); } this.nitriteMapper = nitriteMapper; } private synchronized void loadIndexer(NitriteIndexer nitriteIndexer) { if (indexerMap.containsKey(nitriteIndexer.getIndexType())) { nitriteIndexer.close(); throw new PluginException("multiple Indexer found for type " + nitriteIndexer.getIndexType()); } this.indexerMap.put(nitriteIndexer.getIndexType(), nitriteIndexer); } protected void loadInternalPlugins() { if (!indexerMap.containsKey(IndexType.UNIQUE)) { log.debug("Loading default unique indexer"); NitritePlugin plugin = new UniqueIndexer(); loadPlugin(plugin); } if (!indexerMap.containsKey(IndexType.NON_UNIQUE)) { log.debug("Loading default non-unique indexer"); NitritePlugin plugin = new NonUniqueIndexer(); loadPlugin(plugin); } if (!indexerMap.containsKey(IndexType.FULL_TEXT)) { log.debug("Loading nitrite text indexer"); NitritePlugin plugin = new NitriteTextIndexer(); loadPlugin(plugin); } if (nitriteMapper == null) { log.debug("Loading mappable mapper"); NitritePlugin plugin = new MappableMapper(); loadPlugin(plugin); } if (nitriteStore == null) { loadModule(new InMemoryStoreModule()); log.warn("No persistent storage module found, creating an in-memory database"); } } }
2,638
2,010
from colorama import init from colorama import Fore, Back, Style import tensorflow as tf from terminaltables import SingleTable from natsort import natsorted def print_table(TABLE_DATA): table_instance = SingleTable(TABLE_DATA, "") table_instance.justify_columns[2] = 'right' print(table_instance.table) print def print_bright(s): init() print(Style.BRIGHT + s + Style.RESET_ALL) def print_green(info, value=""): print(Fore.GREEN + "[%s] " % info + Style.RESET_ALL + str(value)) def print_red(info, value=""): print(Fore.RED + "[%s] " % info + Style.RESET_ALL + str(value)) def print_session(session_type): FLAGS = tf.app.flags.FLAGS print_bright("\nSetting up TF %s session:" % session_type) for key in natsorted(FLAGS.__dict__["__flags"].keys()): if "dir" not in key: print_green(key, FLAGS.__dict__["__flags"][key]) def print_directories(list_delete, list_create): print_bright("\nConfiguring directories:") for d in list_delete: print_red("Deleting", d) for d in list_create: print_green("Creating", d) def print_initialize(): print_bright("\nInitialization:") print_green("Created session saver") print_green("Ran init ops") def print_summaries(): print_bright("\nSummaries:") list_summaries = tf.get_collection(tf.GraphKeys.SUMMARIES) for t in list_summaries: print_green(t.name) def print_queues(): print_bright("\nQueues:") print_green("Created coordinator") print_green("Started queue runner") def print_check_data(out, list_data): print TABLE_DATA = (('Variable Name', 'Shape', "Min value", "Max value"),) for o, t in zip(out, list_data): TABLE_DATA += (tuple([t.name, str(o.shape), "%.3g" % o.min(), "%.3g" % o.max()]),) print_table(TABLE_DATA) def print_checkpoint(checkpoint): print_bright("\nCheckpointing:") print_green(checkpoint.model_checkpoint_path) def print_meta_graph(meta_graph_path): print_bright("\nMetagraph:") print_green("Loaded " + meta_graph_path) def print_restore(): print_bright("\nRestored session")
839
2,151
<gh_stars>1000+ // Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef NET_CERT_CT_POLICY_STATUS_H_ #define NET_CERT_CT_POLICY_STATUS_H_ namespace net { namespace ct { // Information about the connection's compliance with the CT policy. This value // is histogrammed, so do not re-order or change values, and add new values at // the end. enum class CTPolicyCompliance { // The connection complied with the certificate policy by // including SCTs that satisfy the policy. CT_POLICY_COMPLIES_VIA_SCTS = 0, // The connection did not have enough SCTs to comply. CT_POLICY_NOT_ENOUGH_SCTS = 1, // The connection did not have diverse enough SCTs to comply. CT_POLICY_NOT_DIVERSE_SCTS = 2, // The connection cannot be considered compliant because the build // isn't timely and therefore log information might be out of date // (for example a log might no longer be considered trustworthy). CT_POLICY_BUILD_NOT_TIMELY = 3, // Compliance details for the connection are not available, e.g. because a // resource was loaded from disk cache. CT_POLICY_COMPLIANCE_DETAILS_NOT_AVAILABLE = 4, CT_POLICY_MAX }; } // namespace ct } // namespace net #endif // NET_CERT_CT_POLICY_STATUS_H_
432
2,132
module org.bytedeco.librealsense { requires transitive org.bytedeco.javacpp; exports org.bytedeco.librealsense.global; exports org.bytedeco.librealsense.presets; exports org.bytedeco.librealsense; }
78
575
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef SERVICES_NETWORK_PUBLIC_CPP_P2P_PARAM_TRAITS_H_ #define SERVICES_NETWORK_PUBLIC_CPP_P2P_PARAM_TRAITS_H_ // IPC messages for the P2P Transport API. #include <stdint.h> #include "base/component_export.h" #include "base/time/time.h" #include "ipc/ipc_message_macros.h" #include "net/base/ip_address.h" #include "net/base/network_change_notifier.h" #include "net/base/network_interfaces.h" #include "services/network/public/cpp/p2p_socket_type.h" #include "third_party/webrtc/rtc_base/async_packet_socket.h" #ifndef INTERNAL_SERVICES_NETWORK_PUBLIC_CPP_P2P_PARAM_TRAITS_H_ #define INTERNAL_SERVICES_NETWORK_PUBLIC_CPP_P2P_PARAM_TRAITS_H_ #undef IPC_MESSAGE_EXPORT #define IPC_MESSAGE_EXPORT COMPONENT_EXPORT(NETWORK_CPP_BASE) #endif // INTERNAL_SERVICES_NETWORK_PUBLIC_CPP_P2P_PARAM_TRAITS_H_ IPC_ENUM_TRAITS_MAX_VALUE(network::P2PSocketType, network::P2P_SOCKET_TYPE_LAST) IPC_ENUM_TRAITS_MAX_VALUE(network::P2PSocketOption, network::P2P_SOCKET_OPT_MAX - 1) IPC_ENUM_TRAITS_MAX_VALUE(net::NetworkChangeNotifier::ConnectionType, net::NetworkChangeNotifier::CONNECTION_LAST) IPC_ENUM_TRAITS_MIN_MAX_VALUE(rtc::DiffServCodePoint, rtc::DSCP_NO_CHANGE, rtc::DSCP_CS7) IPC_STRUCT_TRAITS_BEGIN(rtc::PacketTimeUpdateParams) IPC_STRUCT_TRAITS_MEMBER(rtp_sendtime_extension_id) IPC_STRUCT_TRAITS_MEMBER(srtp_auth_key) IPC_STRUCT_TRAITS_MEMBER(srtp_auth_tag_len) IPC_STRUCT_TRAITS_MEMBER(srtp_packet_index) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(rtc::PacketOptions) IPC_STRUCT_TRAITS_MEMBER(dscp) IPC_STRUCT_TRAITS_MEMBER(packet_id) IPC_STRUCT_TRAITS_MEMBER(packet_time_params) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(network::P2PHostAndIPEndPoint) IPC_STRUCT_TRAITS_MEMBER(hostname) IPC_STRUCT_TRAITS_MEMBER(ip_address) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(network::P2PSendPacketMetrics) IPC_STRUCT_TRAITS_MEMBER(packet_id) IPC_STRUCT_TRAITS_MEMBER(rtc_packet_id) IPC_STRUCT_TRAITS_MEMBER(send_time_ms) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(network::P2PPortRange) IPC_STRUCT_TRAITS_MEMBER(min_port) IPC_STRUCT_TRAITS_MEMBER(max_port) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(network::P2PPacketInfo) IPC_STRUCT_TRAITS_MEMBER(destination) IPC_STRUCT_TRAITS_MEMBER(packet_options) IPC_STRUCT_TRAITS_MEMBER(packet_id) IPC_STRUCT_TRAITS_END() #endif // SERVICES_NETWORK_PUBLIC_CPP_P2P_PARAM_TRAITS_H_
1,266
304
<reponame>apolovyi/warnings-ng-plugin package io.jenkins.plugins.analysis.core.util; import java.util.ArrayList; import java.util.Collection; import java.util.List; import com.google.errorprone.annotations.FormatMethod; import io.jenkins.plugins.analysis.core.util.QualityGate.QualityGateResult; import io.jenkins.plugins.analysis.core.util.QualityGate.QualityGateType; /** * Evaluates a set of quality gates for a static analysis report. * * @author <NAME> */ public class QualityGateEvaluator { private final List<QualityGate> qualityGates = new ArrayList<>(); /** * Enforces this quality gate for the specified run. * * @param report * the report to evaluate * @param logger * the logger that reports the passed and failed quality gate thresholds * * @return result of the evaluation, expressed by a build state */ public QualityGateStatus evaluate(final IssuesStatistics report, final FormattedLogger logger) { if (qualityGates.isEmpty()) { logger.print("-> INACTIVE - No quality gate defined"); return QualityGateStatus.INACTIVE; } QualityGateStatus status = QualityGateStatus.PASSED; for (QualityGate qualityGate : qualityGates) { int actualSize = qualityGate.getActualSizeMethodReference().apply(report); if (actualSize >= qualityGate.getThreshold()) { logger.print("-> %s - %s: %d - Quality QualityGate: %d", qualityGate.getStatus(), qualityGate.getName(), actualSize, qualityGate.getThreshold()); if (qualityGate.getStatus().isWorseThan(status)) { status = qualityGate.getStatus(); } } else { logger.print("-> PASSED - %s: %d - Quality QualityGate: %d", qualityGate.getName(), actualSize, qualityGate.getThreshold()); } } return status; } /** * Appends the specified quality gates to the end of the list of quality gates. * * @param size * the minimum number of issues that fails the quality gate * @param type * the type of the quality gate * @param strength * determines whether the quality gate is a warning or failure */ public void add(final int size, final QualityGateType type, final QualityGateResult strength) { qualityGates.add(new QualityGate(size, type, strength)); } /** * Appends all of the quality gates in the specified collection to the end of the list of quality gates. * * @param additionalQualityGates * the quality gates to add */ public void addAll(final Collection<? extends QualityGate> additionalQualityGates) { this.qualityGates.addAll(additionalQualityGates); } /** * Returns whether at least one quality gate has been added. * * @return {@code true} if at least one quality gate has been added, {@code false} otherwise */ public boolean isEnabled() { return !qualityGates.isEmpty(); } /** * Logs results of the quality gate evaluation. */ @FunctionalInterface public interface FormattedLogger { /** * Logs the specified message. * * @param format * A <a href="../util/Formatter.html#syntax">format string</a> * @param args * Arguments referenced by the format specifiers in the format string. If there are more arguments than * format specifiers, the extra arguments are ignored. The number of arguments is variable and may be * zero. */ @FormatMethod void print(String format, Object... args); } }
1,486
2,137
<reponame>lxconan/cms-attacking package com.publiccms.views.method.tools; import org.springframework.stereotype.Component; /** * * GetMd5Method * */ @Component public class GetSha2Method extends GetSha512Method { }
91
4,054
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once #include <vespa/searchlib/fef/blueprint.h> #include <vespa/searchlib/fef/featureexecutor.h> namespace search { namespace fef { namespace test { class StaticRankExecutor : public FeatureExecutor { private: const search::attribute::IAttributeVector * _attribute; public: StaticRankExecutor(const search::attribute::IAttributeVector * attribute); void execute(uint32_t docId) override; }; class StaticRankBlueprint : public Blueprint { private: std::string _attributeName; public: StaticRankBlueprint(); ~StaticRankBlueprint(); void visitDumpFeatures(const IIndexEnvironment &, IDumpFeatureVisitor &) const override {} Blueprint::UP createInstance() const override { return Blueprint::UP(new StaticRankBlueprint()); } bool setup(const IIndexEnvironment & indexEnv, const StringVector & params) override; FeatureExecutor &createExecutor(const IQueryEnvironment &queryEnv, vespalib::Stash &stash) const override; }; } // namespace test } // namespace fef } // namespace search
344
1,144
package de.metas.handlingunits.allocation; /* * #%L * de.metas.handlingunits.base * %% * Copyright (C) 2015 metas GmbH * %% * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation, either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program. If not, see * <http://www.gnu.org/licenses/gpl-2.0.html>. * #L% */ import java.math.BigDecimal; import java.time.ZonedDateTime; import org.adempiere.util.lang.impl.TableRecordReference; import org.compiere.model.I_C_UOM; import de.metas.handlingunits.IHUContext; import de.metas.product.ProductId; import de.metas.quantity.Quantity; /** * Allocation Request describes what we need to allocate/deallocate. Implementors are immutable. Instead of modifying it (split, create partial requests etc), use {@link IAllocationRequestBuilder} to * get new instances. * * @author tsa * */ public interface IAllocationRequest { BigDecimal QTY_INFINITE = Quantity.QTY_INFINITE; IHUContext getHUContext(); /** * @return allocation date */ ZonedDateTime getDate(); ProductId getProductId(); BigDecimal getQty(); /** * * @return true if this request is asking about infinite quantity */ boolean isInfiniteQty(); /** * @return true if this request is asking about ZERO quantity */ boolean isZeroQty(); I_C_UOM getC_UOM(); /** * * @return quantity/uom (source quantity/uom) */ Quantity getQuantity(); /** * Gets referenced model. * * In case you are doing allocations/deallocations, creating new HUs, the Qty changes or newly created HUs can be linked to this model. This is done in * {@link de.metas.handlingunits.hutransaction.IHUTrxListener} implementations and those implementation decide when and how this is made. * * @return referenced model (e.g. a document line) */ TableRecordReference getReference(); /** * * @return <code>true</code> if we shall allocate the qty even if the destination is already full */ boolean isForceQtyAllocation(); }
748
565
<gh_stars>100-1000 # Copyright (c) ZenML GmbH 2021. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. import os from typing import List, Optional, Union from urllib.request import urlopen import pandas as pd from zenml.core.repo import Repository from zenml.logger import get_logger from zenml.pipelines import BasePipeline from zenml.steps.step_interfaces.base_datasource_step import ( BaseDatasourceConfig, BaseDatasourceStep, ) logger = get_logger(__name__) DATASET_PATH = "diabetes.csv" DATASET_SRC = ( "https://storage.googleapis.com/zenml-public-bucket/" "pima-indians-diabetes/diabetes.csv" ) # Download the dataset for this example if not os.path.isfile(DATASET_PATH): logger.info(f"Downloading dataset {DATASET_PATH}") with urlopen(DATASET_SRC) as data: content = data.read().decode() with open(DATASET_PATH, "w") as output: output.write(content) class PandasDatasourceConfig(BaseDatasourceConfig): path: str sep: str = "," header: Union[int, List[int], str] = "infer" names: Optional[List[str]] = None index_col: Optional[Union[int, str, List[Union[int, str]], bool]] = None class PandasDatasource(BaseDatasourceStep): def entrypoint( self, config: PandasDatasourceConfig, ) -> pd.DataFrame: return pd.read_csv( filepath_or_buffer=config.path, sep=config.sep, header=config.header, names=config.names, index_col=config.index_col, ) class Chapter1Pipeline(BasePipeline): """Class for Chapter 1 of the class-based API""" def connect( self, datasource: BaseDatasourceStep, ) -> None: datasource() pipeline_instance = Chapter1Pipeline( datasource=PandasDatasource(PandasDatasourceConfig(path=DATASET_PATH)) ) pipeline_instance.run() # Post-execution repo = Repository() p = repo.get_pipeline(pipeline_name="Chapter1Pipeline") runs = p.runs print(f"Pipeline `Chapter1Pipeline` has {len(runs)} run(s)") run = runs[-1] print(f"The run you just made has {len(run.steps)} step(s).") step = run.get_step("datasource") print(f"That step has {len(step.outputs)} output artifacts.")
1,051
784
<gh_stars>100-1000 /** * Copyright (c) 2015-2021, <NAME> 杨福海 (<EMAIL>). * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.jboot.support.redis.jedis; import com.jfinal.log.Log; import io.jboot.exception.JbootException; import io.jboot.support.redis.JbootRedisBase; import io.jboot.support.redis.JbootRedisConfig; import io.jboot.support.redis.RedisScanResult; import io.jboot.utils.QuietlyUtil; import io.jboot.utils.StrUtil; import org.apache.commons.pool2.impl.GenericObjectPoolConfig; import redis.clients.jedis.*; import redis.clients.jedis.exceptions.JedisConnectionException; import java.util.*; import java.util.Map.Entry; /** * 参考: com.jfinal.plugin.redis * JbootRedis 命令文档: http://redisdoc.com/ */ public class JbootJedisClusterImpl extends JbootRedisBase { protected JedisCluster jedisCluster; private int timeout = 2000; private int maxAttempts = 5; static final Log LOG = Log.getLog(JbootJedisClusterImpl.class); public JbootJedisClusterImpl(JbootRedisConfig config) { super(config); Integer timeout = config.getTimeout(); String password = config.getPassword(); Integer maxAttempts = config.getMaxAttempts(); if (timeout != null) { this.timeout = timeout; } if (maxAttempts == null) { maxAttempts = this.maxAttempts; } GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig(); if (StrUtil.isNotBlank(config.getTestWhileIdle())) { poolConfig.setTestWhileIdle(config.getTestWhileIdle()); } if (StrUtil.isNotBlank(config.getTestOnBorrow())) { poolConfig.setTestOnBorrow(config.getTestOnBorrow()); } if (StrUtil.isNotBlank(config.getTestOnCreate())) { poolConfig.setTestOnCreate(config.getTestOnCreate()); } if (StrUtil.isNotBlank(config.getTestOnReturn())) { poolConfig.setTestOnReturn(config.getTestOnReturn()); } if (StrUtil.isNotBlank(config.getMinEvictableIdleTimeMillis())) { poolConfig.setMinEvictableIdleTimeMillis(config.getMinEvictableIdleTimeMillis()); } if (StrUtil.isNotBlank(config.getTimeBetweenEvictionRunsMillis())) { poolConfig.setTimeBetweenEvictionRunsMillis(config.getTimeBetweenEvictionRunsMillis()); } if (StrUtil.isNotBlank(config.getNumTestsPerEvictionRun())) { poolConfig.setNumTestsPerEvictionRun(config.getNumTestsPerEvictionRun()); } if (StrUtil.isNotBlank(config.getMaxTotal())) { poolConfig.setMaxTotal(config.getMaxTotal()); } if (StrUtil.isNotBlank(config.getMaxIdle())) { poolConfig.setMaxIdle(config.getMaxIdle()); } if (StrUtil.isNotBlank(config.getMinIdle())) { poolConfig.setMinIdle(config.getMinIdle()); } if (StrUtil.isNotBlank(config.getMaxWaitMillis())) { poolConfig.setMaxWaitMillis(config.getMaxWaitMillis()); } this.jedisCluster = newJedisCluster(config.getHostAndPorts(), timeout, maxAttempts, password, poolConfig); } public static JedisCluster newJedisCluster(Set<HostAndPort> haps, Integer timeout, Integer maxAttempts, String password, GenericObjectPoolConfig poolConfig) { JedisCluster jedisCluster; if (timeout != null && maxAttempts != null && password != null && poolConfig != null) { jedisCluster = new JedisCluster(haps, timeout, timeout, maxAttempts, password, poolConfig); } else if (timeout != null && maxAttempts != null && poolConfig != null) { jedisCluster = new JedisCluster(haps, timeout, maxAttempts, poolConfig); } else if (timeout != null && maxAttempts != null) { jedisCluster = new JedisCluster(haps, timeout, maxAttempts); } else if (timeout != null && poolConfig != null) { jedisCluster = new JedisCluster(haps, timeout, poolConfig); } else if (timeout != null) { jedisCluster = new JedisCluster(haps, timeout); } else { jedisCluster = new JedisCluster(haps); } return jedisCluster; } public JbootJedisClusterImpl(JedisCluster jedisCluster) { super(null); this.jedisCluster = jedisCluster; } /** * 存放 key value 对到 redis * 如果 key 已经持有其他值, SET 就覆写旧值,无视类型。 * 对于某个原本带有生存时间(TTL)的键来说, 当 SET 命令成功在这个键上执行时, 这个键原有的 TTL 将被清除。 */ @Override public String set(Object key, Object value) { return jedisCluster.set(keyToBytes(key), valueToBytes(value)); } @Override public Long setnx(Object key, Object value) { return jedisCluster.setnx(keyToBytes(key), valueToBytes(value)); } /** * 存放 key value 对到 redis * 如果 key 已经持有其他值, SET 就覆写旧值,无视类型。 * 此方法用了修改 incr 等的值 */ public String setWithoutSerialize(Object key, Object value) { return jedisCluster.set(keyToBytes(key), value.toString().getBytes()); } /** * 存放 key value 对到 redis,并将 key 的生存时间设为 seconds (以秒为单位)。 * 如果 key 已经存在, SETEX 命令将覆写旧值。 */ public String setex(Object key, int seconds, Object value) { return jedisCluster.setex(keyToBytes(key), seconds, valueToBytes(value)); } /** * 返回 key 所关联的 value 值 * 如果 key 不存在那么返回特殊值 nil 。 */ @SuppressWarnings("unchecked") public <T> T get(Object key) { return (T) valueFromBytes(jedisCluster.get(keyToBytes(key))); } @Override public String getWithoutSerialize(Object key) { byte[] bytes = jedisCluster.get(keyToBytes(key)); if (bytes == null || bytes.length == 0) { return null; } return new String(jedisCluster.get(keyToBytes(key))); } /** * 删除给定的一个 key * 不存在的 key 会被忽略。 */ public Long del(Object key) { return jedisCluster.del(keyToBytes(key)); } /** * 删除给定的多个 key * 不存在的 key 会被忽略。 */ public Long del(Object... keys) { return jedisCluster.del(keysToBytesArray(keys)); } /** * 查找所有符合给定模式 pattern 的 key 。 * KEYS * 匹配数据库中所有 key 。 * KEYS h?llo 匹配 hello , hallo 和 hxllo 等。 * KEYS h*llo 匹配 hllo 和 heeeeello 等。 * KEYS h[ae]llo 匹配 hello 和 hallo ,但不匹配 hillo 。 * 特殊符号用 \ 隔开 */ public Set<String> keys(String pattern) { HashSet<String> keys = new HashSet<>(); Map<String, JedisPool> clusterNodes = jedisCluster.getClusterNodes(); for (String k : clusterNodes.keySet()) { JedisPool jp = clusterNodes.get(k); Jedis jedis = jp.getResource(); try { keys.addAll(jedis.keys(pattern)); } catch (Exception e) { LOG.error(e.toString(), e); } finally { jedis.close(); //用完一定要close这个链接!!! } } return keys; } /** * 同时设置一个或多个 key-value 对。 * 如果某个给定 key 已经存在,那么 MSET 会用新值覆盖原来的旧值,如果这不是你所希望的效果,请考虑使用 MSETNX 命令:它只会在所有给定 key 都不存在的情况下进行设置操作。 * MSET 是一个原子性(atomic)操作,所有给定 key 都会在同一时间内被设置,某些给定 key 被更新而另一些给定 key 没有改变的情况,不可能发生。 * <pre> * 例子: * Cache cache = RedisKit.use(); // 使用 JbootRedis 的 cache * cache.mset("k1", "v1", "k2", "v2"); // 放入多个 key value 键值对 * List list = cache.mget("k1", "k2"); // 利用多个键值得到上面代码放入的值 * </pre> */ public String mset(Object... keysValues) { if (keysValues.length % 2 != 0) throw new IllegalArgumentException("wrong number of arguments for met, keysValues length can not be odd"); byte[][] kv = new byte[keysValues.length][]; for (int i = 0; i < keysValues.length; i++) { if (i % 2 == 0) kv[i] = keyToBytes(keysValues[i]); else kv[i] = valueToBytes(keysValues[i]); } return jedisCluster.mset(kv); } /** * 返回所有(一个或多个)给定 key 的值。 * 如果给定的 key 里面,有某个 key 不存在,那么这个 key 返回特殊值 nil 。因此,该命令永不失败。 */ @SuppressWarnings("rawtypes") public List mget(Object... keys) { byte[][] keysBytesArray = keysToBytesArray(keys); List<byte[]> data = jedisCluster.mget(keysBytesArray); return valueListFromBytesList(data); } /** * 将 key 中储存的数字值减一。 * 如果 key 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 DECR 操作。 * 如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。 * 本操作的值限制在 64 位(bit)有符号数字表示之内。 * 关于递增(increment) / 递减(decrement)操作的更多信息,请参见 INCR 命令。 */ public Long decr(Object key) { return jedisCluster.decr(keyToBytes(key)); } /** * 将 key 所储存的值减去减量 decrement 。 * 如果 key 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 DECRBY 操作。 * 如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。 * 本操作的值限制在 64 位(bit)有符号数字表示之内。 * 关于更多递增(increment) / 递减(decrement)操作的更多信息,请参见 INCR 命令。 */ public Long decrBy(Object key, long longValue) { return jedisCluster.decrBy(keyToBytes(key), longValue); } /** * 将 key 中储存的数字值增一。 * 如果 key 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 INCR 操作。 * 如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。 * 本操作的值限制在 64 位(bit)有符号数字表示之内。 */ public Long incr(Object key) { return jedisCluster.incr(keyToBytes(key)); } /** * 将 key 所储存的值加上增量 increment 。 * 如果 key 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 INCRBY 命令。 * 如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。 * 本操作的值限制在 64 位(bit)有符号数字表示之内。 * 关于递增(increment) / 递减(decrement)操作的更多信息,参见 INCR 命令。 */ public Long incrBy(Object key, long longValue) { return jedisCluster.incrBy(keyToBytes(key), longValue); } /** * 检查给定 key 是否存在。 */ public boolean exists(Object key) { return jedisCluster.exists(keyToBytes(key)); } /** * 从当前数据库中随机返回(不删除)一个 key 。 */ public String randomKey() { throw new JbootException("not support randomKey commmand in redis cluster."); } /** * 将 key 改名为 newkey 。 * 当 key 和 newkey 相同,或者 key 不存在时,返回一个错误。 * 当 newkey 已经存在时, RENAME 命令将覆盖旧值。 */ public String rename(Object oldkey, Object newkey) { return jedisCluster.rename(keyToBytes(oldkey), keyToBytes(newkey)); } /** * 将当前数据库的 key 移动到给定的数据库 db 当中。 * 如果当前数据库(源数据库)和给定数据库(目标数据库)有相同名字的给定 key ,或者 key 不存在于当前数据库,那么 MOVE 没有任何效果。 * 因此,也可以利用这一特性,将 MOVE 当作锁(locking)原语(primitive)。 */ public Long move(Object key, int dbIndex) { // return jedisCluster.move(keyToBytes(key), dbIndex); throw new JbootException("not support move commmand in redis cluster."); } /** * 将 key 原子性地从当前实例传送到目标实例的指定数据库上,一旦传送成功, key 保证会出现在目标实例上,而当前实例上的 key 会被删除。 */ public String migrate(String host, int port, Object key, int destinationDb, int timeout) { throw new JbootException("not support migrate commmand in redis cluster."); } /** * 切换到指定的数据库,数据库索引号 index 用数字值指定,以 0 作为起始索引值。 * 默认使用 0 号数据库。 * 注意:在 Jedis 对象被关闭时,数据库又会重新被设置为初始值,所以本方法 select(...) * 正常工作需要使用如下方式之一: * 1:使用 RedisInterceptor,在本线程内共享同一个 Jedis 对象 * 2:使用 JbootRedis.call(ICallback) 进行操作 * 3:自行获取 Jedis 对象进行操作 */ public String select(int databaseIndex) { // return jedisCluster.select(databaseIndex); throw new IllegalStateException("Redis Cluster does not support multiple databases like the stand alone version of Redis, " + "there is just database 0, and SELECT is not allowed."); } /** * 为给定 key 设置生存时间,当 key 过期时(生存时间为 0 ),它会被自动删除。 * 在 JbootRedis 中,带有生存时间的 key 被称为『易失的』(volatile)。 */ public Long expire(Object key, int seconds) { return jedisCluster.expire(keyToBytes(key), seconds); } /** * EXPIREAT 的作用和 EXPIRE 类似,都用于为 key 设置生存时间。不同在于 EXPIREAT 命令接受的时间参数是 UNIX 时间戳(unix timestamp)。 */ public Long expireAt(Object key, long unixTime) { return jedisCluster.expireAt(keyToBytes(key), unixTime); } /** * 这个命令和 EXPIRE 命令的作用类似,但是它以毫秒为单位设置 key 的生存时间,而不像 EXPIRE 命令那样,以秒为单位。 */ public Long pexpire(Object key, long milliseconds) { return jedisCluster.pexpire(keyToBytes(key), milliseconds); } /** * 这个命令和 EXPIREAT 命令类似,但它以毫秒为单位设置 key 的过期 unix 时间戳,而不是像 EXPIREAT 那样,以秒为单位。 */ public Long pexpireAt(Object key, long millisecondsTimestamp) { return jedisCluster.pexpireAt(keyToBytes(key), millisecondsTimestamp); } /** * 将给定 key 的值设为 value ,并返回 key 的旧值(old value)。 * 当 key 存在但不是字符串类型时,返回一个错误。 */ @SuppressWarnings("unchecked") public <T> T getSet(Object key, Object value) { return (T) valueFromBytes(jedisCluster.getSet(keyToBytes(key), valueToBytes(value))); } /** * 移除给定 key 的生存时间,将这个 key 从『易失的』(带生存时间 key )转换成『持久的』(一个不带生存时间、永不过期的 key )。 */ public Long persist(Object key) { return jedisCluster.persist(keyToBytes(key)); } /** * 返回 key 所储存的值的类型。 */ public String type(Object key) { return jedisCluster.type(keyToBytes(key)); } /** * 以秒为单位,返回给定 key 的剩余生存时间(TTL, time to live)。 */ public Long ttl(Object key) { return jedisCluster.ttl(keyToBytes(key)); } /** * 这个命令类似于 TTL 命令,但它以毫秒为单位返回 key 的剩余生存时间,而不是像 TTL 命令那样,以秒为单位。 */ public Long pttl(Object key) { return jedisCluster.pttl(key.toString()); } /** * 对象被引用的数量 */ public Long objectRefcount(Object key) { // return jedisCluster.objectRefcount(keyToBytes(key)); throw new JbootException("not support move objectRefcount in redis cluster."); } /** * 对象没有被访问的空闲时间 */ public Long objectIdletime(Object key) { // return jedisCluster.objectIdletime(keyToBytes(key)); throw new JbootException("not support move objectIdletime in redis cluster."); } /** * 将哈希表 key 中的域 field 的值设为 value 。 * 如果 key 不存在,一个新的哈希表被创建并进行 HSET 操作。 * 如果域 field 已经存在于哈希表中,旧值将被覆盖。 */ public Long hset(Object key, Object field, Object value) { return jedisCluster.hset(keyToBytes(key), valueToBytes(field), valueToBytes(value)); } /** * 同时将多个 field-value (域-值)对设置到哈希表 key 中。 * 此命令会覆盖哈希表中已存在的域。 * 如果 key 不存在,一个空哈希表被创建并执行 HMSET 操作。 */ public String hmset(Object key, Map<Object, Object> hash) { Map<byte[], byte[]> para = new HashMap<byte[], byte[]>(); for (Entry<Object, Object> e : hash.entrySet()) para.put(valueToBytes(e.getKey()), valueToBytes(e.getValue())); return jedisCluster.hmset(keyToBytes(key), para); } /** * 返回哈希表 key 中给定域 field 的值。 */ @SuppressWarnings("unchecked") public <T> T hget(Object key, Object field) { return (T) valueFromBytes(jedisCluster.hget(keyToBytes(key), valueToBytes(field))); } /** * 返回哈希表 key 中,一个或多个给定域的值。 * 如果给定的域不存在于哈希表,那么返回一个 nil 值。 * 因为不存在的 key 被当作一个空哈希表来处理,所以对一个不存在的 key 进行 HMGET 操作将返回一个只带有 nil 值的表。 */ @SuppressWarnings("rawtypes") public List hmget(Object key, Object... fields) { List<byte[]> data = jedisCluster.hmget(keyToBytes(key), valuesToBytesArray(fields)); return valueListFromBytesList(data); } /** * 删除哈希表 key 中的一个或多个指定域,不存在的域将被忽略。 */ public Long hdel(Object key, Object... fields) { return jedisCluster.hdel(keyToBytes(key), valuesToBytesArray(fields)); } /** * 查看哈希表 key 中,给定域 field 是否存在。 */ public boolean hexists(Object key, Object field) { return jedisCluster.hexists(keyToBytes(key), valueToBytes(field)); } /** * 返回哈希表 key 中,所有的域和值。 * 在返回值里,紧跟每个域名(field name)之后是域的值(value),所以返回值的长度是哈希表大小的两倍。 */ @SuppressWarnings("rawtypes") public Map hgetAll(Object key) { Map<byte[], byte[]> data = jedisCluster.hgetAll(keyToBytes(key)); Map<Object, Object> result = new HashMap<Object, Object>(); for (Entry<byte[], byte[]> e : data.entrySet()) result.put(valueFromBytes(e.getKey()), valueFromBytes(e.getValue())); return result; } /** * 返回哈希表 key 中所有域的值。 */ @SuppressWarnings("rawtypes") public List hvals(Object key) { Collection<byte[]> data = jedisCluster.hvals(keyToBytes(key)); return valueListFromBytesList(data); } /** * 返回哈希表 key 中的所有域。 * 底层实现此方法取名为 hfields 更为合适,在此仅为与底层保持一致 */ public Set<Object> hkeys(Object key) { Set<byte[]> fieldSet = jedisCluster.hkeys(keyToBytes(key)); Set<Object> result = new HashSet<Object>(); fieldSetFromBytesSet(fieldSet, result); return result; } /** * 返回哈希表 key 中域的数量。 */ public Long hlen(Object key) { return jedisCluster.hlen(keyToBytes(key)); } /** * 为哈希表 key 中的域 field 的值加上增量 increment 。 * 增量也可以为负数,相当于对给定域进行减法操作。 * 如果 key 不存在,一个新的哈希表被创建并执行 HINCRBY 命令。 * 如果域 field 不存在,那么在执行命令前,域的值被初始化为 0 。 * 对一个储存字符串值的域 field 执行 HINCRBY 命令将造成一个错误。 * 本操作的值被限制在 64 位(bit)有符号数字表示之内。 */ public Long hincrBy(Object key, Object field, long value) { return jedisCluster.hincrBy(keyToBytes(key), valueToBytes(field), value); } /** * 为哈希表 key 中的域 field 加上浮点数增量 increment 。 * 如果哈希表中没有域 field ,那么 HINCRBYFLOAT 会先将域 field 的值设为 0 ,然后再执行加法操作。 * 如果键 key 不存在,那么 HINCRBYFLOAT 会先创建一个哈希表,再创建域 field ,最后再执行加法操作。 * 当以下任意一个条件发生时,返回一个错误: * 1:域 field 的值不是字符串类型(因为 redis 中的数字和浮点数都以字符串的形式保存,所以它们都属于字符串类型) * 2:域 field 当前的值或给定的增量 increment 不能解释(parse)为双精度浮点数(double precision floating point number) * HINCRBYFLOAT 命令的详细功能和 INCRBYFLOAT 命令类似,请查看 INCRBYFLOAT 命令获取更多相关信息。 */ public Double hincrByFloat(Object key, Object field, double value) { return jedisCluster.hincrByFloat(keyToBytes(key), valueToBytes(field), value); } /** * 返回列表 key 中,下标为 index 的元素。 * 下标(index)参数 start 和 stop 都以 0 为底,也就是说,以 0 表示列表的第一个元素,以 1 表示列表的第二个元素,以此类推。 * 你也可以使用负数下标,以 -1 表示列表的最后一个元素, -2 表示列表的倒数第二个元素,以此类推。 * 如果 key 不是列表类型,返回一个错误。 */ @SuppressWarnings("unchecked") /** * 返回列表 key 中,下标为 index 的元素。 * 下标(index)参数 start 和 stop 都以 0 为底,也就是说,以 0 表示列表的第一个元素, * 以 1 表示列表的第二个元素,以此类推。 * 你也可以使用负数下标,以 -1 表示列表的最后一个元素, -2 表示列表的倒数第二个元素,以此类推。 * 如果 key 不是列表类型,返回一个错误。 */ public <T> T lindex(Object key, long index) { return (T) valueFromBytes(jedisCluster.lindex(keyToBytes(key), index)); } /** * 返回列表 key 的长度。 * 如果 key 不存在,则 key 被解释为一个空列表,返回 0 . * 如果 key 不是列表类型,返回一个错误。 */ public Long llen(Object key) { return jedisCluster.llen(keyToBytes(key)); } /** * 移除并返回列表 key 的头元素。 */ @SuppressWarnings("unchecked") public <T> T lpop(Object key) { return (T) valueFromBytes(jedisCluster.lpop(keyToBytes(key))); } /** * 将一个或多个值 value 插入到列表 key 的表头 * 如果有多个 value 值,那么各个 value 值按从左到右的顺序依次插入到表头: 比如说, * 对空列表 mylist 执行命令 LPUSH mylist a b c ,列表的值将是 c b a , * 这等同于原子性地执行 LPUSH mylist a 、 LPUSH mylist b 和 LPUSH mylist c 三个命令。 * 如果 key 不存在,一个空列表会被创建并执行 LPUSH 操作。 * 当 key 存在但不是列表类型时,返回一个错误。 */ public Long lpush(Object key, Object... values) { return jedisCluster.lpush(keyToBytes(key), valuesToBytesArray(values)); } /** * 将列表 key 下标为 index 的元素的值设置为 value 。 * 当 index 参数超出范围,或对一个空列表( key 不存在)进行 LSET 时,返回一个错误。 * 关于列表下标的更多信息,请参考 LINDEX 命令。 */ public String lset(Object key, long index, Object value) { return jedisCluster.lset(keyToBytes(key), index, valueToBytes(value)); } /** * 根据参数 count 的值,移除列表中与参数 value 相等的元素。 * count 的值可以是以下几种: * count 大于 0 : 从表头开始向表尾搜索,移除与 value 相等的元素,数量为 count 。 * count 小于 0 : 从表尾开始向表头搜索,移除与 value 相等的元素,数量为 count 的绝对值。 * count 等于 0 : 移除表中所有与 value 相等的值。 */ public Long lrem(Object key, long count, Object value) { return jedisCluster.lrem(keyToBytes(key), count, valueToBytes(value)); } /** * 返回列表 key 中指定区间内的元素,区间以偏移量 start 和 stop 指定。 * 下标(index)参数 start 和 stop 都以 0 为底,也就是说,以 0 表示列表的第一个元素,以 1 表示列表的第二个元素,以此类推。 * 你也可以使用负数下标,以 -1 表示列表的最后一个元素, -2 表示列表的倒数第二个元素,以此类推。 * <pre> * 例子: * 获取 list 中所有数据:cache.lrange(listKey, 0, -1); * 获取 list 中下标 1 到 3 的数据: cache.lrange(listKey, 1, 3); * </pre> */ @SuppressWarnings("rawtypes") public List lrange(Object key, long start, long end) { List<byte[]> data = jedisCluster.lrange(keyToBytes(key), start, end); if (data != null) { return valueListFromBytesList(data); } else { return new ArrayList<byte[]>(0); } } /** * 对一个列表进行修剪(trim),就是说,让列表只保留指定区间内的元素,不在指定区间之内的元素都将被删除。 * 举个例子,执行命令 LTRIM list 0 2 ,表示只保留列表 list 的前三个元素,其余元素全部删除。 * 下标(index)参数 start 和 stop 都以 0 为底,也就是说,以 0 表示列表的第一个元素,以 1 表示列表的第二个元素,以此类推。 * 你也可以使用负数下标,以 -1 表示列表的最后一个元素, -2 表示列表的倒数第二个元素,以此类推。 * 当 key 不是列表类型时,返回一个错误。 */ public String ltrim(Object key, long start, long end) { return jedisCluster.ltrim(keyToBytes(key), start, end); } /** * 移除并返回列表 key 的尾元素。 */ @SuppressWarnings("unchecked") public <T> T rpop(Object key) { return (T) valueFromBytes(jedisCluster.rpop(keyToBytes(key))); } /** * 命令 RPOPLPUSH 在一个原子时间内,执行以下两个动作: * 将列表 source 中的最后一个元素(尾元素)弹出,并返回给客户端。 * 将 source 弹出的元素插入到列表 destination ,作为 destination 列表的的头元素。 */ @SuppressWarnings("unchecked") public <T> T rpoplpush(Object srcKey, Object dstKey) { return (T) valueFromBytes(jedisCluster.rpoplpush(keyToBytes(srcKey), keyToBytes(dstKey))); } /** * 将一个或多个值 value 插入到列表 key 的表尾(最右边)。 * 如果有多个 value 值,那么各个 value 值按从左到右的顺序依次插入到表尾:比如 * 对一个空列表 mylist 执行 RPUSH mylist a b c ,得出的结果列表为 a b c , * 等同于执行命令 RPUSH mylist a 、 RPUSH mylist b 、 RPUSH mylist c 。 * 如果 key 不存在,一个空列表会被创建并执行 RPUSH 操作。 * 当 key 存在但不是列表类型时,返回一个错误。 */ public Long rpush(Object key, Object... values) { return jedisCluster.rpush(keyToBytes(key), valuesToBytesArray(values)); } /** * BLPOP 是列表的阻塞式(blocking)弹出原语。 * 它是 LPOP 命令的阻塞版本,当给定列表内没有任何元素可供弹出的时候,连接将被 BLPOP 命令阻塞,直到等待超时或发现可弹出元素为止。 * 当给定多个 key 参数时,按参数 key 的先后顺序依次检查各个列表,弹出第一个非空列表的头元素。 */ @SuppressWarnings("rawtypes") public List blpop(Object... keys) { // String[] keysStrings = new String[keys.length]; // for (int i = 0; i < keys.length; i++) { // keysStrings[i] = keys[i].toString(); // } List<byte[]> data = jedisCluster.blpop(timeout, keysToBytesArray(keys)); if (data != null && data.size() == 2) { List<Object> objects = new ArrayList<>(); objects.add(new String(data.get(0))); objects.add(valueFromBytes(data.get(1))); return objects; } return valueListFromBytesList(data); } /** * BLPOP 是列表的阻塞式(blocking)弹出原语。 * 它是 LPOP 命令的阻塞版本,当给定列表内没有任何元素可供弹出的时候,连接将被 BLPOP 命令阻塞,直到等待超时或发现可弹出元素为止。 * 当给定多个 key 参数时,按参数 key 的先后顺序依次检查各个列表,弹出第一个非空列表的头元素。 */ @SuppressWarnings("rawtypes") public List blpop(Integer timeout, Object... keys) { List<byte[]> data = jedisCluster.blpop(timeout, keysToBytesArray(keys)); return valueListFromBytesList(data); } /** * BRPOP 是列表的阻塞式(blocking)弹出原语。 * 它是 RPOP 命令的阻塞版本,当给定列表内没有任何元素可供弹出的时候,连接将被 BRPOP 命令阻塞,直到等待超时或发现可弹出元素为止。 * 当给定多个 key 参数时,按参数 key 的先后顺序依次检查各个列表,弹出第一个非空列表的尾部元素。 * 关于阻塞操作的更多信息,请查看 BLPOP 命令, BRPOP 除了弹出元素的位置和 BLPOP 不同之外,其他表现一致。 */ @SuppressWarnings("rawtypes") public List brpop(Object... keys) { List<byte[]> data = jedisCluster.brpop(timeout, keysToBytesArray(keys)); return valueListFromBytesList(data); } /** * BRPOP 是列表的阻塞式(blocking)弹出原语。 * 它是 RPOP 命令的阻塞版本,当给定列表内没有任何元素可供弹出的时候,连接将被 BRPOP 命令阻塞,直到等待超时或发现可弹出元素为止。 * 当给定多个 key 参数时,按参数 key 的先后顺序依次检查各个列表,弹出第一个非空列表的尾部元素。 * 关于阻塞操作的更多信息,请查看 BLPOP 命令, BRPOP 除了弹出元素的位置和 BLPOP 不同之外,其他表现一致。 */ @SuppressWarnings("rawtypes") public List brpop(Integer timeout, Object... keys) { List<byte[]> data = jedisCluster.brpop(timeout, keysToBytesArray(keys)); return valueListFromBytesList(data); } /** * 使用客户端向 JbootRedis 服务器发送一个 PING ,如果服务器运作正常的话,会返回一个 PONG 。 * 通常用于测试与服务器的连接是否仍然生效,或者用于测量延迟值。 */ public String ping() { // jedisCluster.getClusterNodes().get("aa").getResource().ping // return jedisCluster..ping(); Map<String, JedisPool> nodes = jedisCluster.getClusterNodes(); if (nodes != null) { for (JedisPool pool : nodes.values()) { try (Jedis node = pool.getResource()) { String ret = node.ping(); if (ret != null) { return ret; } } } } return null; } /** * 将一个或多个 member 元素加入到集合 key 当中,已经存在于集合的 member 元素将被忽略。 * 假如 key 不存在,则创建一个只包含 member 元素作成员的集合。 * 当 key 不是集合类型时,返回一个错误。 */ public Long sadd(Object key, Object... members) { return jedisCluster.sadd(keyToBytes(key), valuesToBytesArray(members)); } /** * 返回集合 key 的基数(集合中元素的数量)。 */ public Long scard(Object key) { return jedisCluster.scard(keyToBytes(key)); } /** * 移除并返回集合中的一个随机元素。 * 如果只想获取一个随机元素,但不想该元素从集合中被移除的话,可以使用 SRANDMEMBER 命令。 */ @SuppressWarnings("unchecked") public <T> T spop(Object key) { return (T) valueFromBytes(jedisCluster.spop(keyToBytes(key))); } /** * 返回集合 key 中的所有成员。 * 不存在的 key 被视为空集合。 */ @SuppressWarnings("rawtypes") public Set smembers(Object key) { Set<byte[]> data = jedisCluster.smembers(keyToBytes(key)); Set<Object> result = new HashSet<Object>(); valueSetFromBytesSet(data, result); return result; } /** * 判断 member 元素是否集合 key 的成员。 */ public boolean sismember(Object key, Object member) { return jedisCluster.sismember(keyToBytes(key), valueToBytes(member)); } /** * 返回多个集合的交集,多个集合由 keys 指定 */ @SuppressWarnings("rawtypes") public Set sinter(Object... keys) { Set<byte[]> data = jedisCluster.sinter(keysToBytesArray(keys)); Set<Object> result = new HashSet<Object>(); valueSetFromBytesSet(data, result); return result; } /** * 返回集合中的一个随机元素。 */ @SuppressWarnings("unchecked") public <T> T srandmember(Object key) { return (T) valueFromBytes(jedisCluster.srandmember(keyToBytes(key))); } /** * 返回集合中的 count 个随机元素。 * 从 JbootRedis 2.6 版本开始, SRANDMEMBER 命令接受可选的 count 参数: * 如果 count 为正数,且小于集合基数,那么命令返回一个包含 count 个元素的数组,数组中的元素各不相同。 * 如果 count 大于等于集合基数,那么返回整个集合。 * 如果 count 为负数,那么命令返回一个数组,数组中的元素可能会重复出现多次,而数组的长度为 count 的绝对值。 * 该操作和 SPOP 相似,但 SPOP 将随机元素从集合中移除并返回,而 SRANDMEMBER 则仅仅返回随机元素,而不对集合进行任何改动。 */ @SuppressWarnings("rawtypes") public List srandmember(Object key, int count) { List<byte[]> data = jedisCluster.srandmember(keyToBytes(key), count); return valueListFromBytesList(data); } /** * 移除集合 key 中的一个或多个 member 元素,不存在的 member 元素会被忽略。 */ public Long srem(Object key, Object... members) { return jedisCluster.srem(keyToBytes(key), valuesToBytesArray(members)); } /** * 返回多个集合的并集,多个集合由 keys 指定 * 不存在的 key 被视为空集。 */ @SuppressWarnings("rawtypes") public Set sunion(Object... keys) { Set<byte[]> data = jedisCluster.sunion(keysToBytesArray(keys)); Set<Object> result = new HashSet<Object>(); valueSetFromBytesSet(data, result); return result; } /** * 返回一个集合的全部成员,该集合是所有给定集合之间的差集。 * 不存在的 key 被视为空集。 */ @SuppressWarnings("rawtypes") public Set sdiff(Object... keys) { Set<byte[]> data = jedisCluster.sdiff(keysToBytesArray(keys)); Set<Object> result = new HashSet<Object>(); valueSetFromBytesSet(data, result); return result; } /** * 将一个或多个 member 元素及其 score 值加入到有序集 key 当中。 * 如果某个 member 已经是有序集的成员,那么更新这个 member 的 score 值, * 并通过重新插入这个 member 元素,来保证该 member 在正确的位置上。 */ public Long zadd(Object key, double score, Object member) { return jedisCluster.zadd(keyToBytes(key), score, valueToBytes(member)); } public Long zadd(Object key, Map<Object, Double> scoreMembers) { Map<byte[], Double> para = new HashMap<byte[], Double>(); for (Entry<Object, Double> e : scoreMembers.entrySet()) para.put(valueToBytes(e.getKey()), e.getValue()); // valueToBytes is important return jedisCluster.zadd(keyToBytes(key), para); } /** * 返回有序集 key 的基数。 */ public Long zcard(Object key) { return jedisCluster.zcard(keyToBytes(key)); } /** * 返回有序集 key 中, score 值在 min 和 max 之间(默认包括 score 值等于 min 或 max )的成员的数量。 * 关于参数 min 和 max 的详细使用方法,请参考 ZRANGEBYSCORE 命令。 */ public Long zcount(Object key, double min, double max) { return jedisCluster.zcount(keyToBytes(key), min, max); } /** * 为有序集 key 的成员 member 的 score 值加上增量 increment 。 */ public Double zincrby(Object key, double score, Object member) { return jedisCluster.zincrby(keyToBytes(key), score, valueToBytes(member)); } /** * 返回有序集 key 中,指定区间内的成员。 * 其中成员的位置按 score 值递增(从小到大)来排序。 * 具有相同 score 值的成员按字典序(lexicographical order )来排列。 * 如果你需要成员按 score 值递减(从大到小)来排列,请使用 ZREVRANGE 命令。 */ @SuppressWarnings("rawtypes") public Set zrange(Object key, long start, long end) { Set<byte[]> data = jedisCluster.zrange(keyToBytes(key), start, end); Set<Object> result = new LinkedHashSet<Object>(); // 有序集合必须 LinkedHashSet valueSetFromBytesSet(data, result); return result; } /** * 返回有序集 key 中,指定区间内的成员。 * 其中成员的位置按 score 值递减(从大到小)来排列。 * 具有相同 score 值的成员按字典序的逆序(reverse lexicographical order)排列。 * 除了成员按 score 值递减的次序排列这一点外, ZREVRANGE 命令的其他方面和 ZRANGE 命令一样。 */ @SuppressWarnings("rawtypes") public Set zrevrange(Object key, long start, long end) { Set<byte[]> data = jedisCluster.zrevrange(keyToBytes(key), start, end); Set<Object> result = new LinkedHashSet<Object>(); // 有序集合必须 LinkedHashSet valueSetFromBytesSet(data, result); return result; } /** * 返回有序集 key 中,所有 score 值介于 min 和 max 之间(包括等于 min 或 max )的成员。 * 有序集成员按 score 值递增(从小到大)次序排列。 */ @SuppressWarnings("rawtypes") public Set zrangeByScore(Object key, double min, double max) { Set<byte[]> data = jedisCluster.zrangeByScore(keyToBytes(key), min, max); Set<Object> result = new LinkedHashSet<Object>(); // 有序集合必须 LinkedHashSet valueSetFromBytesSet(data, result); return result; } /** * 返回有序集 key 中成员 member 的排名。其中有序集成员按 score 值递增(从小到大)顺序排列。 * 排名以 0 为底,也就是说, score 值最小的成员排名为 0 。 * 使用 ZREVRANK 命令可以获得成员按 score 值递减(从大到小)排列的排名。 */ public Long zrank(Object key, Object member) { return jedisCluster.zrank(keyToBytes(key), valueToBytes(member)); } /** * 返回有序集 key 中成员 member 的排名。其中有序集成员按 score 值递减(从大到小)排序。 * 排名以 0 为底,也就是说, score 值最大的成员排名为 0 。 * 使用 ZRANK 命令可以获得成员按 score 值递增(从小到大)排列的排名。 */ public Long zrevrank(Object key, Object member) { return jedisCluster.zrevrank(keyToBytes(key), valueToBytes(member)); } /** * 移除有序集 key 中的一个或多个成员,不存在的成员将被忽略。 * 当 key 存在但不是有序集类型时,返回一个错误。 */ public Long zrem(Object key, Object... members) { return jedisCluster.zrem(keyToBytes(key), valuesToBytesArray(members)); } /** * 返回有序集 key 中,成员 member 的 score 值。 * 如果 member 元素不是有序集 key 的成员,或 key 不存在,返回 nil 。 */ public Double zscore(Object key, Object member) { return jedisCluster.zscore(keyToBytes(key), valueToBytes(member)); } /** * 发布 * * @param channel * @param message */ public void publish(String channel, String message) { jedisCluster.publish(channel, message); } /** * 发布 * * @param channel * @param message */ public void publish(byte[] channel, byte[] message) { jedisCluster.publish(channel, message); } /** * 订阅 * * @param listener * @param channels */ public void subscribe(JedisPubSub listener, final String... channels) { /** * https://github.com/xetorthio/jedis/wiki/AdvancedUsage * Note that subscribe is a blocking operation because it will poll JbootRedis for responses on the thread that calls subscribe. * A single JedisPubSub instance can be used to subscribe to multiple channels. * You can call subscribe or psubscribe on an existing JedisPubSub instance to change your subscriptions. */ new Thread("jboot-redisCluster-subscribe-JedisPubSub") { @Override public void run() { while (true) { //订阅线程断开连接,需要进行重连 try { jedisCluster.subscribe(listener, channels); LOG.warn("Disconnect to redis channel in subscribe JedisPubSub!"); break; } catch (JedisConnectionException e) { LOG.error("failed connect to redis, reconnect it.", e); QuietlyUtil.quietlySleep(1000); } } } }.start(); } /** * 订阅 * * @param binaryListener * @param channels */ @Override public void subscribe(BinaryJedisPubSub binaryListener, final byte[]... channels) { /** * https://github.com/xetorthio/jedis/wiki/AdvancedUsage * Note that subscribe is a blocking operation because it will poll JbootRedis for responses on the thread that calls subscribe. * A single JedisPubSub instance can be used to subscribe to multiple channels. * You can call subscribe or psubscribe on an existing JedisPubSub instance to change your subscriptions. */ new Thread("jboot-redisCluster-subscribe-BinaryJedisPubSub") { @Override public void run() { while (!isClose()) { //订阅线程断开连接,需要进行重连 try { jedisCluster.subscribe(binaryListener, channels); LOG.warn("Disconnect to redis channel in subscribe BinaryJedisPubSub!"); break; } catch (Throwable e) { LOG.error("failed connect to redis, reconnect it.", e); QuietlyUtil.quietlySleep(1000); } } } }.start(); } @Override public RedisScanResult<String> scan(String pattern, String cursor, int scanCount) { ScanParams params = new ScanParams(); params.match(pattern).count(scanCount); ScanResult<String> scanResult = jedisCluster.scan(cursor, params); return new RedisScanResult<>(scanResult.getCursor(), scanResult.getResult()); } @Override public Object eval(String script, int keyCount, String... params) { return jedisCluster.eval(script, keyCount, params); } public JedisCluster getJedisCluster() { return jedisCluster; } }
26,137
653
// Copyright (c) 2020 by Chrono // // git clone <EMAIL>:nlohmann/json.git // wget https://github.com/nlohmann/json/releases/download/v3.7.3/json.hpp // // g++ json.cpp -std=c++14 -I../common -o a.out;./a.out #include <iostream> // you should put json.hpp in ../common #include "json.hpp" using namespace std; using json_t = nlohmann::json; void case1() { json_t j; j["age"] = 23; j["name"] = "spiderman"; j["gear"]["suits"] = "2099"; j["jobs"] = {"superhero", "neighborhood"}; vector<int> v = {1,2,3}; j["numbers"] = v; map<string, int> m = {{"one",1}, {"two", 2}}; j["kv"] = m; cout << j.dump() << endl; cout << j.dump(2) << endl; #if 0 json_t j = { {"age", 23}, {"name", "spiderman"}, {"jobs", {"superhero", "neighborhood"}}, {"gear", {"suits", "2099"}} }; cout << j.dump(2) << endl; #endif } void case2() { string str = R"({ "name": "peter", "age" : 23, "married" : true })"; auto j = json_t::parse(str); assert(j["age"] == 23); assert(j["name"] == "peter"); cout << j.dump(2) << endl; } void case3() { auto txt = "bad:data"s; try { auto j = json_t::parse(txt); } catch(std::exception& e) { cout << e.what() << endl; } } int main() { //cout << json_t::meta() << endl; cout << json_t::meta().dump(2) << endl; case1(); case2(); case3(); cout << "json demo" << endl; }
734